Dataset schema:

  column      type            range / classes
  ----------  --------------  ----------------
  code        stringlengths   5 .. 1M
  repo_name   stringlengths   5 .. 109
  path        stringlengths   6 .. 208
  language    stringclasses   1 value
  license     stringclasses   15 values
  size        int64           5 .. 1M
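Each record that follows repeats this schema: a full Scala source file in code, plus its repo_name, path, language, license, and size. As a rough sketch of how a consumer might model and filter such records, here is a small Scala example; the CodeRecord case class, the abbreviated code strings, and the filter threshold are illustrative assumptions for this sketch, not an API provided by the dataset.

// Minimal sketch (illustrative only): one dataset row modeled as a case class,
// plus a simple filter over a couple of records taken from entries shown later in this dump.
final case class CodeRecord(
  code: String,      // full source text of the file (5 .. 1M characters)
  repoName: String,  // e.g. "dwango/S99"
  path: String,      // file path inside the repository
  language: String,  // a single class in this dataset: "Scala"
  license: String,   // one of 15 license identifiers, e.g. "mit", "apache-2.0"
  size: Long         // file size in bytes
)

object CodeRecordExample extends App {
  // Two illustrative records, with the code field abbreviated.
  val records = Seq(
    CodeRecord("package jp.co.dwango.s99 ...", "dwango/S99",
      "src/test/scala/jp/co/dwango/s99/P02Check.scala", "Scala", "mit", 284L),
    CodeRecord("package org.bfn.ninetynineprobs ...", "bfontaine/99Scala",
      "src/main/scala/P33.scala", "Scala", "mit", 119L)
  )

  // Keep only MIT-licensed files smaller than 200 bytes.
  val small = records.filter(r => r.license == "mit" && r.size < 200L)
  small.foreach(r => println(s"${r.repoName}/${r.path} (${r.size} bytes)"))
}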
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This file is part of Rudder.
*
* Rudder is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU General Public License version 3, the copyright holders add
* the following Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU General
* Public License version 3, when you create a Related Module, this
* Related Module is not considered as a part of the work and may be
* distributed under the license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* Rudder is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Rudder. If not, see <http://www.gnu.org/licenses/>.
*
*************************************************************************************
*/

package com.normation.rudder.web.services

import com.normation.rudder.domain.policies.DirectiveId
import com.normation.cfclerk.domain.Variable
import com.normation.cfclerk.domain.VariableSpec
import com.normation.rudder.web.model.{ DirectiveEditor }
import com.normation.cfclerk.services.TechniqueRepository
import net.liftweb.common._
import Box._
import com.normation.cfclerk.domain.{ TechniqueId, Technique }
import org.joda.time.{ LocalDate, LocalTime, Duration, DateTime }
import com.normation.cfclerk.domain.PredefinedValuesVariableSpec

trait DirectiveEditorService {

  /**
   * Retrieve a policyEditor given the Directive name,
   * if such Directive is known in the system
   */
  def get(
    techniqueId: TechniqueId,
    directiveId: DirectiveId,
    //withExecutionPlanning:Option[TemporalVariableVal] = None,
    withVars: Map[String, Seq[String]] = Map()): Box[DirectiveEditor]
}

class DirectiveEditorServiceImpl(
  techniqueRepository: TechniqueRepository,
  section2FieldService: Section2FieldService) extends DirectiveEditorService {

  /**
   * Retrieve vars for the given Directive.
   * First, we try to retrieve default vars
   * from the techniqueRepository.
   * Then, we look in the parameter vars to
   * search for vars with the same name.
   * For each found, we change the default
   * var value by the parameter one.
   */

  import scala.util.control.Breaks._

  /*
   * We exactly set the variable values to varValues,
   * so a missing variable key actually set the value
   * to Seq()
   */
  private def getVars(allVars: Seq[VariableSpec], vars: Map[String, Seq[String]]): Seq[Variable] = {
    allVars.map { varSpec =>
      varSpec match {
        case spec : PredefinedValuesVariableSpec =>
          // variables values are already builtin
          spec.toVariable()
        case _ =>
          // variable can be modified
          varSpec.toVariable(vars.getOrElse(varSpec.name, Seq()))
      }
    }
  }

  override def get(
    techniqueId: TechniqueId,
    directiveId: DirectiveId,
    withVarValues: Map[String, Seq[String]] = Map()): Box[DirectiveEditor] = {
    for {
      //start by checking Directive existence
      pol <- techniqueRepository.get(techniqueId) ?~! s"Error when looking for technique with ID '${techniqueId}'. Check technique name and version"
      allVars = pol.rootSection.getAllVariables
      vars = getVars(allVars, withVarValues)
      pe <- section2FieldService.initDirectiveEditor(pol, directiveId, vars)
    } yield pe
  }
}
armeniaca/rudder
rudder-web/src/main/scala/com/normation/rudder/web/services/DirectiveEditorService.scala
Scala
gpl-3.0
4,184
package app

import scalajs.js, js.Dynamic.{global => g}, js.JSConverters._
import wav.devtools.sbt.httpserver.buildservice.BuildService

object App extends js.JSApp {

  implicit def `f2->undefjs`[P1,P2,R](f: (P1,P2) => R) =
    Some(f: js.Function2[P1, P2, R]).orUndefined

  def onBuildEvent(project: String, event: String): Unit =
    g.console.debug("scalajs.onBuildEvent", project, event)

  def main(): Unit = {
    BuildService.configure { c =>
      c.onBuildEvent = onBuildEvent _
    }
    BuildService().foreach(_.start())
  }

}
wav/sbt-httpserver
examples/build-services-sjs/src/main/scala/app/App.scala
Scala
apache-2.0
541
package jp.co.dwango.s99

import org.scalacheck.Properties
import org.scalacheck.Prop.{forAll, BooleanOperators}

class P02Check extends Properties("P02") {
  property("penultimate()") = forAll { (s: List[Int]) =>
    (s.length >= 2) ==> (P02.penultimate(s) == s(s.length - 2))
  }
}
dwango/S99
src/test/scala/jp/co/dwango/s99/P02Check.scala
Scala
mit
284
package fr.winbee

import org.scalatest.{Matchers, FlatSpec}

abstract class UnitSpec extends FlatSpec with Matchers
Winbee/gnuCashExtractor
src/test/scala/fr/winbee/UnitSpec.scala
Scala
gpl-2.0
117
/*
 * Copyright 2015 Priyesh Patel
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package me.priyesh.litho
package core

import java.io.{OutputStream, OutputStreamWriter}

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Promise
import scala.sys.process._

object AndroidBridge {

  private def pushToSd(source: String): CanFail =
    if (s"adb push $source /sdcard/Litho/".! == 0) Succeeded else Failed

  private def copyFontsToSystemAndReboot(deviceId: String): Unit = {
    val inputStreamPromise = Promise[OutputStream]()

    def writeCommands(inputStream: OutputStream): Unit = {
      val buffered = new OutputStreamWriter(inputStream)
      buffered.write("su\n")
      buffered.write("mount -o rw,remount /system\n")
      buffered.write("cp /sdcard/Litho/*.ttf /system/fonts/\n")
      buffered.write("exit\n")
      buffered.write("exit\n")
      buffered.close()
    }

    s"adb shell -s $deviceId".run(new ProcessIO({ inputStream =>
      inputStreamPromise.success(inputStream)
    }, { outputStream =>
      print(outputStream.read())
      inputStreamPromise.future.foreach(writeCommands)
      BasicIO.toStdOut(outputStream)
    }, { _ => }))

    println("Process finished")

    (5 to 1 by -1).foreach(n => {
      Thread.sleep(1000)
      println(s"Rebooting in $n seconds")
    })

    "adb reboot".!
  }

  def connectedDevices(): String = "adb devices".!!

  def install(deviceId: String, folderName: String): CanFail = {
    pushToSd("./RobotoFlashable.zip") then pushToSd(folderName) foreach {
      copyFontsToSystemAndReboot(deviceId)
      println("Installation complete!")
    }
  }
}
ItsPriyesh/Litho
src/me/priyesh/litho/core/AndroidBridge.scala
Scala
apache-2.0
2,158
package org.bfn.ninetynineprobs

object P33 {
  def isCoprimeTo(a : Integer, b : Integer) =
    P32.gcd(a, b) == 1
}
bfontaine/99Scala
src/main/scala/P33.scala
Scala
mit
119
package play.api.mvc import scala.language.reflectiveCalls import java.io._ import scala.xml._ import play.api._ import play.api.libs.json._ import play.api.libs.iteratee._ import play.api.libs.iteratee.Input._ import play.api.libs.iteratee.Parsing._ import play.api.libs.Files.{ TemporaryFile } import Results._ import MultipartFormData._ /** * A request body that adapts automatically according the request Content-Type. */ sealed trait AnyContent { /** * application/form-url-encoded */ def asFormUrlEncoded: Option[Map[String, Seq[String]]] = this match { case AnyContentAsFormUrlEncoded(data) => Some(data) case _ => None } /** * text/plain */ def asText: Option[String] = this match { case AnyContentAsText(txt) => Some(txt) case _ => None } /** * text/xml */ def asXml: Option[NodeSeq] = this match { case AnyContentAsXml(xml) => Some(xml) case _ => None } /** * text/json or application/json */ def asJson: Option[JsValue] = this match { case AnyContentAsJson(json) => Some(json) case _ => None } /** * multipart/form-data */ def asMultipartFormData: Option[MultipartFormData[TemporaryFile]] = this match { case AnyContentAsMultipartFormData(mfd) => Some(mfd) case _ => None } /** * Used when no Content-Type matches */ def asRaw: Option[RawBuffer] = this match { case AnyContentAsRaw(raw) => Some(raw) case _ => None } } /** * AnyContent - Empty request body */ case object AnyContentAsEmpty extends AnyContent /** * AnyContent - Text body */ case class AnyContentAsText(txt: String) extends AnyContent /** * AnyContent - Form url encoded body */ case class AnyContentAsFormUrlEncoded(data: Map[String, Seq[String]]) extends AnyContent /** * AnyContent - Raw body (give access to the raw data as bytes). */ case class AnyContentAsRaw(raw: RawBuffer) extends AnyContent /** * AnyContent - XML body */ case class AnyContentAsXml(xml: NodeSeq) extends AnyContent /** * AnyContent - Json body */ case class AnyContentAsJson(json: JsValue) extends AnyContent /** * AnyContent - Multipart form data body */ case class AnyContentAsMultipartFormData(mdf: MultipartFormData[TemporaryFile]) extends AnyContent /** * Multipart form data body. */ case class MultipartFormData[A](dataParts: Map[String, Seq[String]], files: Seq[FilePart[A]], badParts: Seq[BadPart], missingFileParts: Seq[MissingFilePart]) { // For binary compatibility with 2.1.0 def this(dataParts: Map[String, Seq[String]], files: Seq[FilePart[A]], badParts: Seq[BadPart]) = this(dataParts, files, badParts, Nil) def copy(dataParts: Map[String, Seq[String]] = this.dataParts, files: Seq[FilePart[A]] = this.files, badParts: Seq[BadPart] = this.badParts) = new MultipartFormData(dataParts, files, badParts) /** * Extract the data parts as Form url encoded. */ def asFormUrlEncoded: Map[String, Seq[String]] = dataParts /** * Access a file part. */ def file(key: String): Option[FilePart[A]] = files.find(_.key == key) } /** * Defines parts handled by Multipart form data. */ object MultipartFormData { // For binary compatibility with 2.1.0 def apply[A](dataParts: Map[String, Seq[String]], files: Seq[FilePart[A]], badParts: Seq[BadPart]) = new MultipartFormData[A](dataParts, files, badParts) /** * A part. */ sealed trait Part /** * A data part. */ case class DataPart(key: String, value: String) extends Part /** * A file part. */ case class FilePart[A](key: String, filename: String, contentType: Option[String], ref: A) extends Part /** * A file part with no content provided. */ case class MissingFilePart(key: String) extends Part /** * A part that has not been properly parsed. 
*/ case class BadPart(headers: Map[String, String]) extends Part /** * A data part that has excedeed the max size allowed. */ case class MaxDataPartSizeExceeded(key: String) extends Part } /** * Handle the request body a raw bytes data. * * @param memoryThreshold If the content size is bigger than this limit, the content is stored as file. */ case class RawBuffer(memoryThreshold: Int, initialData: Array[Byte] = Array.empty[Byte]) { import play.api.libs.Files._ import scala.collection.mutable._ private var inMemory = new ArrayBuffer[Byte] ++= initialData private var backedByTemporaryFile: TemporaryFile = _ private var outStream: OutputStream = _ private[play] def push(chunk: Array[Byte]) { if (inMemory != null) { inMemory ++= chunk if (inMemory.size > memoryThreshold) { backToTemporaryFile() } } else { outStream.write(chunk) } } private[play] def close() { if (outStream != null) { outStream.close() } } private[play] def backToTemporaryFile() { backedByTemporaryFile = TemporaryFile("requestBody", "asRaw") outStream = new FileOutputStream(backedByTemporaryFile.file) outStream.write(inMemory.toArray) inMemory = null } /** * Buffer size. */ def size: Long = { if (inMemory != null) inMemory.size else backedByTemporaryFile.file.length } /** * Returns the buffer content as a bytes array. * * @param maxLength The max length allowed to be stored in memory. * @return None if the content is too big to fit in memory. */ def asBytes(maxLength: Int = memoryThreshold): Option[Array[Byte]] = { if (size <= maxLength) { if (inMemory != null) { Some(inMemory.toArray) } else { val inStream = new FileInputStream(backedByTemporaryFile.file) try { val buffer = new Array[Byte](size.toInt) inStream.read(buffer) Some(buffer) } finally { inStream.close() } } } else { None } } /** * Returns the buffer content as File. */ def asFile: File = { if (inMemory != null) { backToTemporaryFile() close() } backedByTemporaryFile.file } override def toString = { "RawBuffer(inMemory=" + Option(inMemory).map(_.size).orNull + ", backedByTemporaryFile=" + backedByTemporaryFile + ")" } } /** * Default body parsers. */ trait BodyParsers { /** * Default body parsers. */ object parse { /** * Unlimited size. */ val UNLIMITED: Int = Integer.MAX_VALUE /** * Default max length allowed for text based body. * * You can configure it in application.conf: * * {{{ * parsers.text.maxLength = 512k * }}} */ lazy val DEFAULT_MAX_TEXT_LENGTH: Int = Play.maybeApplication.flatMap { app => app.configuration.getBytes("parsers.text.maxLength").map(_.toInt) }.getOrElse(1024 * 100) // -- Text parser /** * Parse the body as text without checking the Content-Type. * * @param maxLength Max length allowed or returns EntityTooLarge HTTP response. */ def tolerantText(maxLength: Int): BodyParser[String] = BodyParser("text, maxLength=" + maxLength) { request => Traversable.takeUpTo[Array[Byte]](maxLength) .transform(Iteratee.consume[Array[Byte]]().map(c => new String(c, request.charset.getOrElse("utf-8")))) .flatMap(Iteratee.eofOrElse(Results.EntityTooLarge)) } /** * Parse the body as text without checking the Content-Type. */ def tolerantText: BodyParser[String] = tolerantText(DEFAULT_MAX_TEXT_LENGTH) /** * Parse the body as text if the Content-Type is text/plain. * * @param maxLength Max length allowed or returns EntityTooLarge HTTP response. 
*/ def text(maxLength: Int): BodyParser[String] = when( _.contentType.exists(_ == "text/plain"), tolerantText(maxLength), request => Play.maybeApplication.map(_.global.onBadRequest(request, "Expecting text/plain body")).getOrElse(Results.BadRequest) ) /** * Parse the body as text if the Content-Type is text/plain. */ def text: BodyParser[String] = text(DEFAULT_MAX_TEXT_LENGTH) // -- Raw parser /** * Store the body content in a RawBuffer. * * @param memoryThreshold If the content size is bigger than this limit, the content is stored as file. */ def raw(memoryThreshold: Int): BodyParser[RawBuffer] = BodyParser("raw, memoryThreshold=" + memoryThreshold) { request => val buffer = RawBuffer(memoryThreshold) Iteratee.foreach[Array[Byte]](bytes => buffer.push(bytes)).mapDone { _ => buffer.close() Right(buffer) } } /** * Store the body content in a RawBuffer. */ def raw: BodyParser[RawBuffer] = raw(memoryThreshold = 100 * 1024) // -- JSON parser /** * Parse the body as Json without checking the Content-Type. * * @param maxLength Max length allowed or returns EntityTooLarge HTTP response. */ def tolerantJson(maxLength: Int): BodyParser[JsValue] = BodyParser("json, maxLength=" + maxLength) { request => Traversable.takeUpTo[Array[Byte]](maxLength).apply(Iteratee.consume[Array[Byte]]().map { bytes => scala.util.control.Exception.allCatch[JsValue].either { Json.parse(new String(bytes, request.charset.getOrElse("utf-8"))) }.left.map { e => (Play.maybeApplication.map(_.global.onBadRequest(request, "Invalid Json")).getOrElse(Results.BadRequest), bytes) } }).flatMap(Iteratee.eofOrElse(Results.EntityTooLarge)) .flatMap { case Left(b) => Done(Left(b), Empty) case Right(it) => it.flatMap { case Left((r, in)) => Done(Left(r), El(in)) case Right(json) => Done(Right(json), Empty) } } } /** * Parse the body as Json without checking the Content-Type. */ def tolerantJson: BodyParser[JsValue] = tolerantJson(DEFAULT_MAX_TEXT_LENGTH) /** * Parse the body as Json if the Content-Type is text/json or application/json. * * @param maxLength Max length allowed or returns EntityTooLarge HTTP response. */ def json(maxLength: Int): BodyParser[JsValue] = when( _.contentType.exists(m => m == "text/json" || m == "application/json"), tolerantJson(maxLength), request => Play.maybeApplication.map(_.global.onBadRequest(request, "Expecting text/json or application/json body")).getOrElse(Results.BadRequest) ) /** * Parse the body as Json if the Content-Type is text/json or application/json. */ def json: BodyParser[JsValue] = json(DEFAULT_MAX_TEXT_LENGTH) // -- Empty parser /** * Don't parse the body content. */ def empty: BodyParser[Option[Any]] = BodyParser("empty") { request => Done(Right(None), Empty) } // -- XML parser /** * Parse the body as Xml without checking the Content-Type. * * @param maxLength Max length allowed or returns EntityTooLarge HTTP response. 
*/ def tolerantXml(maxLength: Int): BodyParser[NodeSeq] = BodyParser("xml, maxLength=" + maxLength) { request => Traversable.takeUpTo[Array[Byte]](maxLength).apply(Iteratee.consume[Array[Byte]]().mapDone { bytes => scala.util.control.Exception.allCatch[NodeSeq].either { XML.loadString(new String(bytes, request.charset.getOrElse("utf-8"))) }.left.map { e => (Play.maybeApplication.map(_.global.onBadRequest(request, "Invalid XML")).getOrElse(Results.BadRequest), bytes) } }).flatMap(Iteratee.eofOrElse(Results.EntityTooLarge)) .flatMap { case Left(b) => Done(Left(b), Empty) case Right(it) => it.flatMap { case Left((r, in)) => Done(Left(r), El(in)) case Right(xml) => Done(Right(xml), Empty) } } } /** * Parse the body as Xml without checking the Content-Type. */ def tolerantXml: BodyParser[NodeSeq] = tolerantXml(DEFAULT_MAX_TEXT_LENGTH) /** * Parse the body as Xml if the Content-Type is text/xml. * * @param maxLength Max length allowed or returns EntityTooLarge HTTP response. */ def xml(maxLength: Int): BodyParser[NodeSeq] = when( _.contentType.exists(_.startsWith("text/xml")), tolerantXml(maxLength), request => Play.maybeApplication.map(_.global.onBadRequest(request, "Expecting text/xml body")).getOrElse(Results.BadRequest) ) /** * Parse the body as Xml if the Content-Type is text/xml. */ def xml: BodyParser[NodeSeq] = xml(DEFAULT_MAX_TEXT_LENGTH) // -- File parsers /** * Store the body content into a file. * * @param to The file used to store the content. */ def file(to: File): BodyParser[File] = BodyParser("file, to=" + to) { request => Iteratee.fold[Array[Byte], FileOutputStream](new FileOutputStream(to)) { (os, data) => os.write(data) os }.mapDone { os => os.close() Right(to) } } /** * Store the body content into a temporary file. */ def temporaryFile: BodyParser[TemporaryFile] = BodyParser("temporaryFile") { request => val tempFile = TemporaryFile("requestBody", "asTemporaryFile") file(tempFile.file)(request).mapDone(_ => Right(tempFile)) } // -- FormUrlEncoded /** * Parse the body as Form url encoded without checking the Content-Type. * * @param maxLength Max length allowed or returns EntityTooLarge HTTP response. */ def tolerantFormUrlEncoded(maxLength: Int): BodyParser[Map[String, Seq[String]]] = BodyParser("urlFormEncoded, maxLength=" + maxLength) { request => import play.core.parsers._ import scala.collection.JavaConverters._ Traversable.takeUpTo[Array[Byte]](maxLength).apply(Iteratee.consume[Array[Byte]]().mapDone { c => scala.util.control.Exception.allCatch[Map[String, Seq[String]]].either { FormUrlEncodedParser.parse(new String(c, request.charset.getOrElse("utf-8")), request.charset.getOrElse("utf-8")) }.left.map { e => Play.maybeApplication.map(_.global.onBadRequest(request, "Error parsing application/x-www-form-urlencoded")).getOrElse(Results.BadRequest) } }).flatMap(Iteratee.eofOrElse(Results.EntityTooLarge)) .flatMap { case Left(b) => Done(Left(b), Empty) case Right(it) => it.flatMap { case Left(r) => Done(Left(r), Empty) case Right(urlEncoded) => Done(Right(urlEncoded), Empty) } } } /** * Parse the body as form url encoded without checking the Content-Type. */ def tolerantFormUrlEncoded: BodyParser[Map[String, Seq[String]]] = tolerantFormUrlEncoded(DEFAULT_MAX_TEXT_LENGTH) /** * Parse the body as form url encoded if the Content-Type is application/x-www-form-urlencoded. * * @param maxLength Max length allowed or returns EntityTooLarge HTTP response. 
*/ def urlFormEncoded(maxLength: Int): BodyParser[Map[String, Seq[String]]] = when( _.contentType.exists(_ == "application/x-www-form-urlencoded"), tolerantFormUrlEncoded(maxLength), request => Play.maybeApplication.map(_.global.onBadRequest(request, "Expecting application/x-www-form-urlencoded body")).getOrElse(Results.BadRequest) ) /** * Parse the body as form url encoded if the Content-Type is application/x-www-form-urlencoded. */ def urlFormEncoded: BodyParser[Map[String, Seq[String]]] = urlFormEncoded(DEFAULT_MAX_TEXT_LENGTH) // -- Magic any content /** * Guess the body content by checking the Content-Type header. */ def anyContent: BodyParser[AnyContent] = BodyParser("anyContent") { request => request.contentType match { case _ if request.method == "GET" || request.method == "HEAD" => { Logger("play").trace("Parsing AnyContent as empty") empty(request).map(_.right.map(_ => AnyContentAsEmpty)) } case Some("text/plain") => { Logger("play").trace("Parsing AnyContent as text") text(request).map(_.right.map(s => AnyContentAsText(s))) } case Some("text/xml") => { Logger("play").trace("Parsing AnyContent as xml") xml(request).map(_.right.map(x => AnyContentAsXml(x))) } case Some("text/json") | Some("application/json") => { Logger("play").trace("Parsing AnyContent as json") json(request).map(_.right.map(j => AnyContentAsJson(j))) } case Some("application/x-www-form-urlencoded") => { Logger("play").trace("Parsing AnyContent as urlFormEncoded") urlFormEncoded(request).map(_.right.map(d => AnyContentAsFormUrlEncoded(d))) } case Some("multipart/form-data") => { Logger("play").trace("Parsing AnyContent as multipartFormData") multipartFormData(request).map(_.right.map(m => AnyContentAsMultipartFormData(m))) } case _ => { Logger("play").trace("Parsing AnyContent as raw") raw(request).map(_.right.map(r => AnyContentAsRaw(r))) } } } // -- Multipart /** * Parse the content as multipart/form-data */ def multipartFormData: BodyParser[MultipartFormData[TemporaryFile]] = multipartFormData(Multipart.handleFilePartAsTemporaryFile) /** * Parse the content as multipart/form-data * * @param filePartHandler Handles file parts. 
*/ def multipartFormData[A](filePartHandler: Multipart.PartHandler[FilePart[A]]): BodyParser[MultipartFormData[A]] = BodyParser("multipartFormData") { request => val handler: Multipart.PartHandler[Either[Part, FilePart[A]]] = Multipart.handleDataPart.andThen(_.map(Left(_))) .orElse({ case Multipart.FileInfoMatcher(partName, fileName, _) if fileName.trim.isEmpty => // No file name is what the browser sends if you didn't select a file Done(Left(MissingFilePart(partName)), Input.Empty) }: Multipart.PartHandler[Either[Part, FilePart[A]]]) .orElse(filePartHandler.andThen(_.map(Right(_)))) .orElse { case headers => Done(Left(BadPart(headers)), Input.Empty) } Multipart.multipartParser(handler)(request).map { errorOrParts => errorOrParts.right.map { parts => val data = parts.collect { case Left(DataPart(key, value)) => (key, value) }.groupBy(_._1).mapValues(_.map(_._2)) val az = parts.collect { case Right(a) => a } val bad = parts.collect { case Left(b @ BadPart(_)) => b } val missing = parts.collect { case Left(missing @ MissingFilePart(_)) => missing } MultipartFormData(data, az, bad, missing) } } } object Multipart { def multipartParser[A](partHandler: Map[String, String] => Iteratee[Array[Byte], A]): BodyParser[Seq[A]] = parse.using { request => val maybeBoundary = request.headers.get(play.api.http.HeaderNames.CONTENT_TYPE).filter(ct => ct.trim.startsWith("multipart/form-data")).flatMap { mpCt => mpCt.trim.split("boundary=").tail.headOption.map(b => ("\r\n--" + b).getBytes("utf-8")) } maybeBoundary.map { boundary => BodyParser { request => val CRLF = "\r\n".getBytes val CRLFCRLF = CRLF ++ CRLF val takeUpToBoundary = Enumeratee.takeWhile[MatchInfo[Array[Byte]]](!_.isMatch) val maxHeaderBuffer = Traversable.takeUpTo[Array[Byte]](4 * 1024) transform Iteratee.consume[Array[Byte]]() val collectHeaders = maxHeaderBuffer.map { buffer => val (headerBytes, rest) = Option(buffer.drop(2)).map(b => b.splitAt(b.indexOfSlice(CRLFCRLF))).get val headerString = new String(headerBytes) val headers = headerString.lines.map { header => val key :: value = header.trim.split(":").toList (key.trim.toLowerCase, value.mkString.trim) }.toMap val left = rest.drop(CRLFCRLF.length) (headers, left) } val readPart = collectHeaders.flatMap { case (headers, left) => Iteratee.flatten(partHandler(headers).feed(Input.El(left))) } val handlePart = Enumeratee.map[MatchInfo[Array[Byte]]](_.content).transform(readPart) Traversable.take[Array[Byte]](boundary.size - 2).transform(Iteratee.consume()).flatMap { firstBoundary => Parsing.search(boundary) transform Iteratee.repeat { takeUpToBoundary.transform(handlePart).flatMap { part => Enumeratee.take(1)(Iteratee.ignore[MatchInfo[Array[Byte]]]).mapDone(_ => part) } }.map(parts => Right(parts.dropRight(1))) } } }.getOrElse(parse.error(Play.maybeApplication.map(_.global.onBadRequest(request, "Missing boundary header")).getOrElse(Results.BadRequest))) } type PartHandler[A] = PartialFunction[Map[String, String], Iteratee[Array[Byte], A]] def handleFilePartAsTemporaryFile: PartHandler[FilePart[TemporaryFile]] = { handleFilePart { case FileInfo(partName, filename, contentType) => val tempFile = TemporaryFile("multipartBody", "asTemporaryFile") Iteratee.fold[Array[Byte], FileOutputStream](new java.io.FileOutputStream(tempFile.file)) { (os, data) => os.write(data) os }.mapDone { os => os.close() tempFile } } } case class FileInfo(partName: String, fileName: String, contentType: Option[String]) object FileInfoMatcher { def unapply(headers: Map[String, String]): Option[(String, String, 
Option[String])] = { val keyValue = """^([a-zA-Z_0-9]+)="(.*)"$""".r for { value <- headers.get("content-disposition") values = value.split(";").map(_.trim).map { case keyValue(key, value) => (key.trim, value.trim) case key => (key.trim, "") }.toMap _ <- values.get("form-data"); partName <- values.get("name"); fileName <- values.get("filename"); contentType = headers.get("content-type") } yield ((partName, fileName, contentType)) } } def handleFilePart[A](handler: FileInfo => Iteratee[Array[Byte], A]): PartHandler[FilePart[A]] = { case FileInfoMatcher(partName, fileName, contentType) => val safeFileName = fileName.split('\\').takeRight(1).mkString handler(FileInfo(partName, safeFileName, contentType)).map(a => FilePart(partName, safeFileName, contentType, a)) } object PartInfoMatcher { def unapply(headers: Map[String, String]): Option[String] = { val keyValue = """^([a-zA-Z_0-9]+)="(.*)"$""".r for { value <- headers.get("content-disposition") values = value.split(";").map(_.trim).map { case keyValue(key, value) => (key.trim, value.trim) case key => (key.trim, "") }.toMap _ <- values.get("form-data"); partName <- values.get("name") } yield (partName) } } def handleDataPart: PartHandler[Part] = { case headers @ PartInfoMatcher(partName) if !FileInfoMatcher.unapply(headers).isDefined => Traversable.takeUpTo[Array[Byte]](DEFAULT_MAX_TEXT_LENGTH) .transform(Iteratee.consume[Array[Byte]]().map(bytes => DataPart(partName, new String(bytes, "utf-8")))) .flatMap { data => Cont({ case Input.El(_) => Done(MaxDataPartSizeExceeded(partName), Input.Empty) case in => Done(data, in) }) } } def handlePart(fileHandler: PartHandler[FilePart[File]]): PartHandler[Part] = { handleDataPart .orElse({ case FileInfoMatcher(partName, fileName, _) if fileName.trim.isEmpty => Done(MissingFilePart(partName), Input.Empty) }: PartHandler[Part]) .orElse(fileHandler) .orElse({ case headers => Done(BadPart(headers), Input.Empty) }) } } // -- Parsing utilities /** * Wrap an existing BodyParser with a maxLength constraints. * * @param maxLength The max length allowed * @param parser The BodyParser to wrap */ def maxLength[A](maxLength: Int, parser: BodyParser[A]): BodyParser[Either[MaxSizeExceeded, A]] = BodyParser("maxLength=" + maxLength + ", wrapping=" + parser.toString) { request => Traversable.takeUpTo[Array[Byte]](maxLength).transform(parser(request)).flatMap(Iteratee.eofOrElse(MaxSizeExceeded(maxLength))).map { case Right(Right(result)) => Right(Right(result)) case Right(Left(badRequest)) => Left(badRequest) case Left(maxSizeExceeded) => Right(Left(maxSizeExceeded)) } } /** * A body parser that always returns an error. */ def error[A](result: Result): BodyParser[A] = BodyParser("error, result=" + result) { request => Done(Left(result), Empty) } /** * Allow to choose the right BodyParser parser to use by examining the request headers. */ def using[A](f: RequestHeader => BodyParser[A]) = BodyParser { request => f(request)(request) } /** * Create a conditional BodyParser. */ def when[A](predicate: RequestHeader => Boolean, parser: BodyParser[A], badResult: RequestHeader => Result): BodyParser[A] = { BodyParser("conditional, wrapping=" + parser.toString) { request => if (predicate(request)) { parser(request) } else { Done(Left(badResult(request)), Empty) } } } } } /** * Defaults BodyParsers. */ object BodyParsers extends BodyParsers /** * Signal a max content size exceeded */ case class MaxSizeExceeded(length: Int)
noel-yap/setter-for-catan
play-2.1.1/framework/src/play/src/main/scala/play/api/mvc/ContentTypes.scala
Scala
apache-2.0
26,039
/**
 * For copyright information see the LICENSE document.
 */

package entice.server.controllers

import entice.server._, Net._
import entice.server.world._
import entice.server.utils._
import entice.server.scripting._
import entice.protocol._

import com.twitter.util.Eval

import akka.actor.{ Actor, ActorRef, ActorLogging, ActorSystem, Props }

import java.io._

class Command extends Actor with ActorLogging with Subscriber with Clients {

  val subscriptions = classOf[ChatCommand] :: Nil
  override def preStart { register }

  var scripts: Map[String, scripting.Command] = retrieveScripts
  val scriptContext = CommandContext(context.system, messageBus, _: Client)

  def receive = {
    case MessageEvent(session, ChatCommand(cmd, args)) =>
      clients.get(session) match {
        case Some(client) if client.state == Playing =>
          if (cmd == "helpme") {
            session ! ServerMessage("Available commands:")
            session ! ServerMessage(" - (built-in) helpme")
            session ! ServerMessage(" - (built-in) info <command-name>")
            session ! ServerMessage(" - (built-in) load <path/to/file>")
            session ! ServerMessage(" - (built-in) reload")
            scripts.keySet.foreach { scr => session ! ServerMessage(s" - ${scr}") }
          } else if (cmd == "info" && !args.isEmpty) {
            scripts.get(args.head) match {
              case Some(script) =>
                session ! ServerMessage(s"Command '${cmd}' does:")
                session ! ServerMessage(script.info.generalInfo)
                session ! ServerMessage(s"Command '${cmd}' takes:")
                script.info.argsHelp foreach { a => session ! ServerMessage(s" - ${a}") }
                session ! ServerMessage(s"Command '${cmd}' usage:")
                session ! ServerMessage(script.info.usageInfo)
              case None =>
                session ! ServerMessage("No such command available.")
            }
          } else if (cmd == "load") {
            // TODO: load smth on the fly
            session ! ServerMessage("Not yet implemented.")
          } else if (cmd == "reload") {
            scripts = retrieveScripts
            session ! ServerMessage("All command scripts reloaded.")
          } else if (scripts.contains(cmd)) {
            log.debug(s"\nRunning script for command '${cmd}'...")
            scripts(cmd).run(args, scriptContext(client)) match {
              case Some(errormsg) => session ! ServerMessage(errormsg)
              case None =>
            }
          }

        case _ =>
          session ! Failure("Not logged in, or not playing.")
          session ! Kick
      }
  }

  def retrieveScripts = {
    val scriptFiles = new File(Config.get.commands).listFiles()
    var result = Map[String, scripting.Command]()
    for (scriptFile <- scriptFiles) {
      val script = (new Eval)[scripting.Command](scriptFile)
      log.info(s"Loaded script for command '${script.info.command}'.")
      result = result + (script.info.command -> script)
    }
    result
  }
}
entice/old-server
src/main/scala/entice/server/controllers/Command.scala
Scala
bsd-3-clause
3,675
/***********************************************************************
 * Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Apache License, Version 2.0
 * which accompanies this distribution and is available at
 * http://www.opensource.org/licenses/apache2.0.php.
 ***********************************************************************/

package org.locationtech.geomesa.accumulo.audit

import java.io.Closeable
import java.util.concurrent.atomic.AtomicBoolean
import java.util.concurrent.{ScheduledThreadPoolExecutor, TimeUnit}

import com.typesafe.scalalogging.LazyLogging
import org.apache.accumulo.core.client.{BatchWriter, Connector}
import org.apache.accumulo.core.data.Mutation
import org.locationtech.geomesa.accumulo.util.{GeoMesaBatchWriterConfig, TableUtils}
import org.locationtech.geomesa.utils.audit.AuditedEvent
import org.locationtech.geomesa.utils.concurrent.ExitingExecutor
import org.locationtech.geomesa.utils.conf.GeoMesaSystemProperties.SystemProperty

/**
 * Manages writing of usage stats in a background thread.
 */
class AccumuloEventWriter(connector: Connector, table: String) extends Runnable with Closeable with LazyLogging {

  private val delay = AccumuloEventWriter.WriteInterval.toDuration.get.toMillis

  logger.trace(s"Scheduling audit writer for ${delay}ms")

  private val schedule = AccumuloEventWriter.executor.scheduleWithFixedDelay(this, delay, delay, TimeUnit.MILLISECONDS)

  private val batchWriterConfig = GeoMesaBatchWriterConfig().setMaxMemory(10000L).setMaxWriteThreads(5)

  private var maybeWriter: BatchWriter = _

  private val running = new AtomicBoolean(true)

  private val queue = new java.util.concurrent.ConcurrentLinkedQueue[() => Mutation]

  /**
   * Queues a stat for writing
   */
  def queueStat[T <: AuditedEvent](event: T)(implicit transform: AccumuloEventTransform[T]): Unit =
    queue.offer(() => transform.toMutation(event))

  override def run(): Unit = {
    var toMutation = queue.poll()
    if (toMutation != null) {
      val writer = getWriter
      do {
        writer.addMutation(toMutation())
        toMutation = queue.poll()
      } while (toMutation != null && running.get)
      writer.flush()
    }
  }

  override def close(): Unit = {
    running.set(false)
    schedule.cancel(false)
    synchronized {
      if (maybeWriter != null) {
        maybeWriter.close()
      }
    }
  }

  private def getWriter: BatchWriter = synchronized {
    if (maybeWriter == null) {
      TableUtils.createTableIfNeeded(connector, table)
      maybeWriter = connector.createBatchWriter(table, batchWriterConfig)
    }
    maybeWriter
  }
}

object AccumuloEventWriter {

  val WriteInterval: SystemProperty = SystemProperty("geomesa.accumulo.audit.interval", "5 seconds")

  private val executor = ExitingExecutor(new ScheduledThreadPoolExecutor(5), force = true)
}
aheyne/geomesa
geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/audit/AccumuloEventWriter.scala
Scala
apache-2.0
2,967
package $package$.util

import scopt.OptionParser

case object Config

object ConfigParser extends OptionParser[Config.type](BuildInfo.name) {
  head(BuildInfo.name, BuildInfo.version)
}
ChrisKaminski/github-scala-app.g8
src/main/g8/app/src/main/scala/$package$/util/Config.scala
Scala
mit
187
/* * Copyright 2001-2013 Artima, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.scalatest import org.scalatest.matchers._ import java.lang.reflect.Method import java.lang.reflect.Modifier import scala.util.matching.Regex import java.lang.reflect.Field import scala.collection.Traversable import Assertions.areEqualComparingArraysStructurally import org.scalatest.exceptions.TestFailedException import scala.collection.GenTraversable import scala.collection.GenSeq import scala.collection.GenMap import org.scalactic.Tolerance import scala.annotation.tailrec import org.scalactic.Equality import org.scalatest.words.ShouldVerb import org.scalatest.matchers.HavePropertyMatcher import org.scalatest.matchers.HavePropertyMatchResult import org.scalatest.matchers.BePropertyMatcher import org.scalatest.matchers.BePropertyMatchResult import org.scalatest.matchers.BeMatcher import org.scalatest.matchers.Matcher import org.scalatest.matchers.MatchResult import words.RegexWithGroups // TODO: drop generic support for be as an equality comparison, in favor of specific ones. // TODO: mention on JUnit and TestNG docs that you can now mix in ShouldMatchers or MustMatchers // TODO: Put links from ShouldMatchers to wherever I reveal the matrix and algo of how properties are checked dynamically. // TODO: double check that I wrote tests for (length (7)) and (size (8)) in parens // TODO: document how to turn off the === implicit conversion // TODO: Document you can use JMock, EasyMock, etc. private[scalatest] object MatchersHelper { // SKIP-SCALATESTJS-START // If the symbol passed is 'title, this will look for a field named "title", a method named "title", or a // method named "getTitle". The method must take no parameters. // // F (field) | M (method) | G (get or is method) | Result // 0 0 0 None // 0 0 1 Some(G) // 0 1 0 Some(M) // 0 1 1 Some(M) prefer a Scala style one of a Java style, such as when using BeanProperty annotation // 1 0 0 Some(F) ignore the field if there's a method. 
in Java often name a field and get method the same // 1 0 1 Some(G) // 1 1 0 Some(M) // 1 1 1 Some(M) prefer a Scala style one of a Java style, such as when using BeanProperty annotation // def accessProperty(objectWithProperty: AnyRef, propertySymbol: Symbol, isBooleanProperty: Boolean): Option[Any] = { // If 'title passed, propertyName would be "title" val propertyName = propertySymbol.name // if propertyName is '>, mangledPropertyName would be "$greater" val mangledPropertyName = transformOperatorChars(propertyName) // fieldNameToAccess and methodNameToInvoke would also be "title" val fieldNameToAccess = mangledPropertyName val methodNameToInvoke = mangledPropertyName // methodNameToInvokeWithGet would be "getTitle" val prefix = if (isBooleanProperty) "is" else "get" val methodNameToInvokeWithGet = prefix + mangledPropertyName(0).toUpper + mangledPropertyName.substring(1) val firstChar = propertyName(0).toLower val methodNameStartsWithVowel = firstChar == 'a' || firstChar == 'e' || firstChar == 'i' || firstChar == 'o' || firstChar == 'u' def isFieldToAccess(field: Field): Boolean = field.getName == fieldNameToAccess // If it is a predicate, I check the result type, otherwise I don't. Maybe I should just do that. Could be a later enhancement. def isMethodToInvoke(method: Method): Boolean = method.getName == methodNameToInvoke && method.getParameterTypes.length == 0 && !Modifier.isStatic(method.getModifiers()) && (!isBooleanProperty || method.getReturnType == classOf[Boolean]) def isGetMethodToInvoke(method: Method): Boolean = method.getName == methodNameToInvokeWithGet && method.getParameterTypes.length == 0 && !Modifier.isStatic(method.getModifiers()) && (!isBooleanProperty || method.getReturnType == classOf[Boolean]) val fieldOption = objectWithProperty.getClass.getFields.find(isFieldToAccess) val methodOption = objectWithProperty.getClass.getMethods.find(isMethodToInvoke) val getMethodOption = objectWithProperty.getClass.getMethods.find(isGetMethodToInvoke) (fieldOption, methodOption, getMethodOption) match { case (_, Some(method), _) => Some(method.invoke(objectWithProperty, Array[AnyRef](): _*)) case (_, None, Some(getMethod)) => Some(getMethod.invoke(objectWithProperty, Array[AnyRef](): _*)) case (Some(field), None, None) => Some(field.get(objectWithProperty)) case (None, None, None) => None } } // SKIP-SCALATESTJS-END def transformOperatorChars(s: String): String = { val builder = new StringBuilder for (i <- 0 until s.length) { val ch = s.charAt(i) val replacement = ch match { case '!' => "$bang" case '#' => "$hash" case '~' => "$tilde" case '|' => "$bar" case '^' => "$up" case '\\\\' => "$bslash" case '@' => "$at" case '?' => "$qmark" case '>' => "$greater" case '=' => "$eq" case '<' => "$less" case ':' => "$colon" case '/' => "$div" case '-' => "$minus" case '+' => "$plus" case '*' => "$times" case '&' => "$amp" case '%' => "$percent" case _ => "" } if (replacement.length > 0) builder.append(replacement) else builder.append(ch) } builder.toString } def newTestFailedException(message: String, optionalCause: Option[Throwable] = None, stackDepthAdjustment: Int = 0): Throwable = { val temp = new RuntimeException // should not look for anything in the first 2 elements, caller stack element is at 3rd/4th // also, it solves the problem when the suite file that mixin in Matchers has the [suiteFileName]:newTestFailedException appears in the top 2 elements // this approach should be better than adding && _.getMethodName == newTestFailedException we used previously. 
val elements = temp.getStackTrace.drop(2) // TODO: Perhaps we should add org.scalatest.enablers also here later? // TODO: Probably need a MatchersHelper.scala here also val stackDepth = elements.indexWhere(st => st.getFileName != "Matchers.scala" && st.getFileName != "MustMatchers.scala" && !st.getClassName.startsWith("org.scalatest.words.")) + 2 // the first 2 elements dropped previously optionalCause match { case Some(cause) => new TestFailedException(message, cause, stackDepth + stackDepthAdjustment) case None => new TestFailedException(message, stackDepth + stackDepthAdjustment) } } def andMatchersAndApply[T](left: T, leftMatcher: Matcher[T], rightMatcher: Matcher[T]): MatchResult = { val leftMatchResult = leftMatcher(left) val rightMatchResult = rightMatcher(left) // Not short circuiting anymore if (!leftMatchResult.matches) leftMatchResult else { MatchResult( rightMatchResult.matches, Resources.rawCommaBut, Resources.rawCommaAnd, Resources.rawCommaBut, Resources.rawCommaAnd, Vector(NegatedFailureMessage(leftMatchResult), MidSentenceFailureMessage(rightMatchResult)), Vector(NegatedFailureMessage(leftMatchResult), MidSentenceNegatedFailureMessage(rightMatchResult)), Vector(MidSentenceNegatedFailureMessage(leftMatchResult), MidSentenceFailureMessage(rightMatchResult)), Vector(MidSentenceNegatedFailureMessage(leftMatchResult), MidSentenceNegatedFailureMessage(rightMatchResult)) ) } } def orMatchersAndApply[T](left: T, leftMatcher: Matcher[T], rightMatcher: Matcher[T]): MatchResult = { val leftMatchResult = leftMatcher(left) val rightMatchResult = rightMatcher(left) // Not short circuiting anymore if (leftMatchResult.matches) leftMatchResult.copy(matches = true) else { MatchResult( rightMatchResult.matches, Resources.rawCommaAnd, Resources.rawCommaAnd, Resources.rawCommaAnd, Resources.rawCommaAnd, Vector(FailureMessage(leftMatchResult), MidSentenceFailureMessage(rightMatchResult)), Vector(FailureMessage(leftMatchResult), MidSentenceNegatedFailureMessage(rightMatchResult)), Vector(MidSentenceFailureMessage(leftMatchResult), MidSentenceFailureMessage(rightMatchResult)), Vector(MidSentenceFailureMessage(leftMatchResult), MidSentenceNegatedFailureMessage(rightMatchResult)) ) } } // SKIP-SCALATESTJS-START def matchSymbolToPredicateMethod(left: AnyRef, right: Symbol, hasArticle: Boolean, articleIsA: Boolean, stackDepth: Int = 0): MatchResult = { // If 'empty passed, rightNoTick would be "empty" val propertyName = right.name accessProperty(left, right, true) match { case None => // if propertyName is '>, mangledPropertyName would be "$greater" val mangledPropertyName = transformOperatorChars(propertyName) // methodNameToInvoke would also be "empty" val methodNameToInvoke = mangledPropertyName // methodNameToInvokeWithIs would be "isEmpty" val methodNameToInvokeWithIs = "is"+ mangledPropertyName(0).toUpper + mangledPropertyName.substring(1) val firstChar = propertyName(0).toLower val methodNameStartsWithVowel = firstChar == 'a' || firstChar == 'e' || firstChar == 'i' || firstChar == 'o' || firstChar == 'u' throw newTestFailedException( if (methodNameStartsWithVowel) FailureMessages.hasNeitherAnOrAnMethod(left, UnquotedString(methodNameToInvoke), UnquotedString(methodNameToInvokeWithIs)) else FailureMessages.hasNeitherAOrAnMethod(left, UnquotedString(methodNameToInvoke), UnquotedString(methodNameToInvokeWithIs)), None, stackDepth ) case Some(result) => val (wasNot, was) = if (hasArticle) { if (articleIsA) (Resources.rawWasNotA, Resources.rawWasA) else (Resources.rawWasNotAn, Resources.rawWasAn) } 
else (Resources.rawWasNot, Resources.rawWas) MatchResult( result == true, // Right now I just leave the return value of accessProperty as Any wasNot, was, Vector(left, UnquotedString(propertyName)) ) } } // SKIP-SCALATESTJS-END def checkPatternMatchAndGroups(matches: Boolean, left: String, pMatcher: java.util.regex.Matcher, regex: Regex, groups: IndexedSeq[String], didNotMatchMessage: => String, matchMessage: => String, notGroupAtIndexMessage: => String, notGroupMessage: => String, andGroupMessage: => String): MatchResult = { if (groups.size == 0 || !matches) MatchResult( matches, didNotMatchMessage, matchMessage, Vector(left, UnquotedString(regex.toString)) ) else { val count = pMatcher.groupCount val failed = // Find the first group that fails groups.zipWithIndex.find { case (group, idx) => val groupIdx = idx + 1 !(groupIdx <= count && pMatcher.group(groupIdx) == group) } failed match { case Some((group, idx)) => MatchResult( false, if (groups.size > 1) notGroupAtIndexMessage else notGroupMessage, andGroupMessage, if (groups.size > 1) Vector(left, UnquotedString(regex.toString), pMatcher.group(idx + 1), UnquotedString(group), idx) else Vector(left, UnquotedString(regex.toString), pMatcher.group(1), UnquotedString(group)), Vector(left, UnquotedString(regex.toString), UnquotedString(groups.mkString(", "))) ) case None => // None of group failed MatchResult( true, notGroupMessage, andGroupMessage, Vector(left, UnquotedString(regex.toString), pMatcher.group(1), UnquotedString(groups.mkString(", "))), Vector(left, UnquotedString(regex.toString), UnquotedString(groups.mkString(", "))) ) } } } def fullyMatchRegexWithGroups(left: String, regex: Regex, groups: IndexedSeq[String]): MatchResult = { val pMatcher = regex.pattern.matcher(left) val matches = pMatcher.matches checkPatternMatchAndGroups(matches, left, pMatcher, regex, groups, Resources.rawDidNotFullyMatchRegex, Resources.rawFullyMatchedRegex, Resources.rawFullyMatchedRegexButNotGroupAtIndex, Resources.rawFullyMatchedRegexButNotGroup, Resources.rawFullyMatchedRegexAndGroup) } def startWithRegexWithGroups(left: String, regex: Regex, groups: IndexedSeq[String]): MatchResult = { val pMatcher = regex.pattern.matcher(left) val matches = pMatcher.lookingAt checkPatternMatchAndGroups(matches, left, pMatcher, regex, groups, Resources.rawDidNotStartWithRegex, Resources.rawStartedWithRegex, Resources.rawStartedWithRegexButNotGroupAtIndex, Resources.rawStartedWithRegexButNotGroup, Resources.rawStartedWithRegexAndGroup) } def endWithRegexWithGroups(left: String, regex: Regex, groups: IndexedSeq[String]): MatchResult = { val pMatcher = regex.pattern.matcher(left) val found = pMatcher.find val matches = found && pMatcher.end == left.length checkPatternMatchAndGroups(matches, left, pMatcher, regex, groups, Resources.rawDidNotEndWithRegex, Resources.rawEndedWithRegex, Resources.rawEndedWithRegexButNotGroupAtIndex, Resources.rawEndedWithRegexButNotGroup, Resources.rawEndedWithRegexAndGroup) } def includeRegexWithGroups(left: String, regex: Regex, groups: IndexedSeq[String]): MatchResult = { val pMatcher = regex.pattern.matcher(left) val matches = pMatcher.find checkPatternMatchAndGroups(matches, left, pMatcher, regex, groups, Resources.rawDidNotIncludeRegex, Resources.rawIncludedRegex, Resources.rawIncludedRegexButNotGroupAtIndex, Resources.rawIncludedRegexButNotGroup, Resources.rawIncludedRegexAndGroup) } }
SRGOM/scalatest
scalatest/src/main/scala/org/scalatest/MatchersHelper.scala
Scala
apache-2.0
14,961
package controllers

import play.api._
import play.api.mvc._
import models._
import play.api.data._
import play.api.data.Forms._
import play.api.data.format.Formats._
import play.api.Play.current
import play.api.libs.concurrent.Akka
import play.api.libs.json._
import play.api.libs.concurrent.Execution.Implicits._
import controllerhelper._
import tp_utils.Tryer._
import reactivemongo.bson._
import scala.language.reflectiveCalls
import scala.concurrent._
import scala.concurrent.duration._
import scala.language.postfixOps
import controllers.helper.{TablePager, CRUDer}

object SonController extends Controller with TablePager[Son] with CRUDer[Son] {

  def index = Action { implicit request =>
    Ok(views.html.familyPage("son", controllers.routes.SonController.table, elemsToDisplay))
  }

  val singleton = Son

  def elemValues(gp: Son) = Seq(gp.id.stringify, gp.name)

  override val elemsToDisplay = Seq("id", "name")
  override val elemsToFilter = Seq("name")

  def formTemplate(formgp: Form[Son])(implicit request: RequestHeader): play.api.templates.Html =
    views.html.sonForm(formgp)

  def form = Form(
    mapping(
      //TODO verifyId
      "id" -> text,
      "name" -> nonEmptyText,
      "fa" -> text.verifyOptionBSONId
    ){ (id, name, _fa) => {
      val fa = Await.result(Father.findOneByIdString(_fa), 3 seconds)
      val father = tryo{ Reference[Father](fa.get.id) }
      (tryo({
        if (id.equals("")) throw new Exception("")
        else BSONObjectID.parse(id).toOption.get
      })) match {
        case Some(oid) =>
          //UPDATE
          Await.result(
            Son.update(oid,
              Son(
                id = oid,
                name = name,
                fa = father
              )
            ), 3 seconds)
          Await.result(Son.findOneById(oid), 3 seconds).get
        case _ =>
          //CREATE
          val son = {
            if (id.equals(""))
              Son(
                name = name,
                fa = father
              )
            else
              Son(
                id = BSONObjectID.parse(id).toOption.get,
                name = name,
                fa = father
              )
          }
          Await.result(
            Son.create(
              son
            ), 3 seconds)
          Await.result(Son.findOneById(son.id), 3 seconds).get
      }
    }
    }{ s => {
      Some(s.id.stringify,
        s.name,
        s.fa.map(x => x.id.stringify).getOrElse("")
      )
    }
    }
  )
}
TPTeam/reactive_mongo_example
app/controllers/SonController.scala
Scala
mit
2,781
package knot.net.tcp

import knot.core.Provider
import knot.core.cell.Cell
import knot.net.Tcp

object TcpWorker {
  def provider(tcp: Tcp) = Provider(new TcpWorker(tcp))
}

/**
 * this cell is tcpserver or tcpclient
 */
class TcpWorker(val tcp: Tcp) extends Cell {

  val loop: NioEventLoop = {
    val l = NioEventLoop()
    l.start(x => {
      log.error(x, "error, tcp event loop")
      context.error(x)
    })
    l
  }

  override def behave = {
    case b: Tcp.Bind => context.cellOf(NioListner.provider(tcp, loop, context.sender, b))
    case c: Tcp.Connect => context.cellOf(NioOutgoing.provider(tcp, loop, context.sender, c))
  }
}
defvar/knot
knot-net/src/main/scala/knot/net/tcp/TcpWorker.scala
Scala
mit
657
import org.scalatest._

class LeapTest extends FunSuite {

  test ("vanilla leap year") {
    assert(Year(1996).isLeap)
  }

  test ("any old year") {
    pending
    assert(!Year(1997).isLeap)
  }

  test ("century") {
    pending
    assert(!Year(1900).isLeap)
  }

  test ("exceptional century") {
    pending
    assert(Year(2000).isLeap)
  }
}
nlochschmidt/xscala
leap/src/test/scala/leap_test.scala
Scala
mit
348
object Test {
  type StrHead[X <: Tuple] = X match {
    case (x <: String) *: _ => x // error
  }

  // Futher minimized
  type M[X] = X match {
    case (x) *: _ => Int
  }

  // The exception can also be reached with normal pattern matching
  1 match {
    case _: Option[(x)] => ()
  }
}
som-snytt/dotty
tests/neg/7043.scala
Scala
apache-2.0
292
/* * Copyright 2009-2017. DigitalGlobe, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and limitations under the License. */ package org.mrgeo.mapalgebra import java.awt.image.DataBuffer import java.io.{Externalizable, IOException, ObjectInput, ObjectOutput} import org.apache.spark.rdd.RDD import org.apache.spark.{SparkConf, SparkContext} import org.mrgeo.data.raster.{MrGeoRaster, RasterWritable} import org.mrgeo.data.rdd.RasterRDD import org.mrgeo.data.tile.TileIdWritable import org.mrgeo.image.MrsPyramidMetadata import org.mrgeo.job.JobArguments import org.mrgeo.mapalgebra.raster.RasterMapOp import org.mrgeo.spark.FocalBuilder import org.mrgeo.utils.SparkUtils abstract class RawFocalMapOp extends RasterMapOp with Externalizable { protected var inputMapOp:Option[RasterMapOp] = None private var rasterRDD:Option[RasterRDD] = None private var outputNoDatas:Option[Array[Double]] = None override def rdd():Option[RasterRDD] = rasterRDD override def getZoomLevel(): Int = { inputMapOp.getOrElse(throw new IOException("No raster input specified")).getZoomLevel() } override def execute(context:SparkContext):Boolean = { val input:RasterMapOp = inputMapOp getOrElse (throw new IOException("Input MapOp not valid!")) val meta = input.metadata() getOrElse (throw new IOException("Can't load metadata! Ouch! " + input.getClass.getName)) val rdd = input.rdd() getOrElse (throw new IOException("Can't load RDD! Ouch! " + inputMapOp.getClass.getName)) beforeExecute(meta) val zoom = meta.getMaxZoomLevel val tilesize = meta.getTilesize val nodatas = meta.getDefaultValuesNumber val neighborhoodInfo = getNeighborhoodInfo val neighborhoodWidth = neighborhoodInfo._1 val neighborhoodHeight = neighborhoodInfo._2 val bufferX = neighborhoodWidth / 2 val bufferY = neighborhoodHeight / 2 val tiles = FocalBuilder.create(rdd, bufferX, bufferY, meta.getBounds, zoom, nodatas, context) rasterRDD = Some( RasterRDD(calculate(tiles, bufferX, bufferY, neighborhoodWidth, neighborhoodHeight, nodatas, zoom, tilesize))) metadata(SparkUtils.calculateMetadata(rasterRDD.get, zoom, getOutputNoData, bounds = meta.getBounds, calcStats = false)) true } override def setup(job:JobArguments, conf:SparkConf):Boolean = { true } override def teardown(job:JobArguments, conf:SparkConf):Boolean = { true } override def readExternal(in:ObjectInput):Unit = { } override def writeExternal(out:ObjectOutput):Unit = { } /** * Compute and return the value to be assigned to the pixel at processX, processY in the * source raster. It is guaranteed that the value of the pixel being processed is not nodata, * but there is no guarantee for its neighborhood pixels. * * Note that the neighborhood width and height can be either odd or even, meaning that the pixel * being processed can be either in the center of the neighborhood or slightly left and/or * above center. For example, if the neighborhoodWidth is 3, then the processing pixel will * be in the middle of the neighborhood. If the neighborhoodWidth is 4, it will be the second pixel * from the left (e.g. xLeftOffset will be 1). 
* * @param raster * @param notnodata An raster of booleans indicating whether each pixel value in * the source raster is nodata or not. Using this array improves * performance during neighborhood calculations because the "is nodata" * checks are expensive when repeatedly run for the same pixel. * @param processX The x pixel coordinate in the source raster of the pixel to process * @param processY The y pixel coordinate in the source raster of the pixel to process * @param xLeftOffset Defines the left boundary of the neighborhood. This is the number of pixels * to the left of the pixel being processed. * @param neighborhoodWidth The width of the neighborhood in pixels. * @param yAboveOffset Defines the top boundary of the neighborhood. This is the number of pixels * above the pixel being processed. * @param neighborhoodHeight The height of the neighborhood in pixels. * @return */ protected def computePixelValue(raster:MrGeoRaster, notnodata:MrGeoRaster, outNoData:Double, rasterWidth:Int, processX:Int, processY:Int, processBand:Int, xLeftOffset:Int, neighborhoodWidth:Int, yAboveOffset:Int, neighborhoodHeight:Int, tileId:Long):Double /** * This method is called at the start of the "execute" method, giving sub-classes an * opportunity to perform some processing or initialization prior to executing the * map op. * * @param meta */ protected def beforeExecute(meta:MrsPyramidMetadata):Unit = { outputNoDatas = Some(Array.fill[Double](meta.getBands)(Double.NaN)) } /** * Returns 2 values about the neighborhood to use (neighborhood width, neighborhood height). * * This method is called at the start of the execution of this map op. * * @return */ protected def getNeighborhoodInfo:(Int, Int) protected def getOutputTileType:Int = { DataBuffer.TYPE_FLOAT } protected def getOutputNoData:Array[Double] = { outputNoDatas match { case Some(nodatas) => nodatas case None => throw new IllegalStateException("The output nodata values have not been set") } } private def isNoData(value:Double, nodata:Double):Boolean = { if (nodata.isNaN) { value.isNaN } else { value == nodata } } private def calculate(tiles:RDD[(TileIdWritable, RasterWritable)], bufferX:Int, bufferY:Int, neighborhoodWidth:Int, neighborhoodHeight:Int, nodatas:Array[Double], zoom:Int, tilesize:Int) = { val outputNoData = getOutputNoData tiles.map(tile => { val raster = RasterWritable.toMrGeoRaster(tile._2) val answer = MrGeoRaster.createEmptyRaster(tilesize, tilesize, raster.bands(), getOutputTileType) // , Float.NaN) // If neighborhoodWidth is an odd value, then the neighborhood has the same number of pixels to the left // and right of the source pixel. If even, then it has one fewer pixels to the left of the // source value than to the right. val xLeftOffset = if ((neighborhoodWidth % 2) == 0) { (neighborhoodWidth / 2) - 1 } else { neighborhoodWidth / 2 } // If neighborhoodHeight is an odd value, then the neighborhood has the same number of pixels above and // below the source pixel. If even, then it has one fewer pixel above than below. 
val yAboveOffset = if ((neighborhoodHeight % 2) == 0) { (neighborhoodHeight / 2) - 1 } else { neighborhoodHeight / 2 } // For performance, construct an array of booleans indicating whether or not each // pixel value in the source raster is nodata or not val rasterWidth = raster.width() val rasterHeight = raster.height() var band:Int = 0 val notnodata = MrGeoRaster.createEmptyRaster(rasterWidth, rasterHeight, 1, DataBuffer.TYPE_BYTE) while (band < raster.bands()) { val outputNoDataForBand = outputNoData(band).doubleValue() var py = 0 var px = 0 while (py < rasterHeight) { px = 0 while (px < rasterWidth) { val v = raster.getPixelDouble(px, py, band) if (!isNoData(v, nodatas(band))) { notnodata.setPixel(px, py, 0, 1.toByte) } else { notnodata.setPixel(px, py, 0, 0.toByte) } px += 1 } py += 1 } py = 0 px = 0 while (py < tilesize) { px = 0 while (px < tilesize) { val srcX = px + bufferX val srcY = py + bufferY // If the source pixel is nodata, skip it if (notnodata.getPixelByte(srcX, srcY, band) == 1) { answer.setPixel(px, py, band, computePixelValue(raster, notnodata, outputNoDataForBand, rasterWidth, srcX, srcY, band, xLeftOffset, neighborhoodWidth, yAboveOffset, neighborhoodHeight, tile._1.get())) } else { answer.setPixel(px, py, band, outputNoDataForBand) } px += 1 } py += 1 } band += 1 } (new TileIdWritable(tile._1), RasterWritable.toWritable(answer)) }) } }
ngageoint/mrgeo
mrgeo-mapalgebra/mrgeo-mapalgebra-image/src/main/scala/org/mrgeo/mapalgebra/RawFocalMapOp.scala
Scala
apache-2.0
9,374
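RawFocalMapOp above is abstract: a concrete focal operation only has to supply getNeighborhoodInfo and computePixelValue. Below is a minimal, hypothetical sketch of a 3x3 mean filter built on it, assuming those are the only members left abstract; the class name is illustrative, and the notnodata mask is queried on band 0 because calculate() builds it as a single-band raster.

package org.mrgeo.mapalgebra

import org.mrgeo.data.raster.MrGeoRaster

// Hypothetical example: a 3x3 mean filter on top of RawFocalMapOp.
class MeanFocalMapOp extends RawFocalMapOp {

  // Fixed 3x3 neighborhood: (width, height).
  override protected def getNeighborhoodInfo:(Int, Int) = (3, 3)

  // Average the non-nodata neighbors of (processX, processY) in the buffered source raster.
  override protected def computePixelValue(raster:MrGeoRaster, notnodata:MrGeoRaster, outNoData:Double,
                                           rasterWidth:Int, processX:Int, processY:Int, processBand:Int,
                                           xLeftOffset:Int, neighborhoodWidth:Int, yAboveOffset:Int,
                                           neighborhoodHeight:Int, tileId:Long):Double = {
    var sum = 0.0
    var count = 0
    var dy = 0
    while (dy < neighborhoodHeight) {
      var dx = 0
      while (dx < neighborhoodWidth) {
        val x = processX - xLeftOffset + dx
        val y = processY - yAboveOffset + dy
        // The source raster is buffered by bufferX/bufferY, so neighborhood coordinates
        // stay inside the raster; skip neighbors flagged as nodata (mask is single-band).
        if (notnodata.getPixelByte(x, y, 0) == 1) {
          sum += raster.getPixelDouble(x, y, processBand)
          count += 1
        }
        dx += 1
      }
      dy += 1
    }
    // The center pixel is guaranteed non-nodata, so count is at least 1,
    // but fall back to the output nodata value defensively.
    if (count > 0) sum / count else outNoData
  }
}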
/* * Copyright 2010 LinkedIn * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.server import java.util.Properties import kafka.message._ import kafka.utils._ /** * Configuration settings for the kafka server */ class KafkaConfig(props: Properties) extends ZKConfig(props) { /* the port to listen and accept connections on */ val port: Int = Utils.getInt(props, "port", 6667) /* hostname of broker. If not set, will pick up from the value returned from getLocalHost. */ val hostName: String = Utils.getString(props, "hostname", null) /* the broker id for this server */ val brokerId: Int = Utils.getInt(props, "brokerid") /* the SO_SNDBUFF buffer of the socket sever sockets */ val socketSendBuffer: Int = Utils.getInt(props, "socket.send.buffer", 100*1024) /* the SO_RCVBUFF buffer of the socket sever sockets */ val socketReceiveBuffer: Int = Utils.getInt(props, "socket.receive.buffer", 100*1024) /* the maximum number of bytes in a socket request */ val maxSocketRequestSize: Int = Utils.getIntInRange(props, "max.socket.request.bytes", 100*1024*1024, (1, Int.MaxValue)) /* the number of worker threads that the server uses for handling all client requests*/ val numThreads = Utils.getIntInRange(props, "num.threads", Runtime.getRuntime().availableProcessors, (1, Int.MaxValue)) /* the interval in which to measure performance statistics */ val monitoringPeriodSecs = Utils.getIntInRange(props, "monitoring.period.secs", 30, (1, Int.MaxValue)) /* the default number of log partitions per topic */ val numPartitions = Utils.getIntInRange(props, "num.partitions", 1, (1, Int.MaxValue)) /* the directory in which the log data is kept */ val logDir = Utils.getString(props, "log.dir") /* the maximum size of a single log file */ val logFileSize = Utils.getIntInRange(props, "log.file.size", 1*1024*1024*1024, (Message.HeaderSize, Int.MaxValue)) /* the number of messages accumulated on a log partition before messages are flushed to disk */ val flushInterval = Utils.getIntInRange(props, "log.flush.interval", 1, (1, Int.MaxValue)) /* the number of hours to keep a log file before deleting it */ val logRetentionHours = Utils.getIntInRange(props, "log.retention.hours", 24 * 7, (1, Int.MaxValue)) /* the frequency in minutes that the log cleaner checks whether any log is eligible for deletion */ val logCleanupIntervalMinutes = Utils.getIntInRange(props, "log.cleanup.interval.mins", 10, (1, Int.MaxValue)) /* enable zookeeper registration in the server */ val enableZookeeper = Utils.getBoolean(props, "enable.zookeeper", true) /* the maximum time in ms that a message in selected topics is kept in memory before flushed to disk, e.g., topic1:3000,topic2: 6000 */ val flushIntervalMap = Utils.getTopicFlushIntervals(Utils.getString(props, "topic.flush.intervals.ms", "")) /* the frequency in ms that the log flusher checks whether any log needs to be flushed to disk */ val flushSchedulerThreadRate = Utils.getInt(props, "log.default.flush.scheduler.interval.ms", 5000) /* the maximum time in ms that a message in any topic is kept in memory 
before flushed to disk */ val defaultFlushIntervalMs = Utils.getInt(props, "log.default.flush.interval.ms", flushSchedulerThreadRate) /* the number of partitions for selected topics, e.g., topic1:8,topic2:16 */ val topicPartitionsMap = Utils.getTopicPartitions(Utils.getString(props, "topic.partition.count.map", "")) }
jinfei21/kafka
src/kafka/server/KafkaConfig.scala
Scala
apache-2.0
4,017
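A minimal sketch of constructing this early KafkaConfig from java.util.Properties; brokerid and log.dir have no defaults, so they must be provided. The zk.connect entry is included on the assumption that the ZKConfig parent reads it; all values here are illustrative.

import java.util.Properties

import kafka.server.KafkaConfig

object KafkaConfigExample {
  def main(args: Array[String]): Unit = {
    val props = new Properties()
    props.put("brokerid", "0")                 // required: no default
    props.put("log.dir", "/tmp/kafka-logs")    // required: no default
    props.put("zk.connect", "localhost:2181")  // assumed to be read by the ZKConfig parent
    props.put("port", "9092")                  // overrides the default of 6667
    props.put("num.partitions", "2")

    val config = new KafkaConfig(props)
    println("broker " + config.brokerId + " on port " + config.port +
      ", " + config.numPartitions + " partitions per topic, logs in " + config.logDir)
  }
}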
package services.sitedata import javax.inject._ import scala.concurrent.Future import utils.Awaits import models.sitedata.AccessMailAccount import dao.sitedata.AccessMailAccountDao import dao.sitedata.IAccessMailAccountDao trait IAccessMailAccountService extends BaseService2[AccessMailAccount]{ def insert(accessmailaccount: AccessMailAccount): Future[Unit] def update(from: String, accessmailaccount: AccessMailAccount): Future[Unit] def remove(from: String): Future[Int] def findById(from: String): Future[Option[AccessMailAccount]] def findAll(): Future[Option[Seq[AccessMailAccount]]] def findAllAccessMailAccounts(): Seq[(String,String)] } @Singleton class AccessMailAccountService @Inject() (dao:IAccessMailAccountDao) extends IAccessMailAccountService{ import play.api.libs.concurrent.Execution.Implicits.defaultContext def insert(accessmailaccount: AccessMailAccount): Future[Unit] = { dao.insert(accessmailaccount); } def update(from: String, accessmailaccount: AccessMailAccount):Future[Unit] = { // accessmailaccount.from = Option(from.toInt) // accessmailaccount.from = from dao.update(accessmailaccount) } def remove(from: String): Future[Int] = { dao.remove(from) } def findById(from: String): Future[Option[AccessMailAccount]] = { dao.findById(from) } def findAll():Future[Option[Seq[AccessMailAccount]]] = { dao.findAll().map { x => Option(x) } } private def validateId(from: String):Unit = { val future = findById(from) val entry = Awaits.get(5, future) if (entry == null || entry.equals(None)) throw new RuntimeException("Could not find AccessMailAccount: " + from) } def findAllAccessMailAccounts():Seq[(String, String)] = { val future = this.findAll() val result = Awaits.get(5, future) val accessmailaccounts:Seq[(String, String)] = result.getOrElse(Seq(AccessMailAccount("", "", "", "", ""))) .toSeq .map { accessmailaccount => (accessmailaccount.from, accessmailaccount.smtpaddress) } return accessmailaccounts } }
tnddn/iv-web
portal/rest-portal/app/services/sitedata/AccessMailAccountService.scala
Scala
apache-2.0
2,080
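A hedged sketch of consuming the service from an injected Play controller; the controller is hypothetical and simply renders the (from, smtpaddress) pairs exposed by findAllAccessMailAccounts.

package controllers

import javax.inject._

import play.api.mvc._
import services.sitedata.IAccessMailAccountService

// Hypothetical consumer of the service above.
@Singleton
class AccessMailAccountController @Inject() (service: IAccessMailAccountService) extends Controller {

  // Renders one "from -> smtpaddress" line per configured mail account.
  def list = Action {
    val accounts = service.findAllAccessMailAccounts()
    Ok(accounts.map { case (from, smtp) => s"$from -> $smtp" }.mkString("\n"))
  }
}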
package net.randallalexander.restaurant.chooser.model import io.circe._ import io.circe.generic.semiauto._ import net.randallalexander.restaurant.chooser.model.Ethnicity._ import net.randallalexander.restaurant.chooser.model.KindOfFood._ /* maybe calculate pricePerPerson when request is made instead? .... how about keeping track of price and person tranx info per restaurant and calc pricePerPerson on the fly probably need a search api to make that useable */ case class Restaurant (id:Option[String], name:String, address: Address, ethnicity: Option[Ethnicity], kindOfFood:Option[KindOfFood], pricePerPerson:Option[Double]) object Restaurant { implicit val restaurantDecoder: Decoder[Restaurant] = deriveDecoder[Restaurant] implicit val restaurantEncoder: Encoder[Restaurant] = deriveEncoder[Restaurant] } case class Address(addressLine1:String,city:String,state:String,zip:String, geo:Option[Geo]) object Address { implicit val addressDecoder: Decoder[Address] = deriveDecoder[Address] .validate(Validation.state,"Invalid state code") .validate(Validation.zip,"Invalid zip code") implicit val addressEncoder: Encoder[Address] = deriveEncoder[Address] } case class Geo(lat:Double, long:Double) object Geo { implicit val geoDecoder: Decoder[Geo] = deriveDecoder[Geo] implicit val geoEncoder: Encoder[Geo] = deriveEncoder[Geo] } sealed trait Ethnicity extends DatabaseEnum object Ethnicity { case object mexican extends Ethnicity case object american extends Ethnicity case object italian extends Ethnicity case object chinese extends Ethnicity case object greek extends Ethnicity implicit val ethnicityEncoder: Encoder[Ethnicity] = new Encoder[Ethnicity] { final def apply(ethnicity: Ethnicity): Json = { Json.fromString(ethnicity.name) } } implicit val ethnicityDecoder: Decoder[Ethnicity] = new Decoder[Ethnicity] { final def apply(cursor: HCursor): Decoder.Result[Ethnicity] = { val value = cursor.value.asString val kindOfFood = value.flatMap(EthnicityOps.toEnum) kindOfFood.toRight(DecodingFailure("Invalid ethnicity", cursor.history)) } } } object EthnicityOps extends DatabaseEnumOps [Ethnicity] { override def values = Seq(mexican,american,italian,chinese,greek) } sealed trait KindOfFood extends DatabaseEnum object KindOfFood { case object sandwich extends KindOfFood case object burrito extends KindOfFood implicit val kindOfFoodEncoder: Encoder[KindOfFood] = new Encoder[KindOfFood] { final def apply(kindOfFood: KindOfFood): Json = { Json.fromString(kindOfFood.name) } } implicit val kindOfFoodDecoder: Decoder[KindOfFood] = new Decoder[KindOfFood] { final def apply(cursor: HCursor): Decoder.Result[KindOfFood] = { val value = cursor.value.asString val kindOfFood = value.flatMap(KindOfFoodOps.toEnum) kindOfFood.toRight(DecodingFailure("Invalid kind of food", cursor.history)) } } } object KindOfFoodOps extends DatabaseEnumOps [KindOfFood] { override def values = Seq(burrito,sandwich) }
randallalexander/restaurant-chooser
service/src/main/scala/net/randallalexander/restaurant/chooser/model/Restaurant.scala
Scala
mit
3,065
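A round-trip sketch using the derived circe codecs above (circe's syntax and parser modules assumed on the classpath); the address values are illustrative and would also have to satisfy the project's custom state/zip Validation when decoding.

import io.circe.parser.decode
import io.circe.syntax._

import net.randallalexander.restaurant.chooser.model._

object RestaurantJsonExample {
  def main(args: Array[String]): Unit = {
    val taqueria = Restaurant(
      id = Some("42"),
      name = "La Taqueria",
      address = Address("123 Main St", "Springfield", "IL", "62701", geo = Some(Geo(39.78, -89.65))),
      ethnicity = Some(Ethnicity.mexican),
      kindOfFood = Some(KindOfFood.burrito),
      pricePerPerson = Some(12.5))

    // Encode with the derived Encoder[Restaurant] ...
    val json = taqueria.asJson.noSpaces
    println(json)

    // ... and decode back, exercising the validated Decoder[Address].
    decode[Restaurant](json) match {
      case Right(restaurant) => println("decoded " + restaurant.name)
      case Left(error)       => println("decode failed: " + error)
    }
  }
}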
/* * Copyright 2022 HM Revenue & Customs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package iht.controllers.application.tnrb import iht.config.AppConfig import iht.controllers.application.ApplicationControllerTest import iht.forms.TnrbForms._ import iht.testhelpers.{CommonBuilder, ContentChecker} import iht.views.html.application.tnrb.gifts_made_before_death import org.joda.time.LocalDate import play.api.mvc.MessagesControllerComponents import play.api.test.Helpers._ import uk.gov.hmrc.play.bootstrap.frontend.controller.FrontendController class GiftsMadeBeforeDeathControllerTest extends ApplicationControllerTest{ protected abstract class TestController extends FrontendController(mockControllerComponents) with GiftsMadeBeforeDeathController { override val cc: MessagesControllerComponents = mockControllerComponents override implicit val appConfig: AppConfig = mockAppConfig override val giftsMadeBeforeDeathView: gifts_made_before_death = app.injector.instanceOf[gifts_made_before_death] } def giftsMadeBeforeDeathController = new TestController { override val authConnector = mockAuthConnector override val cachingConnector = mockCachingConnector override val ihtConnector = mockIhtConnector } def giftsMadeBeforeDeathControllerNotAuthorised = new TestController { override val authConnector = mockAuthConnector // override val authConnector = mockAuthConnector override val cachingConnector = mockCachingConnector override val ihtConnector = mockIhtConnector } "GiftsMadeBeforeDeathController" must { "redirect to login page onPageLoad if the user is not logged in" in { val result = giftsMadeBeforeDeathController.onPageLoad(createFakeRequest(isAuthorised = false)) status(result) must be(SEE_OTHER) redirectLocation(result) must be (Some(loginUrl)) } "redirect to ida login page on Submit if the user is not logged in" in { val result = giftsMadeBeforeDeathController.onSubmit(createFakeRequest(isAuthorised = false)) status(result) must be(SEE_OTHER) redirectLocation(result) must be (Some(loginUrl)) } "respond with OK on page load" in { val applicationDetails = CommonBuilder.buildApplicationDetails.copy(widowCheck= Some(CommonBuilder.buildWidowedCheck)) createMocksForApplication(mockCachingConnector, mockIhtConnector, appDetails = Some(applicationDetails), getAppDetails = true, saveAppDetails = true) val result = giftsMadeBeforeDeathController.onPageLoad (createFakeRequest()) status(result) mustBe OK } "show predeceased name on page load" in { val firstName = CommonBuilder.firstNameGenerator val secondName = CommonBuilder.surnameGenerator val applicationDetails = CommonBuilder.buildApplicationDetails.copy(increaseIhtThreshold = Some(CommonBuilder.buildTnrbEligibility.copy(firstName = Some(firstName), lastName = Some(secondName)))) createMocksForApplication(mockCachingConnector, mockIhtConnector, appDetails = Some(applicationDetails), getAppDetails = true, saveAppDetails = true) val result = giftsMadeBeforeDeathController.onPageLoad (createFakeRequest()) status(result) mustBe OK 
ContentChecker.stripLineBreaks(contentAsString(result)) must include(messagesApi("iht.estateReport.tnrb.giftsMadeBeforeDeath.question", s"$firstName $secondName")) } "save application and go to Tnrb Overview page on submit" in { val applicationDetails = CommonBuilder.buildApplicationDetails.copy(increaseIhtThreshold = Some(CommonBuilder.buildTnrbEligibility.copy(firstName = Some(CommonBuilder.firstNameGenerator), lastName = Some(CommonBuilder.surnameGenerator)))) createMocksForApplication(mockCachingConnector, mockIhtConnector, appDetails = Some(applicationDetails), getAppDetails = true, saveAppDetails = true) val withGiftsMadeBeforeDeathValue = CommonBuilder.buildTnrbEligibility.copy(isGiftMadeBeforeDeath = Some(false)) val filledGiftsMadeBeforeDeathForm = giftMadeBeforeDeathForm.fill(withGiftsMadeBeforeDeathValue) implicit val request = createFakeRequest().withFormUrlEncodedBody(filledGiftsMadeBeforeDeathForm.data.toSeq: _*) val result = giftsMadeBeforeDeathController.onSubmit (request) status(result) mustBe SEE_OTHER redirectLocation(result) must be(Some(routes.TnrbOverviewController.onPageLoad().url + "#" + mockAppConfig.TnrbGiftsGivenAwayID)) } "go to KickOut page if gifts were given away in last 7 years " in { val applicationDetails = CommonBuilder.buildApplicationDetails.copy(widowCheck= Some(CommonBuilder.buildWidowedCheck)) createMocksForApplication(mockCachingConnector, mockIhtConnector, appDetails = Some(applicationDetails), getAppDetails = true, saveAppDetails = true) val withGiftsMadeBeforeDeathValue = CommonBuilder.buildTnrbEligibility.copy(isGiftMadeBeforeDeath = Some(true)) val filledGiftsMadeBeforeDeathForm = giftMadeBeforeDeathForm.fill(withGiftsMadeBeforeDeathValue) implicit val request = createFakeRequest().withFormUrlEncodedBody(filledGiftsMadeBeforeDeathForm.data.toSeq: _*) val result = giftsMadeBeforeDeathController.onSubmit (request) status(result) mustBe SEE_OTHER redirectLocation(result) must be(Some(iht.controllers.application.routes.KickoutAppController.onPageLoad.url)) } "go to successful Tnrb page on submit when its satisfies happy path" in { val applicationDetails = CommonBuilder.buildApplicationDetails.copy(increaseIhtThreshold = Some(CommonBuilder.buildTnrbEligibility.copy(firstName = Some(CommonBuilder.firstNameGenerator), lastName = Some(CommonBuilder.surnameGenerator), dateOfMarriage= Some(new LocalDate(1984, 12, 11)))), widowCheck = Some(CommonBuilder.buildWidowedCheck)) createMocksForApplication(mockCachingConnector, mockIhtConnector, appDetails = Some(applicationDetails), getAppDetails = true, saveAppDetails = true) val withGiftsMadeBeforeDeathValue = CommonBuilder.buildTnrbEligibility.copy(isGiftMadeBeforeDeath = Some(false)) val filledGiftsMadeBeforeDeathForm = giftMadeBeforeDeathForm.fill(withGiftsMadeBeforeDeathValue) implicit val request = createFakeRequest().withFormUrlEncodedBody(filledGiftsMadeBeforeDeathForm.data.toSeq: _*) val result = giftsMadeBeforeDeathController.onSubmit (request) status(result) mustBe SEE_OTHER redirectLocation(result) must be(Some(routes.TnrbSuccessController.onPageLoad().url)) } behave like controllerOnPageLoadWithNoExistingRegistrationDetails(mockCachingConnector, giftsMadeBeforeDeathController.onPageLoad(createFakeRequest())) } }
hmrc/iht-frontend
test/iht/controllers/application/tnrb/GiftsMadeBeforeDeathControllerTest.scala
Scala
apache-2.0
7,439
package net.javachallenge.util.settings /** * Object containing default values for the application. */ object Defaults { /** * The default environment for the application. */ val ENV: Environment = Production /** * The default XML parser for settings. */ val XML_SETTINGS_PARSER_CLASS_NAME = "net.javachallenge.util.settings.XMLSettingsParser" /** * Returns the default path of the configuration file's folder. */ def SETTINGS_PATH = "src/%s/config".format(Environment.current.folder) /** * The default file format for settings. */ val SETTINGS_FORMAT = "xml" /** * The default locale for the application if not present in configuration file. */ val LOCALE = "ja" /** * The default fallback for the application if not present in configuration file. */ val FALLBACK = "ja" /** * The default character of new line. */ val NEW_LINE = System.getProperty("line.separator") }
AI-comp/JavaChallenge2012
src/main/scala/net/javachallenge/util/settings/Defaults.scala
Scala
apache-2.0
948
/** * Copyright (C) 2013 Orbeon, Inc. * * This program is free software; you can redistribute it and/or modify it under the terms of the * GNU Lesser General Public License as published by the Free Software Foundation; either version * 2.1 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU Lesser General Public License for more details. * * The full text of the license is available at http://www.gnu.org/copyleft/lesser.html */ package org.orbeon.oxf.fb import org.orbeon.oxf.fr.FormRunner._ import org.orbeon.oxf.util.ScalaUtils.CodePointsOps import org.orbeon.oxf.xforms.action.XFormsAPI._ import org.orbeon.saxon.om.NodeInfo import org.orbeon.scaxon.XML._ import scala.util.control.NonFatal trait PublishOps { // Publish a form and its attachments //@XPathFunction def publish(xhtml: NodeInfo, app: String, form: String, document: String, formVersion: String): Unit = { try { val (beforeURLs, _, publishedVersion) = putWithAttachments( data = xhtml.root, toBaseURI = "", // local publish fromBasePath = createFormDataBasePath("orbeon", "builder", isDraft = false, document), toBasePath = createFormDefinitionBasePath(app, form), filename = "form.xhtml", commonQueryString = encodeSimpleQuery(List("document" → document)), forceAttachments = false, // Using "next" for attachments works as attachments are saved first, and the persistence layer // uses the latest version of the published forms (not attachments) to figure what the next // version is formVersion = formVersion.trimAllToOpt ) setvalue(instanceRoot("fb-publish-instance").get / "published-attachments", beforeURLs.size.toString) setvalue(instanceRoot("fb-publish-instance").get / "published-version", publishedVersion.toString) toggle("fb-publish-dialog-success") } catch { case NonFatal(t) ⇒ toggle("fb-publish-dialog-error") } setfocus("fb-publish-dialog") } }
joansmith/orbeon-forms
src/main/scala/org/orbeon/oxf/fb/PublishOps.scala
Scala
lgpl-2.1
2,300
package slinky.core.annotations import slinky.core.FunctionalComponent import slinky.web.ReactDOM import org.scalajs.dom import org.scalatest.funsuite.AsyncFunSuite @react object SimpleFunctionalComponent { case class Props[T](in: Seq[T]) val component = FunctionalComponent[Props[_]] { case Props(in) => in.mkString(" ") } } @react object FunctionalComponentJustReExpose { val component = FunctionalComponent[Int] { in => in.toString } } @react object FunctionalComponentWithPrivateValComponent { private val component = FunctionalComponent[Int] { in => in.toString } } @react object FunctionalComponentWithProtectedValComponent { protected val component = FunctionalComponent[Int] { in => in.toString } } @react object FunctionalComponentEmptyProps { case class Props() val component = FunctionalComponent[Props](_ => "test") } @react object FunctionalComponentUnitProps { type Props = Unit val component = FunctionalComponent[Props](_ => "test") } class ReactAnnotatedFunctionalComponentTest extends AsyncFunSuite { test("Simple component has generated apply") { val container = dom.document.createElement("div") ReactDOM.render( SimpleFunctionalComponent(in = Seq(1, 2, 3)), container ) assert(container.innerHTML == "1 2 3") } test("Component without case class re-exports apply method") { val container = dom.document.createElement("div") ReactDOM.render( FunctionalComponentJustReExpose(1), container ) assert(container.innerHTML == "1") } test("Component with private component definition works") { val container = dom.document.createElement("div") ReactDOM.render( FunctionalComponentWithPrivateValComponent(1), container ) assert(container.innerHTML == "1") } test("Component with protected component definition works") { val container = dom.document.createElement("div") ReactDOM.render( FunctionalComponentWithProtectedValComponent(1), container ) assert(container.innerHTML == "1") } test("Component with empty props has shortcut apply") { val container = dom.document.createElement("div") ReactDOM.render( FunctionalComponentEmptyProps(), container ) assert(container.innerHTML == "test") } test("Component with unit props has shortcut apply") { val container = dom.document.createElement("div") ReactDOM.render( FunctionalComponentUnitProps(), container ) assert(container.innerHTML == "test") } }
shadaj/slinky
tests/src/test/scala-2/slinky/core/annotations/ReactAnnotatedFunctionalComponentTest.scala
Scala
mit
2,568
/* * Derived from https://github.com/spray/spray/blob/v1.1-M7/spray-http/src/main/scala/spray/http/parser/BasicRules.scala * * Copyright (C) 2011-2012 spray.io * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.http4s.parser import scala.reflect.ClassTag import scalaz.Validation import org.http4s.ParseResult import org.parboiled2._ import shapeless._ import shapeless.tag.@@ // direct implementation of http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2 private[http4s] trait Rfc2616BasicRules extends Parser { // scalastyle:off public.methods.have.type def Octet = rule { "\\u0000" - "\\u00FF" } def Char = rule { "\\u0000" - "\\u007F" } def Alpha = rule { LoAlpha | UpAlpha } def UpAlpha = rule { "A" - "Z" } def LoAlpha = rule { "a" - "z" } def Digit = rule { "0" - "9" } def AlphaNum = rule { Alpha | Digit } def CTL = rule { "\\u0000" - "\\u001F" | "\\u007F" } def CRLF = rule { str("\\r\\n") } def LWS = rule { optional(CRLF) ~ oneOrMore(anyOf(" \\t")) } def Text = rule { !CTL ~ ANY | LWS } def Hex = rule { "A" - "F" | "a" - "f" | Digit } def Separator = rule { anyOf("()<>@,;:\\\\\\"/[]?={} \\t") } def Token: Rule1[String] = rule { capture(oneOrMore(!CTL ~ !Separator ~ ANY)) } // TODO What's the replacement for DROP? def Comment: Rule0 = rule { "(" ~ zeroOrMore(CText | QuotedPair ~> DROP | Comment) ~ ")" } def DROP: Any => Unit = { _ => () } def CText = rule { !anyOf("()") ~ Text } def QuotedString: Rule1[String] = rule { "\\"" ~ zeroOrMore(QuotedPair | QDText) ~> {chars: Seq[Char] => new String(chars.toArray[scala.Char])} ~ "\\"" } def QDText: Rule1[Char] = rule { !ch('"') ~ Text ~ LASTCHAR } def QuotedPair: Rule1[Char] = rule { "\\\\" ~ Char ~ LASTCHAR } // helpers def OptWS = rule { zeroOrMore(LWS) } def ListSep = rule { oneOrMore("," ~ OptWS) } def LASTCHAR: Rule1[Char] = rule { push(input.charAt(cursor - 1)) } // we don't match scoped IPv6 addresses def IPv6Address = rule { oneOrMore(Hex | anyOf(":.")) } def IPv6Reference: Rule1[String] = rule { capture("[" ~ IPv6Address ~ "]") } // scalastyle:on public.methods.have.type } private[http4s] object Rfc2616BasicRules { def token(in: ParserInput): ParseResult[String] = new Rfc2616BasicRules { override def input: ParserInput = in }.Token.run()(ScalazDeliverySchemes.Disjunction) def isToken(in: ParserInput) = token(in).isRight }
hvesalai/http4s
core/src/main/scala/org/http4s/parser/Rfc2616BasicRules.scala
Scala
apache-2.0
2,929
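Rfc2616BasicRules is private[http4s], so any direct use has to live inside that package; a small sketch of the companion's token helper, which runs the Token rule and yields a ParseResult.

package org.http4s
package parser

// Sketch only: placed in org.http4s.parser because Rfc2616BasicRules is private[http4s].
object TokenExample {
  def main(args: Array[String]): Unit = {
    // "Content-Type" contains no CTLs or separators, so it parses as a single token.
    println(Rfc2616BasicRules.token("Content-Type"))   // right-biased success carrying the token
    println(Rfc2616BasicRules.isToken("Content-Type")) // true
  }
}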
package org.akoshterek.backgammon.data

class TrainEntry(val positionId: String, val reward: Array[Double])
akoshterek/MultiGammonJava
multi-gammon-util/src/main/java/org/akoshterek/backgammon/data/TrainEntry.scala
Scala
gpl-3.0
112
package org.jetbrains.plugins.scala package lang.psi.types import com.intellij.psi.PsiClass import org.jetbrains.plugins.scala.extensions.{PsiClassExt, PsiElementExt, PsiParameterExt} import org.jetbrains.plugins.scala.lang.psi.api.base.ScPrimaryConstructor import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScClassParameter import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScClass import org.jetbrains.plugins.scala.lang.psi.types.api.{ExtractClass, ValType} /** * Nikolay.Tropin * 2014-10-02 */ object ValueClassType { def unapply(tp: ScType): Option[ScType] = { tp match { case _: ValType => None case ExtractClass(cl: ScClass) if isValueClass(cl) => cl.constructors match { case Seq(pc: ScPrimaryConstructor) => pc.parameters.headOption.map(_.paramType()) case _ => None } case _ => None } } def isValueType(tp: ScType): Boolean = unapply(tp).isDefined def isValueClass(cl: PsiClass): Boolean = cl match { case scClass: ScClass => scClass.parameters match { case Seq(p) if isValOrCompiled(p) => extendsAnyVal(cl) case _ => false } case _ => false } def extendsAnyVal(cl: PsiClass): Boolean = cl.getSupers.map(_.qualifiedName).contains("scala.AnyVal") private def isValOrCompiled(p: ScClassParameter) = { if (p.isVal || p.isCaseClassVal) true else p.containingScalaFile.exists(_.isCompiled) } }
loskutov/intellij-scala
src/org/jetbrains/plugins/scala/lang/psi/types/ValueClassType.scala
Scala
apache-2.0
1,473
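ValueClassType matches Scala value classes: a class with a single val constructor parameter that extends AnyVal. The extractor itself operates on the plugin's ScType/PsiClass model, so the sketch below just shows the source shape it recognizes (names are illustrative).

// Recognized: one val parameter, extends AnyVal.
// ValueClassType.unapply on the type of Meters would return Some(Double),
// i.e. the type of the primary constructor's single parameter.
class Meters(val value: Double) extends AnyVal {
  def +(other: Meters): Meters = new Meters(value + other.value)
}

// Not recognized: a plain wrapper that does not extend AnyVal.
class Plain(val value: Double)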
package org.denigma.preview.routes import akka.http.scaladsl.model._ import akka.http.scaladsl.server.{Directives, Route} import org.denigma.preview.templates.MyStyles import scalacss.Defaults._ class Head extends Directives { lazy val webjarsPrefix = "lib" lazy val resourcePrefix = "resources" def mystyles = path("styles" / "mystyles.css"){ complete { HttpResponse( entity = HttpEntity(MediaTypes.`text/css`.withCharset(HttpCharsets.`UTF-8`), MyStyles.render )) } } def loadResources = pathPrefix(resourcePrefix ~ Slash) { getFromResourceDirectory("") } def webjars =pathPrefix(webjarsPrefix ~ Slash) { getFromResourceDirectory(webjarsPrefix) } def routes: Route = mystyles ~ webjars ~ loadResources }
denigma/akka-http-extensions
preview/backend/src/main/scala/org/denigma/preview/routes/Head.scala
Scala
mpl-2.0
757
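A minimal sketch of serving these routes with akka-http; the system name, host and port are illustrative, and bindAndHandle is assumed to pick up the implicit materializer as in the usual akka-http setup.

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer

import org.denigma.preview.routes.Head

object Boot extends App {
  implicit val system = ActorSystem("preview")  // illustrative name
  implicit val materializer = ActorMaterializer()

  // Serves mystyles ~ webjars ~ loadResources on localhost:8080 (illustrative).
  Http().bindAndHandle(new Head().routes, "localhost", 8080)
}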
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.samza.checkpoint import java.util.concurrent.ConcurrentHashMap import org.apache.samza.system.SystemStream import org.apache.samza.system.SystemStreamPartition import org.apache.samza.system.SystemStreamMetadata import org.apache.samza.system.SystemStreamMetadata.OffsetType import org.apache.samza.SamzaException import scala.collection.JavaConversions._ import org.apache.samza.util.Logging import org.apache.samza.config.Config import org.apache.samza.config.StreamConfig.Config2Stream import org.apache.samza.config.SystemConfig.Config2System import org.apache.samza.system.SystemAdmin import org.apache.samza.container.TaskName import scala.collection._ /** * OffsetSetting encapsulates a SystemStream's metadata, default offset, and * reset offset settings. It's just a convenience class to make OffsetManager * easier to work with. */ case class OffsetSetting( /** * The metadata for the SystemStream. */ metadata: SystemStreamMetadata, /** * The default offset (oldest, newest, or upcoming) for the SystemStream. * This setting is used when no checkpoint is available for a SystemStream * if the job is starting for the first time, or the SystemStream has been * reset (see resetOffsets, below). */ defaultOffset: OffsetType, /** * Whether the SystemStream's offset should be reset or not. Determines * whether an offset should be ignored at initialization time, even if a * checkpoint is available. This is useful for jobs that wish to restart * reading from a stream at a different position than where they last * checkpointed. If this is true, then defaultOffset will be used to find * the new starting position in the stream. */ resetOffset: Boolean) /** * OffsetManager object is a helper that does wiring to build an OffsetManager * from a config object. */ object OffsetManager extends Logging { def apply( systemStreamMetadata: Map[SystemStream, SystemStreamMetadata], config: Config, checkpointManager: CheckpointManager = null, systemAdmins: Map[String, SystemAdmin] = Map(), checkpointListeners: Map[String, CheckpointListener] = Map(), offsetManagerMetrics: OffsetManagerMetrics = new OffsetManagerMetrics) = { debug("Building offset manager for %s." format systemStreamMetadata) val offsetSettings = systemStreamMetadata .map { case (systemStream, systemStreamMetadata) => // Get default offset. val streamDefaultOffset = config.getDefaultStreamOffset(systemStream) val systemDefaultOffset = config.getDefaultSystemOffset(systemStream.getSystem) val defaultOffsetType = if (streamDefaultOffset.isDefined) { OffsetType.valueOf(streamDefaultOffset.get.toUpperCase) } else if (systemDefaultOffset.isDefined) { OffsetType.valueOf(systemDefaultOffset.get.toUpperCase) } else { info("No default offset for %s defined. Using upcoming." 
format systemStream) OffsetType.UPCOMING } debug("Using default offset %s for %s." format (defaultOffsetType, systemStream)) // Get reset offset. val resetOffset = config.getResetOffset(systemStream) debug("Using reset offset %s for %s." format (resetOffset, systemStream)) // Build OffsetSetting so we can create a map for OffsetManager. (systemStream, OffsetSetting(systemStreamMetadata, defaultOffsetType, resetOffset)) }.toMap new OffsetManager(offsetSettings, checkpointManager, systemAdmins, checkpointListeners, offsetManagerMetrics) } } /** * OffsetManager does several things: * * <ul> * <li>Loads last checkpointed offset for all input SystemStreamPartitions in a * SamzaContainer.</li> * <li>Uses last checkpointed offset to figure out the next offset to start * reading from for each input SystemStreamPartition in a SamzaContainer</li> * <li>Keep track of the last processed offset for each SystemStreamPartitions * in a SamzaContainer.</li> * <li>Checkpoints the last processed offset for each SystemStreamPartitions * in a SamzaContainer periodically to the CheckpointManager.</li> * </ul> * * All partitions must be registered before start is called, and start must be * called before get/update/checkpoint/stop are called. */ class OffsetManager( /** * Offset settings for all streams that the OffsetManager is managing. */ val offsetSettings: Map[SystemStream, OffsetSetting] = Map(), /** * Optional checkpoint manager for checkpointing offsets whenever * checkpoint is called. */ val checkpointManager: CheckpointManager = null, /** * SystemAdmins that are used to get next offsets from last checkpointed * offsets. Map is from system name to SystemAdmin class for the system. */ val systemAdmins: Map[String, SystemAdmin] = Map(), /** * Map of checkpointListeners for the systems that chose to provide one. * OffsetManager will call the listeners on each checkpoint. */ checkpointListeners: Map[String, CheckpointListener] = Map(), /** * offsetManagerMetrics for keeping track of checkpointed offsets of each SystemStreamPartition. */ val offsetManagerMetrics: OffsetManagerMetrics = new OffsetManagerMetrics) extends Logging { /** * Last offsets processed for each SystemStreamPartition. */ val lastProcessedOffsets = new ConcurrentHashMap[TaskName, ConcurrentHashMap[SystemStreamPartition, String]]() /** * Offsets to start reading from for each SystemStreamPartition. This * variable is populated after all checkpoints have been restored. */ var startingOffsets = Map[TaskName, Map[SystemStreamPartition, String]]() /** * The set of system stream partitions that have been registered with the * OffsetManager, grouped by the taskName they belong to. These are the SSPs * that will be tracked within the offset manager. 
*/ val systemStreamPartitions = mutable.Map[TaskName, mutable.Set[SystemStreamPartition]]() def register(taskName: TaskName, systemStreamPartitionsToRegister: Set[SystemStreamPartition]) { systemStreamPartitions.getOrElseUpdate(taskName, mutable.Set[SystemStreamPartition]()).addAll(systemStreamPartitionsToRegister) // register metrics systemStreamPartitions.foreach { case (taskName, ssp) => ssp.foreach (ssp => offsetManagerMetrics.addCheckpointedOffset(ssp, "")) } } def start { registerCheckpointManager loadOffsetsFromCheckpointManager stripResetStreams loadStartingOffsets loadDefaults info("Successfully loaded last processed offsets: %s" format lastProcessedOffsets) info("Successfully loaded starting offsets: %s" format startingOffsets) } /** * Set the last processed offset for a given SystemStreamPartition. */ def update(taskName: TaskName, systemStreamPartition: SystemStreamPartition, offset: String) { lastProcessedOffsets.putIfAbsent(taskName, new ConcurrentHashMap[SystemStreamPartition, String]()) if (offset != null) { lastProcessedOffsets.get(taskName).put(systemStreamPartition, offset) } } /** * Get the last processed offset for a SystemStreamPartition. */ def getLastProcessedOffset(taskName: TaskName, systemStreamPartition: SystemStreamPartition): Option[String] = { Option(lastProcessedOffsets.get(taskName)).map(_.get(systemStreamPartition)) } /** * Get the starting offset for a SystemStreamPartition. This is the offset * where a SamzaContainer begins reading from when it starts up. */ def getStartingOffset(taskName: TaskName, systemStreamPartition: SystemStreamPartition) = { startingOffsets.get(taskName) match { case Some(sspToOffsets) => sspToOffsets.get(systemStreamPartition) case None => None } } /** * Checkpoint all offsets for a given TaskName using the CheckpointManager. */ def checkpoint(taskName: TaskName) { if (checkpointManager != null || checkpointListeners.nonEmpty) { debug("Checkpointing offsets for taskName %s." format taskName) val sspsForTaskName = systemStreamPartitions.getOrElse(taskName, throw new SamzaException("No such SystemStreamPartition set " + taskName + " registered for this checkpointmanager")).toSet val sspToOffsets = lastProcessedOffsets.getOrElse(taskName, null) val partitionOffsets = if(sspToOffsets != null) { sspToOffsets.filterKeys(sspsForTaskName.contains(_)) } else { warn(taskName + " is not found... ") Map[SystemStreamPartition, String]() } val checkpoint = new Checkpoint(partitionOffsets) if(checkpointManager != null) { checkpointManager.writeCheckpoint(taskName, checkpoint) if(sspToOffsets != null) { sspToOffsets.foreach { case (ssp, cp) => offsetManagerMetrics.checkpointedOffsets(ssp).set(cp) } } } // invoke checkpoint listeners //partitionOffsets.groupBy(_._1.getSystem).foreach { partitionOffsets.groupBy { case (ssp, _) => ssp.getSystem }.foreach { case (systemName:String, offsets: Map[SystemStreamPartition, String]) => { // Option is empty if there is no checkpointListener for this systemName checkpointListeners.get(systemName).foreach(_.onCheckpoint(offsets)) } } } else { debug("Skipping checkpointing for taskName %s because no checkpoint manager/callback is defined." format taskName) } } def stop { if (checkpointManager != null) { debug("Shutting down checkpoint manager.") checkpointManager.stop } else { debug("Skipping checkpoint manager shutdown because no checkpoint manager is defined.") } } /** * Register all partitions with the CheckpointManager. 
*/ private def registerCheckpointManager { if (checkpointManager != null) { debug("Registering checkpoint manager.") systemStreamPartitions.keys.foreach(checkpointManager.register) } else { debug("Skipping checkpoint manager registration because no manager was defined.") } } /** * Loads last processed offsets from checkpoint manager for all registered * partitions. */ private def loadOffsetsFromCheckpointManager { if (checkpointManager != null) { debug("Loading offsets from checkpoint manager.") checkpointManager.start val result = systemStreamPartitions .keys .flatMap(restoreOffsetsFromCheckpoint(_)) .toMap result.map { case (taskName, sspToOffset) => { lastProcessedOffsets.put(taskName, new ConcurrentHashMap[SystemStreamPartition, String](sspToOffset.filter { case (systemStreamPartition, offset) => val shouldKeep = offsetSettings.contains(systemStreamPartition.getSystemStream) if (!shouldKeep) { info("Ignoring previously checkpointed offset %s for %s since the offset is for a stream that is not currently an input stream." format (offset, systemStreamPartition)) } info("Checkpointed offset is currently %s for %s" format (offset, systemStreamPartition)) shouldKeep })) } } } else { debug("Skipping offset load from checkpoint manager because no manager was defined.") } } /** * Loads last processed offsets for a single taskName. */ private def restoreOffsetsFromCheckpoint(taskName: TaskName): Map[TaskName, Map[SystemStreamPartition, String]] = { debug("Loading checkpoints for taskName: %s." format taskName) val checkpoint = checkpointManager.readLastCheckpoint(taskName) if (checkpoint != null) { Map(taskName -> checkpoint.getOffsets.toMap) } else { info("Did not receive a checkpoint for taskName %s. Proceeding without a checkpoint." format taskName) Map(taskName -> Map()) } } /** * Removes offset settings for all SystemStreams that are to be forcibly * reset using resetOffsets. */ private def stripResetStreams { val systemStreamPartitionsToReset = getSystemStreamPartitionsToReset(lastProcessedOffsets) systemStreamPartitionsToReset.foreach { case (taskName, systemStreamPartitions) => { systemStreamPartitions.foreach { systemStreamPartition => { val offset = lastProcessedOffsets(taskName).get(systemStreamPartition) info("Got offset %s for %s, but ignoring, since stream was configured to reset offsets." format (offset, systemStreamPartition)) } } } } lastProcessedOffsets.keys().foreach { taskName => lastProcessedOffsets.get(taskName).keySet().removeAll(systemStreamPartitionsToReset(taskName)) } } /** * Returns a map of all SystemStreamPartitions in lastProcessedOffsets that need to be reset */ private def getSystemStreamPartitionsToReset(taskNameTosystemStreamPartitions: ConcurrentHashMap[TaskName, ConcurrentHashMap[SystemStreamPartition, String]]): Map[TaskName, Set[SystemStreamPartition]] = { taskNameTosystemStreamPartitions.map { case (taskName, sspToOffsets) => { taskName -> (sspToOffsets.filter { case (systemStreamPartition, offset) => { val systemStream = systemStreamPartition.getSystemStream offsetSettings .getOrElse(systemStream, throw new SamzaException("Attempting to reset a stream that doesn't have offset settings %s." format systemStream)) .resetOffset } }.keys.toSet) } } } /** * Use last processed offsets to get next available offset for each * SystemStreamPartition, and populate startingOffsets. 
*/ private def loadStartingOffsets { startingOffsets = lastProcessedOffsets.map { case (taskName, sspToOffsets) => { taskName -> { sspToOffsets.groupBy(_._1.getSystem).flatMap { case (systemName, systemStreamPartitionOffsets) => systemAdmins .getOrElse(systemName, throw new SamzaException("Missing system admin for %s. Need system admin to load starting offsets." format systemName)) .getOffsetsAfter(systemStreamPartitionOffsets) } } } } } /** * Use defaultOffsets to get a next offset for every SystemStreamPartition * that was registered, but has no offset. */ private def loadDefaults { val taskNameToSSPs: Map[TaskName, Set[SystemStreamPartition]] = systemStreamPartitions taskNameToSSPs.foreach { case (taskName, systemStreamPartitions) => { systemStreamPartitions.foreach { systemStreamPartition => if (!startingOffsets.contains(taskName) || !startingOffsets(taskName).contains(systemStreamPartition)) { val systemStream = systemStreamPartition.getSystemStream val partition = systemStreamPartition.getPartition val offsetSetting = offsetSettings.getOrElse(systemStream, throw new SamzaException("Attempting to load defaults for stream %s, which has no offset settings." format systemStream)) val systemStreamMetadata = offsetSetting.metadata val offsetType = offsetSetting.defaultOffset debug("Got default offset type %s for %s" format (offsetType, systemStreamPartition)) val systemStreamPartitionMetadata = systemStreamMetadata .getSystemStreamPartitionMetadata .get(partition) if (systemStreamPartitionMetadata != null) { val nextOffset = { val requested = systemStreamPartitionMetadata.getOffset(offsetType) if (requested == null) { warn("Requested offset type %s in %s, but the stream is empty. Defaulting to the upcoming offset." format (offsetType, systemStreamPartition)) systemStreamPartitionMetadata.getOffset(OffsetType.UPCOMING) } else requested } debug("Got next default offset %s for %s" format (nextOffset, systemStreamPartition)) startingOffsets.get(taskName) match { case Some(sspToOffsets) => startingOffsets += taskName -> (sspToOffsets + (systemStreamPartition -> nextOffset)) case None => startingOffsets += taskName -> Map(systemStreamPartition -> nextOffset) } } else { throw new SamzaException("No metadata available for partition %s." format systemStreamPartitionMetadata) } } } } } } }
nickpan47/samza
samza-core/src/main/scala/org/apache/samza/checkpoint/OffsetManager.scala
Scala
apache-2.0
17,434
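A minimal, hedged sketch of the in-memory tracking side of OffsetManager: register the partitions for a task, record the last processed offset, and read it back. A real SamzaContainer would also supply offsetSettings, systemAdmins and a CheckpointManager, then call start() and checkpoint(); the system and stream names below are illustrative.

import org.apache.samza.Partition
import org.apache.samza.checkpoint.OffsetManager
import org.apache.samza.container.TaskName
import org.apache.samza.system.SystemStreamPartition

object OffsetManagerExample {
  def main(args: Array[String]): Unit = {
    // All-defaults manager: no checkpoint manager, no offset settings, no system admins.
    val offsetManager = new OffsetManager()

    val taskName = new TaskName("Partition 0")
    val ssp = new SystemStreamPartition("kafka", "PageViewEvent", new Partition(0))

    // All partitions must be registered before the manager is used.
    offsetManager.register(taskName, Set(ssp))

    // Record the last processed offset for the partition ...
    offsetManager.update(taskName, ssp, "42")

    // ... and read it back.
    println(offsetManager.getLastProcessedOffset(taskName, ssp)) // Some(42)
  }
}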
/* * Copyright 2022 HM Revenue & Customs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package models case class YesOrNoQuestion(question: String, answer: Boolean)
hmrc/vat-registration-frontend
app/models/YesOrNoQuestion.scala
Scala
apache-2.0
683
/** I use spark package to hack all those private Spark classes */ package org.apache.spark package zpark import java.net.InetSocketAddress import scalaz.{-\\/, \\/, \\/-} import scalaz.concurrent.{Task, Strategy} import scalaz.stream._ import scala.concurrent.duration._ import ReceiveY._ import wye.Request import java.util.concurrent.{ScheduledExecutorService, ExecutorService, ThreadFactory, Executors} import java.nio.channels.spi.AsynchronousChannelProvider import java.util.concurrent.ThreadFactory import java.util.concurrent.atomic.AtomicInteger object NioServer { def apply(address: InetSocketAddress, w: Writer1[Bytes, Bytes, Bytes]): Process[Task, Bytes] = { val srv = nio.server(bind = address/*, reuseAddress = false*/)(CustomStrategy.buildAsynchronousChannelGroup).map { client => client.flatMap { ex => ex.readThrough(w).run() } } val S = CustomStrategy.buildNewStrategy // Merges all client streams merge.mergeN(srv)(S) } def echo(address: InetSocketAddress): Process[Task, Bytes] = { // This is a Writer1 that echoes everything it receives to the client and emits it locally def echoAll: Writer1[Bytes, Bytes, Bytes] = Process.receive1[Bytes, Bytes \\/ Bytes] { i => // echoes on left, emits on right and then loop (fby = followed by) Process.emitSeq( Seq(\\/-(i), -\\/(i)) ) fby echoAll } apply(address, echoAll) } /** A server that acks all received packet by its size as a 4-bytes int */ def ackSize(address: InetSocketAddress): Process[Task, Bytes] = { def ackAll: Writer1[Bytes, Bytes, Bytes] = { Process.receive1[Bytes, Bytes \\/ Bytes] { i => // println("server received " + new String(i.toArray)) val arr = java.nio.ByteBuffer.allocate(4).putInt(i.size) arr.clear() //println("SZ:"+Bytes.of(arr)) Process.emitSeq(Seq(\\/-(i), -\\/(Bytes.of(arr)))) fby ackAll } } apply(address, ackAll) } } object NioClient { import Process._ /** a client sending all data in input process and awaiting them to be echo'ed by the server */ def echo(address: InetSocketAddress, data: Bytes): Process[Task, Bytes] = { // the custom Wye managing business logic def echoLogic: WyeW[Bytes, Bytes, Bytes, Bytes] = { def go(collected: Int): WyeW[Bytes, Bytes, Bytes, Bytes] = { // Create a Wye that can receive on both sides receiveBoth { // Receive on left == receive from server case ReceiveL(rcvd) => // `emitO` outputs on `I2` branch and then... emitO(rcvd) fby // if we have received everything sent, halt (if (collected + rcvd.size >= data.size) halt // else go on collecting else go(collected + rcvd.size)) // Receive on right == receive on `W2` branch == your external data source case ReceiveR(data) => // `emitW` outputs on `W` branch == sending to server // and loops emitW(data) fby go(collected) // When server closes case HaltL(rsn) => Halt(rsn) // When client closes, we go on collecting echoes case HaltR(_) => go(collected) } } // Init go(0) } // Finally wiring all... 
for { ex <- nio.connect(address) rslt <- ex.wye(echoLogic).run(Process.emit(data)) } yield { rslt } } /** a client that send all data emitted by input process and awaits for full size ack by server */ def sendAndCheckSize(address: InetSocketAddress, data: Process[Task, Bytes]): Process[Task, Bytes] = { def ack: WyeW[Bytes, Int \\/ Bytes, Bytes, Bytes] = { def go(buf: Bytes, collected: Int, expected: Int, collecting: Boolean): WyeW[Bytes, Int \\/ Bytes, Bytes, Bytes] = { receiveBoth { case ReceiveL(-\\/(_)) => go(buf, collected, expected, collecting) case ReceiveL(\\/-(rcvd)) => // println(s"Client received:$rcvd - Buf: $buf - Collected:$collected - Expected:$expected - collecting:$collecting") emitO(rcvd) fby { if(buf.size + rcvd.size < 4) { go(buf ++ rcvd, collected, expected, collecting) } else { // split Bytes 4-bytes per 4-bytes to Int, sum them & returns rest def splitter(buf: Bytes): (Int, Bytes) = { var (l, r) = buf.splitAt(4) var sz: Int = 0 while(!l.isEmpty) { sz += l.asByteBuffer.getInt() if(r.size >= 4) { val (l_, r_) = r.splitAt(4) l = l_ r = r_ } else { l = Bytes.empty } } (sz, r.compact) } val (sz, nextBuf) = splitter(buf ++ rcvd) // println(s"BUF:$buf - NextBuf:${nextBuf} - SZ:$sz") if(collecting && collected + sz >= expected) halt else go(nextBuf, collected + sz, expected, collecting) } } case ReceiveR(data) => // println("Client sending "+new String(data.toArray)+s" - Collected:$collected - Expected:$expected") tell(data) fby go(buf, collected, expected + data.size, collecting) case HaltL(rsn) => /*println("server halt");*/ Halt(rsn) case HaltR(_) => /*println("input halt");*/ if(collected >= expected) halt else go(buf, collected, expected, true) } } go(Bytes.empty, 0, 0, false) } val AG = CustomStrategy.buildAsynchronousChannelGroup for { ex <- nio.connect(to=address/*, noDelay=true, reuseAddress = false*/)(AG) rslt <- flow(ex, data)(ack) //rslt <- ex.wye(ack).run(p=data, terminateOn=Request.Both) } yield { rslt } } /** custom hacked flow function */ def flow[I, W, I2, W2](self: Exchange[I, W], input: Process[Task, W2])(y: WyeW[W, Int \\/ I, W2, I2])(implicit S: Strategy = Strategy.DefaultStrategy): Process[Task, I2] = {//Exchange[I2, W2] = { val wq = async.boundedQueue[W](0) val w2q = async.boundedQueue[W2](0) def mergeHaltBoth[I]: Wye[I,I,I] = { def go: Wye[I,I,I] = receiveBoth[I,I,I]({ case ReceiveL(i) => /*println("L:"+i);*/ emit(i) fby go case ReceiveR(i) => /*println("R:"+i);*/ emit(i) fby go case HaltL(rsn) => /*println("HALTL:"+rsn);*/ Halt(rsn) case HaltR(rsn) => /*println("HALTR:"+rsn);*/ w2q.close.runAsync(_ => ()); go }) go } def cleanup: Process[Task, Nothing] = eval_(wq.close) fby eval_(Task.delay(w2q.close.runAsync(_ => ()))) def receive: Process[Task, I] = self.read onComplete cleanup def send: Process[Task, Unit] = wq.dequeue to self.write def sendAndReceive = { val (o, ny) = y.unemit (emitSeq(o) fby ((wq.size.discrete either receive).wye(w2q.dequeue)(ny)(S) onComplete cleanup) either send).flatMap { case \\/-(o) => halt case -\\/(-\\/(o)) => eval_(wq.enqueueOne(o)) case -\\/(\\/-(b)) => emit(b) } } val res = Exchange(sendAndReceive, w2q.enqueue) res.read.wye((input to res.write).drain)(mergeHaltBoth[I2]) } } object NioUtils { import scalaz._ import Scalaz._ import Process._ def localAddress(port:Int) = new InetSocketAddress("127.0.0.1", port) def rechunk[I](p: I => (Vector[I], Boolean))(implicit I: Monoid[I]): Process1[I, I] = { import Process._ def go(acc: I): Process1[I, I] = { await1[I].flatMap { i => // println("i:"+i) val (v, emitLast) = p(i) // 
println(s"acc:$acc v:$v emitLast:$emitLast") v.size match { // imagine the separator is "\\n" // "\\n" case 0 => emit(acc) fby go(I.zero) // "1234" case 1 => if(emitLast) { emit(I.append(acc, v.head)) fby go(I.zero) } else (go(I.append(acc, v.head)) orElse emit(I.append(acc, v.head))) case _ => // \\n1234 if(v.head == I.zero) { if(emitLast) { emit(acc) ++ emitAll(v) fby go(I.zero) } else { emit(acc) ++ emitAll(v.init) ++ (go(v.last) orElse emit(v.last)) } } // 1234\\n123\\n else if(emitLast) { emit(I.append(acc, v.head)) ++ emitAll(v.tail) fby go(I.zero) } // 1234\\n123... else { emit(I.append(acc, v.head)) ++ emitAll(v.init) ++ (go(v.last) orElse emit(v.last)) } } } } go(I.zero) } } object CustomStrategy { import Strategy._ def SemiExecutorService: ExecutorService = { import Executors._ newFixedThreadPool(Runtime.getRuntime.availableProcessors / 2, new ThreadFactory { def newThread(r: Runnable) = { val t = defaultThreadFactory.newThread(r) t.setDaemon(true) t } }) } def buildNewStrategy: Strategy = Executor(SemiExecutorService) def buildAsynchronousChannelGroup = { val idx = new AtomicInteger(0) AsynchronousChannelProvider.provider().openAsynchronousChannelGroup( /*Runtime.getRuntime.availableProcessors() / 2*/ 2 max 2, new ThreadFactory { def newThread(r: Runnable): Thread = { val t = new Thread(r, s"scalaz-stream-nio-${idx.incrementAndGet()}") t.setDaemon(true) t } } ) } def scheduler = { Executors.newScheduledThreadPool(4, new ThreadFactory { def newThread(r: Runnable) = { val t = Executors.defaultThreadFactory.newThread(r) t.setDaemon(true) t.setName("scheduled-task-thread") t } }) } }
mandubian/zpark-ztream
src/main/scala/nio.scala
Scala
apache-2.0
9,855
package scala.meta package taxonomic import org.scalameta.data._ import org.scalameta.unreachable @data class TaxonomicException(artifact: Artifact, message: String, cause: Option[Throwable]) extends Exception(s"failed to resolve $artifact because $message", cause.orNull) with ScalametaException { def this(artifact: Artifact, message: String) = this(artifact, message, None) def this(artifact: Artifact, message: String, cause: Throwable) = this(artifact, message, Some(cause)) override def toString = super.toString }
beni55/scalameta
scalameta/taxonomic/src/main/scala/scala/meta/taxonomic/Exceptions.scala
Scala
bsd-3-clause
529
// scalac: -Werror -Xlint:deprecation // object Test { def foo(i: Int, l: Long): Unit = { val i_f: Float = i // deprecated val i_d: Double = i // OK val l_f: Float = l // deprecated val l_d: Double = l // deprecated } def imp: Unit = { implicitly[Int => Float] // deprecated implicitly[Int => Double] // OK implicitly[Long => Float] // deprecated implicitly[Long => Double] // deprecated } // don't leak silent warning from float conversion val n = 42 def clean = n max 27 val posFloat:Float = 16777216L // OK val truncatedPosFloat:Float = 16777217L // deprecated val losslessPosFloat:Float = 16777218L // OK -- lossless val negFloat: Float = - 16777216L // OK val truncatedNegFloat: Float = - 16777217L // deprecated val losslessNegFloat: Float = - 16777218L // OK -- lossless val posFloatI:Float = 16777216 // OK val truncatedPosFloatI:Float = 16777217 // deprecated val losslessPosFloatI:Float = 16777218 // OK -- lossless val negFloatI: Float = - 16777216 // OK val truncatedNegFloatI: Float = - 16777217 // deprecated val losslessNegFloatI: Float = - 16777218 // OK -- lossless val posDouble:Double = 18014398509481984L// OK val truncatedPosDouble:Double = 18014398509481985L // deprecated val losslessPosDouble:Double = 18014398509481988L // OK -- lossless val negDouble: Double = - 18014398509481984L // OK val truncatedNegDouble: Double = - 18014398509481985L // deprecated val losslessNegDouble: Double = - 18014398509481988L // OK -- lossless // literals don't get a pass -- *especially* literals! // 0x7ffffffc0 - 0x7fffffff // Set[Float](2147483584, 2147483645, 2147483646, 2147483647) def literals = Set[Float](0x7fffffc0, 0x7ffffffd, 0x7ffffffe, 0x7fffffff) def longingly = Set[Float](0x7fffffc0L, 0x7ffffffdL, 0x7ffffffeL, 0x7fffffffL) def `pick one` = Set[Float](0x1000003, 0x1000004, 0x1000005) def `no warn` = 1f + 2147483584 def `no warn either` = 2147483584 + 1f def f = 1f def `no warn sowieso` = f + 2147483584 }
lrytz/scala
test/files/neg/deprecated_widening.scala
Scala
apache-2.0
2,059
package ua.com.serious_panda.settings import java.awt.event.{ActionEvent, ActionListener} import java.util.Locale import javax.swing._ import javax.swing.event.{ChangeEvent, ChangeListener} import com.jhlabs.awt.ParagraphLayout /** * * Created by aleo on 03.08.14. */ case class Property[T](key: String, defaultValue: T) extends PropertiesTrait[T] { def this(key: String, defaultValue: T, helpText: Option[String] = None, nameResourceBundle: String = null, keyInResourceBundle: String = null) { this(key, defaultValue) this.nameResourceBundle = nameResourceBundle this.keyInResourceBundle = keyInResourceBundle this._helpText = helpText } /** * Метод який перетворює об’єкт у строку * @param value об’єкт який необхідно перетворити у строку * @return об’єкт перетворений у строку */ override protected def objectToString(value: T): String = { value match { case x: Long => String.valueOf(x) case x: Int => String.valueOf(x) case x: Short => String.valueOf(x) case x: Byte => String.valueOf(x) case x: Double => String.valueOf(x) case x: Float => String.valueOf(x) case x: String => x case x: Boolean => String.valueOf(x) case x: java.util.Locale => s"${x.getLanguage}_${x.getCountry}_${x.getVariant}" case _ => throw new IllegalArgumentException("Not support Type") } } /** * Метод який перетворює строку у об’єкт * @param valueString строка яку необхідно перетворити у об’єкт * @return об’єкт */ override protected def stringToObject(valueString: String): T = { val res = this match { case Property(key: String, defaultValue: Long) => valueString.toLong case Property(key: String, defaultValue: Int) => valueString.toInt case Property(key: String, defaultValue: Short) => valueString.toShort case Property(key: String, defaultValue: Byte) => valueString.toByte case Property(key: String, defaultValue: Float) => valueString.toFloat case Property(key: String, defaultValue: Double) => valueString.toDouble case Property(key: String, defaultValue: String) => valueString case Property(key: String, defaultValue: Boolean) => valueString.toBoolean case Property(key: String, defaultValue: Locale) => val arr = valueString.split("_") arr.length match { case 1 => new Locale(arr(0)) case 2 => new Locale(arr(0), arr(1)) case 3 => new Locale(arr(0), arr(1), arr(2)) case _ => throw new IllegalArgumentException } case _ => throw new IllegalArgumentException } res.asInstanceOf[T] } def inc(implicit file: java.io.File): T = { val oldValue = this.value val v = this match { case x@Property(key: String, defaultValue: Long) => oldValue.asInstanceOf[Long] + 1L case x@Property(key: String, defaultValue: Int) => oldValue.asInstanceOf[Int] + 1 case x@Property(key: String, defaultValue: Short) => oldValue.asInstanceOf[Short] + 1 case x@Property(key: String, defaultValue: Byte) => oldValue.asInstanceOf[Byte] + 1 case x@Property(key: String, defaultValue: Float) => oldValue.asInstanceOf[Float] + 1 case x@Property(key: String, defaultValue: Double) => oldValue.asInstanceOf[Double] + 1.0 case _ => throw new UnsupportedOperationException } val newValue = v.asInstanceOf[T] this.value = newValue newValue } def dec(implicit file: java.io.File): T = { val oldValue = this.value val v = this match { case x@Property(key: String, defaultValue: Long) => oldValue.asInstanceOf[Long] - 1L case x@Property(key: String, defaultValue: Int) => oldValue.asInstanceOf[Int] - 1 case x@Property(key: String, defaultValue: Short) => oldValue.asInstanceOf[Short] - 1 case x@Property(key: String, defaultValue: Byte) => oldValue.asInstanceOf[Byte] - 1 case x@Property(key: String, 
defaultValue: Float) => oldValue.asInstanceOf[Float] - 1 case x@Property(key: String, defaultValue: Double) => oldValue.asInstanceOf[Double] - 1.0 case _ => throw new UnsupportedOperationException } val newValue = v.asInstanceOf[T] this.value = newValue newValue } override def editableComponent(implicit file: java.io.File): JComponent = { var component: JComponent = null this match { case Property(key: String, defaultValue: String) => component = new JTextField(this.value.toString, this.value.toString.length) component.asInstanceOf[JTextField].addActionListener(new ActionListener { override def actionPerformed(event: ActionEvent): Unit = { value = stringToObject(component.asInstanceOf[JTextField].getText) } }) case Property(key: String, defaultValue: Long) => component = new JSpinner(new SpinnerNumberModel(this.value.asInstanceOf[Long], Long.MinValue, Long.MaxValue, 1)) component.asInstanceOf[JSpinner].addChangeListener(new ChangeListener { override def stateChanged(event: ChangeEvent): Unit = { value = stringToObject(event.getSource.asInstanceOf[JSpinner].getValue.toString.toDouble.toLong.toString) } }) case Property(key: String, defaultValue: Int) => component = new JSpinner(new SpinnerNumberModel(this.value.asInstanceOf[Int], Int.MinValue, Int.MaxValue, 1)) component.asInstanceOf[JSpinner].addChangeListener(new ChangeListener { override def stateChanged(event: ChangeEvent): Unit = { value = stringToObject(event.getSource.asInstanceOf[JSpinner].getValue.toString.toDouble.toInt.toString) } }) case Property(key: String, defaultValue: Short) => component = new JSpinner(new SpinnerNumberModel(this.value.asInstanceOf[Short], Short.MinValue, Short.MaxValue, 1)) component.asInstanceOf[JSpinner].addChangeListener(new ChangeListener { override def stateChanged(event: ChangeEvent): Unit = { value = stringToObject(event.getSource.asInstanceOf[JSpinner].getValue.toString.toDouble.toShort.toString) } }) case Property(key: String, defaultValue: Byte) => component = new JSpinner(new SpinnerNumberModel(this.value.asInstanceOf[Byte], Byte.MinValue, Byte.MaxValue, 1)) component.asInstanceOf[JSpinner].addChangeListener(new ChangeListener { override def stateChanged(event: ChangeEvent): Unit = { value = stringToObject(event.getSource.asInstanceOf[JSpinner].getValue.toString.toDouble.toByte.toString) } }) case Property(key: String, defaultValue: Float) => component = new JSpinner(new SpinnerNumberModel(this.value.asInstanceOf[Float], Long.MinValue, Long.MaxValue, 0.5)) component.asInstanceOf[JSpinner].addChangeListener(new ChangeListener { override def stateChanged(event: ChangeEvent): Unit = { value = stringToObject(event.getSource.asInstanceOf[JSpinner].getValue.toString) } }) case Property(key: String, defaultValue: Double) => component = new JSpinner(new SpinnerNumberModel(this.value.asInstanceOf[Double], Long.MinValue, Long.MaxValue, 0.5)) component.asInstanceOf[JSpinner].addChangeListener(new ChangeListener { override def stateChanged(event: ChangeEvent): Unit = { value = stringToObject(event.getSource.asInstanceOf[JSpinner].getValue.toString) } }) case Property(key: String, defaultValue: Boolean) => component = new JCheckBox() component.asInstanceOf[JCheckBox].setSelected(this.value.asInstanceOf[Boolean]) component.asInstanceOf[JCheckBox].addActionListener(new ActionListener { override def actionPerformed(event: ActionEvent): Unit = { value = component.asInstanceOf[JCheckBox].isSelected.asInstanceOf[T] } }) case Property(key: String, defaultValue: Locale) => component = new 
JComboBox[Locale](Locale.getAvailableLocales) component.asInstanceOf[JComboBox[Locale]].setSelectedItem(this.value.asInstanceOf[Locale]) component.asInstanceOf[JComboBox[Locale]].addActionListener(new ActionListener { override def actionPerformed(event: ActionEvent): Unit = { value = event.getSource.asInstanceOf[JComboBox[Locale]].getSelectedItem.asInstanceOf[T] } }) case _ => throw new UnsupportedOperationException } component.setEnabled(this.editable) component } }
aleo72/serious_panda.settings
src/main/scala/ua/com/serious_panda/settings/Property.scala
Scala
apache-2.0
8,746
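A brief usage sketch for the Property class above. The persistence logic lives in PropertiesTrait, which is not included here, so the implicit settings file and the fallback-to-default behaviour are assumptions; only the constructor, the value accessor and inc are taken from the code shown.

// --- usage sketch (not part of the repository above) ---
import java.io.File
import java.util.Locale
import ua.com.serious_panda.settings.Property

object PropertyUsageSketch extends App {
  // Assumption: PropertiesTrait resolves and stores values in this implicit file,
  // falling back to defaultValue when no entry exists yet.
  implicit val settingsFile: File = new File("settings.properties")

  val launchCount = Property[Int]("launch.count", 0)
  val userLocale  = Property[Locale]("user.locale", Locale.ENGLISH)

  // inc reads the current value, adds one and writes the result back.
  val launches = launchCount.inc
  println(s"Started $launches time(s), locale is ${userLocale.value}")
}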
/*
 * Copyright 2021 HM Revenue & Customs
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package uk.gov.hmrc.performance.simulation

import io.gatling.core.Predef._
import org.slf4j.{Logger, LoggerFactory}
import uk.gov.hmrc.performance.conf.HttpConfiguration

/** Trait extending `io.gatling.core.scenario.Simulation`. Use within a performance test to set up Journeys and invoke
  * `runSimulation()`, a method to configure the Simulation setup.
  */
trait PerformanceTestRunner extends Simulation with HttpConfiguration with JourneySetup {

  private val logger: Logger = LoggerFactory.getLogger(classOf[PerformanceTestRunner])

  /** Configures `io.gatling.core.scenario.Simulation.setUp`. This method is invoked from within a performance test.
    *
    * For smoke tests, i.e. when `uk.gov.hmrc.performance.conf.PerftestConfiguration.runSingleUserJourney` is `true`,
    * the setUp is configured to run only for 1 user.
    *
    * For a full test, the setUp is configured with:
    *
    *   - Journeys and the load to run
    *   - Duration of the run
    *   - Protocol configuration and
    *   - Assertions
    */
  def runSimulation(): Unit = {
    logger.info(s"Setting up simulation ")

    if (runSingleUserJourney) {
      logger.info(
        s"'perftest.runSmokeTest' is set to true, ignoring all loads and running with only one user per journey!"
      )

      val injectedBuilders = journeys.map { scenario =>
        scenario.builder.inject(atOnceUsers(1))
      }

      setUp(injectedBuilders: _*)
        .protocols(httpProtocol)
        .assertions(global.failedRequests.count.is(0))
    } else {
      setUp(withInjectedLoad(journeys): _*)
        .protocols(httpProtocol)
        .assertions(global.failedRequests.percent.lte(percentageFailureThreshold))
        .assertions(forAll.failedRequests.percent.lte(requestPercentageFailureThreshold))
    }
  }
}
hmrc/performance-test-runner
src/main/scala/uk/gov/hmrc/performance/simulation/PerformanceTestRunner.scala
Scala
apache-2.0
2,400
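A minimal sketch of how runSimulation() above is typically invoked from a Gatling simulation. The setup(...) withRequests (...) builder comes from JourneySetup, which is not part of this file, and the URL is a placeholder, so treat the journey definition as an assumption.

// --- usage sketch (not part of the repository above) ---
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import uk.gov.hmrc.performance.simulation.PerformanceTestRunner

class ExampleSimulation extends PerformanceTestRunner {

  // Hypothetical request against a locally running service.
  val getHomePage = http("get-home-page")
    .get("http://localhost:9000/")
    .check(status.is(200))

  // Assumption: JourneySetup exposes a setup(id, description) withRequests (...) builder.
  setup("home-page", "Load the home page") withRequests getHomePage

  runSimulation()
}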
/* * Copyright 2021 HM Revenue & Customs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package connectors import config.ApplicationConfig import models.notifications._ import org.joda.time.{DateTime, DateTimeZone} import org.mockito.Matchers._ import org.mockito.Mockito._ import org.scalatest.concurrent.ScalaFutures import org.scalatestplus.mockito.MockitoSugar import org.scalatestplus.play.PlaySpec import uk.gov.hmrc.http._ import uk.gov.hmrc.http.HttpClient import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.Future class AmlsNotificationConnectorSpec extends PlaySpec with MockitoSugar with ScalaFutures { val safeId = "SAFEID" val accountTypeId = ("org","id") val amlsRegistrationNumber = "amlsRefNumber" val dateTime = new DateTime(1479730062573L, DateTimeZone.UTC) implicit val hc = HeaderCarrier() private trait Fixture { val connector = new AmlsNotificationConnector (mock[HttpClient], mock[ApplicationConfig]) } "AmlsNotificationConnector" must { "retrieve notifications" when { "given amlsRegNo" in new Fixture { val response = Seq( NotificationRow(None, None, None, true, new DateTime(1981, 12, 1, 1, 3, DateTimeZone.UTC), false, "amlsRefNumber", "1", IDType("")) ) when { connector.http.GET[Seq[NotificationRow]](any(), any(), any())(any(), any(), any()) } thenReturn Future.successful(response) whenReady(connector.fetchAllByAmlsRegNo(amlsRegistrationNumber, accountTypeId)) { _ mustBe response } } "given safeId" in new Fixture { val safeId = "AA1234567891234" val response = Seq( NotificationRow(None, None, None, true, new DateTime(1981, 12, 1, 1, 3, DateTimeZone.UTC), false, "XJML00000200000", "1", IDType("")) ) when { connector.http.GET[Seq[NotificationRow]](any(), any(), any())(any(), any(), any()) } thenReturn Future.successful(response) whenReady(connector.fetchAllBySafeId(safeId, accountTypeId)) { _ mustBe response } } } "the call to notification service is successful (using Amls Reg No)" must { "return the response" in new Fixture { when(connector.http.GET[NotificationDetails](any(), any(), any())(any(), any(), any())) .thenReturn(Future.successful(NotificationDetails( Some(ContactType.MindedToReject), Some(Status(Some(StatusType.Approved), Some(RejectedReason.FailedToPayCharges))), Some("Text of the message"), false, dateTime ))) whenReady(connector.getMessageDetailsByAmlsRegNo(amlsRegistrationNumber, "NOTIFICATIONID", accountTypeId)) { result => result must be (Some(NotificationDetails( Some(ContactType.MindedToReject), Some(Status(Some(StatusType.Approved), Some(RejectedReason.FailedToPayCharges))), Some("Text of the message"), false, dateTime ))) } } } "the call to notification service returns a Bad Request" must { "Fail the future with an upstream 5xx exception (using amls reg no)" in new Fixture { when(connector.http.GET[NotificationDetails](any(), any(), any())(any(), any(), any())) .thenReturn(Future.failed(new BadRequestException("GET of blah returned status 400."))) whenReady(connector.getMessageDetailsByAmlsRegNo(amlsRegistrationNumber, "NOTIFICATIONID", 
accountTypeId).failed) { exception => exception mustBe a[BadRequestException] } } } "the call to notification service returns Not Found (when using amls reg no)" must { "return a None " in new Fixture { when(connector.http.GET[NotificationDetails](any(), any(), any())(any(), any(), any())) .thenReturn(Future.failed(new NotFoundException("GET of blah returned status 404."))) whenReady(connector.getMessageDetailsByAmlsRegNo(amlsRegistrationNumber, "NOTIFICATIONID", accountTypeId)) { result => result must be (None) } } } } }
hmrc/amls-frontend
test/connectors/AmlsNotificationConnectorSpec.scala
Scala
apache-2.0
4,694
/*
 * Copyright 2016 The BigDL Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.intel.analytics.bigdl.dllib.nn.ops

import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.utils.serializer.ModuleSerializationTest

import scala.util.Random

class IsFiniteSerialTest extends ModuleSerializationTest {
  override def test(): Unit = {
    val isFinite = IsFinite[Float, Float]().setName("isFinite")
    val input = Tensor[Float](2, 5).apply1(_ => Random.nextFloat())
    runSerializationTest(isFinite, input)
  }
}
intel-analytics/BigDL
scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/nn/ops/IsFiniteSpec.scala
Scala
apache-2.0
1,082
package edu.gemini.seqexec.server import java.util import java.util.logging.Logger import edu.gemini.seqexec.server.tcs.{BinaryYesNo, BinaryOnOff} import squants.time.Seconds import collection.JavaConversions._ import edu.gemini.seqexec.server.TcsController._ import edu.gemini.epics.acm.{CaAttributeListener, CaService, XMLBuilder} import edu.gemini.spModel.core.Wavelength import squants.space.{Degrees, Microns, Millimeters} import scalaz._ import Scalaz._ import scalaz.concurrent.Task /** * Created by jluhrs on 9/7/15. */ object TcsControllerEpics extends TcsController { private val Log = Logger.getLogger(getClass.getName) import EpicsCodex._ import FollowOption._ import MountGuideOption._ // Code to retrieve the current configuration from TCS. Include a lot of decoders implicit private val decodeMountGuideOption: DecodeEpicsValue[Integer, MountGuideOption] = DecodeEpicsValue((d: Integer) => if (d == 0) MountGuideOff else MountGuideOn) implicit private val decodeM1GuideSource: DecodeEpicsValue[String, M1Source] = DecodeEpicsValue((s: String) => s.trim match { case "PWFS1" => M1Source.PWFS1 case "PWFS2" => M1Source.PWFS2 case "OIWFS" => M1Source.OIWFS case "GAOS" => M1Source.GAOS case _ => M1Source.PWFS1 }) private def decodeM1Guide(r: BinaryOnOff, s: M1Source): M1GuideConfig = if (r == BinaryOnOff.Off) M1GuideOff else M1GuideOn(s) private def decodeGuideSourceOption(s: String): Boolean = s.trim == "ON" implicit private val decodeComaOption: DecodeEpicsValue[String, ComaOption] = DecodeEpicsValue((s: String) => if (s.trim == "Off") ComaOption.ComaOff else ComaOption.ComaOn) private def decodeM2Guide(s: BinaryOnOff, u: ComaOption, v: Set[TipTiltSource]): M2GuideConfig = if (s == BinaryOnOff.Off) M2GuideOff else M2GuideOn(u, v) private def getGuideConfig: TrySeq[GuideConfig] = { for { mountGuide <- TcsEpics.absorbTipTilt.map(decode[Integer, MountGuideOption]) m1Source <- TcsEpics.m1GuideSource.map(decode[String, M1Source]) m1Guide <- TcsEpics.m1Guide.map(decodeM1Guide(_, m1Source)) m2p1Guide <- TcsEpics.m2p1Guide.map(decodeGuideSourceOption) m2p2Guide <- TcsEpics.m2p2Guide.map(decodeGuideSourceOption) m2oiGuide <- TcsEpics.m2oiGuide.map(decodeGuideSourceOption) m2aoGuide <- TcsEpics.m2aoGuide.map(decodeGuideSourceOption) m2Coma <- TcsEpics.comaCorrect.map(decode[String, ComaOption]) m2Guide <- TcsEpics.m2GuideState.map(decodeM2Guide(_, m2Coma, List((m2p1Guide, TipTiltSource.PWFS1), (m2p2Guide, TipTiltSource.PWFS2), (m2oiGuide, TipTiltSource.OIWFS), (m2aoGuide, TipTiltSource.GAOS)).foldLeft(Set[TipTiltSource]())((s: Set[TipTiltSource], v: (Boolean, TipTiltSource)) => if (v._1) s + v._2 else s))) } yield TrySeq(GuideConfig(mountGuide, m1Guide, m2Guide)) }.getOrElse(TrySeq.fail(SeqexecFailure.Unexpected("Unable to read guide configuration from TCS."))) implicit private val decodeBeam: DecodeEpicsValue[String, Beam] = DecodeEpicsValue((op: String) => op match { case "A" => Beam.A case "B" => Beam.B case "C" => Beam.C case _ => Beam.A } ) private def getTelescopeConfig: TrySeq[TelescopeConfig] = { for { xOffsetA <- TcsEpics.xoffsetPoA1 yOffsetA <- TcsEpics.yoffsetPoA1 xOffsetB <- TcsEpics.xoffsetPoB1 yOffsetB <- TcsEpics.yoffsetPoB1 xOffsetC <- TcsEpics.xoffsetPoC1 yOffsetC <- TcsEpics.yoffsetPoC1 wavelengthA <- TcsEpics.sourceAWavelength wavelengthB <- TcsEpics.sourceBWavelength wavelengthC <- TcsEpics.sourceCWavelength m2Beam <- TcsEpics.chopBeam.map(decode[String, Beam]) } yield TrySeq(TelescopeConfig( OffsetA(FocalPlaneOffset(OffsetX(Millimeters[Double](xOffsetA)), 
OffsetY(Millimeters[Double](yOffsetA)))), OffsetB(FocalPlaneOffset(OffsetX(Millimeters[Double](xOffsetB)), OffsetY(Millimeters[Double](yOffsetB)))), OffsetC(FocalPlaneOffset(OffsetX(Millimeters[Double](xOffsetC)), OffsetY(Millimeters[Double](yOffsetC)))), WavelengthA(Wavelength(Microns[Double](wavelengthA))), WavelengthB(Wavelength(Microns[Double](wavelengthB))), WavelengthC(Wavelength(Microns[Double](wavelengthC))), m2Beam )) }.getOrElse(TrySeq.fail(SeqexecFailure.Unexpected("Unable to read telescope configuration from TCS."))) private def decodeNodChopOption(s: String): Boolean = s.trim == "On" private def getNodChopTrackingConfig(g: TcsEpics.ProbeGuideConfig): Option[NodChopTrackingConfig] = for { aa <- g.nodachopa.map(decodeNodChopOption) ab <- g.nodachopb.map(decodeNodChopOption) ac <- g.nodachopc.map(decodeNodChopOption) ba <- g.nodbchopa.map(decodeNodChopOption) bb <- g.nodbchopb.map(decodeNodChopOption) bc <- g.nodbchopc.map(decodeNodChopOption) ca <- g.nodcchopa.map(decodeNodChopOption) cb <- g.nodcchopb.map(decodeNodChopOption) cc <- g.nodcchopc.map(decodeNodChopOption) // This last production is slightly tricky. o <- if (List(aa, ab, ac, ba, bb, bc, ca, cb, cc).exists(_ == true)) { if (List(aa, bb, cc).forall(_ == true) && List(ab, ac, ba, bc, ca, cb).forall(_ == false)) { Some(NodChopTrackingConfig.Normal) } else { List( (aa, NodChop(Beam.A, Beam.A)), (ab, NodChop(Beam.A, Beam.B)), (ac, NodChop(Beam.A, Beam.C)), (ba, NodChop(Beam.B, Beam.A)), (bb, NodChop(Beam.B, Beam.B)), (bc, NodChop(Beam.B, Beam.C)), (ca, NodChop(Beam.C, Beam.A)), (cb, NodChop(Beam.C, Beam.B)), (cc, NodChop(Beam.C, Beam.C)) ) collect { case (true, a) => a } match { case h :: t => Some(NodChopTrackingConfig.Special(OneAnd(h, t.toSet))) case Nil => None // the list is empty } } } else Some(NodChopTrackingConfig.None) } yield o private def calcProbeTrackingConfig(f: FollowOption, t: NodChopTrackingConfig): ProbeTrackingConfig = (f, t) match { case (_, NodChopTrackingConfig.None) => ProbeTrackingConfig.Off case (FollowOn, NodChopTrackingConfig.Normal) => ProbeTrackingConfig.On(NodChopTrackingConfig.Normal) case (FollowOn, v: NodChopTrackingConfig.Special) => ProbeTrackingConfig.On(v) case _ => ProbeTrackingConfig.Off } implicit private val decodeFollowOption: DecodeEpicsValue[String, FollowOption] = DecodeEpicsValue((s: String) => if (s.trim == "Off") FollowOff else FollowOn) private def getGuidersTrackingConfig: TrySeq[GuidersTrackingConfig] = { for { p1 <- getNodChopTrackingConfig(TcsEpics.pwfs1ProbeGuideConfig) p2 <- getNodChopTrackingConfig(TcsEpics.pwfs2ProbeGuideConfig) oi <- getNodChopTrackingConfig(TcsEpics.oiwfsProbeGuideConfig) p1Follow <- TcsEpics.p1FollowS.map(decode[String, FollowOption]) p2Follow <- TcsEpics.p2FollowS.map(decode[String, FollowOption]) oiFollow <- TcsEpics.oiFollowS.map(decode[String, FollowOption]) } yield TrySeq(GuidersTrackingConfig(ProbeTrackingConfigP1(calcProbeTrackingConfig(p1Follow, p1)), ProbeTrackingConfigP2(calcProbeTrackingConfig(p2Follow, p2)), ProbeTrackingConfigOI(calcProbeTrackingConfig(oiFollow, oi)), ProbeTrackingConfigAO(ProbeTrackingConfig.Off))) }.getOrElse(TrySeq.fail(SeqexecFailure.Unexpected("Unable to read probes guide from TCS."))) implicit private val decodeGuideSensorOption: DecodeEpicsValue[BinaryYesNo, GuiderSensorOption] = DecodeEpicsValue((s: BinaryYesNo) => if (s == BinaryYesNo.No) GuiderSensorOff else GuiderSensorOn) implicit private val decodeAltairSensorOption: DecodeEpicsValue[Double, GuiderSensorOption] = DecodeEpicsValue((s: Double) => if (s == 
0.0) GuiderSensorOff else GuiderSensorOn) private def getGuidersEnabled: TrySeq[GuidersEnabled] = { for { p1On <- TcsEpics.pwfs1On.map(decode[BinaryYesNo, GuiderSensorOption]) p2On <- TcsEpics.pwfs2On.map(decode[BinaryYesNo, GuiderSensorOption]) oiOn <- TcsEpics.oiwfsOn.map(decode[BinaryYesNo, GuiderSensorOption]) aoOn <- TcsEpics.aowfsOn.map(decode[Double, GuiderSensorOption]) } yield TrySeq(GuidersEnabled(GuiderSensorOptionP1(p1On), GuiderSensorOptionP2(p2On), GuiderSensorOptionOI(oiOn), GuiderSensorOptionAO(aoOn))) }.getOrElse(TrySeq.fail(SeqexecFailure.Unexpected("Unable to read guider detectors state from TCS."))) // Decoding and encoding the science fold position require some common definitions, therefore I put them inside an // object private object CodexScienceFoldPosition { import LightSource._ import ScienceFoldPosition._ private val AO_PREFIX = "ao2" private val GCAL_PREFIX = "gcal2" // I have the feeling this operation will be needed in other places private def findInstrument(name: String): Instrument = List(GmosSouth).find(x => name.startsWith(x.sfName)).getOrElse(UnknownInstrument) implicit val decodeScienceFoldPosition: DecodeEpicsValue[String, Position] = DecodeEpicsValue((t: String) => if (t.startsWith(AO_PREFIX)) Position(AO, findInstrument(t.substring(AO_PREFIX.length))) else if (t.startsWith(GCAL_PREFIX)) Position(GCAL, findInstrument(t.substring(GCAL_PREFIX.length))) else Position(Sky, findInstrument(t))) implicit val encodeScienceFoldPosition: EncodeEpicsValue[Position, String] = EncodeEpicsValue((a: Position) => a.source match { case Sky => a.sink.sfName case AO => AO_PREFIX + a.sink.sfName case GCAL => GCAL_PREFIX + a.sink.sfName } ) } import CodexScienceFoldPosition._ private def getScienceFoldPosition: Option[ScienceFoldPosition] = for { sfPos <- TcsEpics.sfName.map(decode[String, ScienceFoldPosition.Position]) sfParked <- TcsEpics.sfParked.map { _ != 0 } } yield if (sfParked) ScienceFoldPosition.Parked else sfPos implicit val decodeHwrsPickupPosition: DecodeEpicsValue[String, HrwfsPickupPosition] = DecodeEpicsValue((t: String) => if (t.trim == "IN") HrwfsPickupPosition.IN else HrwfsPickupPosition.OUT) private def getHrwfsPickupPosition: Option[HrwfsPickupPosition] = for { hwPos <- TcsEpics.agHwName.map(decode[String, HrwfsPickupPosition]) hwParked <- TcsEpics.agHwParked.map { _ != 0 } } yield if (hwParked) HrwfsPickupPosition.Parked else hwPos private def getAGConfig: TrySeq[AGConfig] = { for { sf <- getScienceFoldPosition hrwfs <- getHrwfsPickupPosition } yield TrySeq(AGConfig(sf, hrwfs)) }.getOrElse(TrySeq.fail(SeqexecFailure.Unexpected("Unable to read AG state from TCS."))) private def getIAA: TrySeq[InstrumentAlignAngle] = { for { iaa <- TcsEpics.instrAA } yield TrySeq(InstrumentAlignAngle(Degrees[Double](iaa))) }.getOrElse(TrySeq.fail(SeqexecFailure.Unexpected("Unable to read IAA from TCS."))) override def getConfig: SeqAction[TcsConfig] = EitherT ( Task { for { gc <- getGuideConfig tc <- getTelescopeConfig gtc <- getGuidersTrackingConfig ge <- getGuidersEnabled agc <- getAGConfig iaa <- getIAA } yield TcsConfig(gc, tc, gtc, ge, agc, iaa) } ) // Here starts the code that set the TCS configuration. There are a lot of encoders. 
implicit private val encodeBeam: EncodeEpicsValue[Beam, String] = EncodeEpicsValue((op: Beam) => op match { case Beam.A => "A" case Beam.B => "B" case Beam.C => "C" }) private def setTelescopeConfig(c: TelescopeConfig): SeqAction[Unit] = for { _ <- TcsEpics.offsetACmd.setX(c.offsetA.self.x.self.toMillimeters) _ <- TcsEpics.offsetACmd.setY(c.offsetA.self.y.self.toMillimeters) _ <- TcsEpics.offsetBCmd.setX(c.offsetB.self.x.self.toMillimeters) _ <- TcsEpics.offsetBCmd.setY(c.offsetB.self.y.self.toMillimeters) _ <- TcsEpics.offsetCCmd.setX(c.offsetC.self.x.self.toMillimeters) _ <- TcsEpics.offsetCCmd.setY(c.offsetC.self.y.self.toMillimeters) _ <- TcsEpics.wavelSourceA.setWavel(c.wavelA.self.toMicrons) _ <- TcsEpics.wavelSourceA.setWavel(c.wavelB.self.toMicrons) _ <- TcsEpics.wavelSourceA.setWavel(c.wavelB.self.toMicrons) _ <- TcsEpics.m2Beam.setBeam(encode(c.m2beam)) } yield TrySeq(()) implicit private val encodeNodChopOption: EncodeEpicsValue[NodChopTrackingOption, String] = EncodeEpicsValue { (op: NodChopTrackingOption) => op match { case NodChopTrackingOption.NodChopTrackingOn => "on" case NodChopTrackingOption.NodChopTrackingOff => "off" } } private def setProbeTrackingConfig(s: TcsEpics.ProbeGuideCmd, c: ProbeTrackingConfig) = for { _ <- s.setNodachopa(encode(c.getNodChop.get(NodChop(Beam.A, Beam.A)))) _ <- s.setNodachopb(encode(c.getNodChop.get(NodChop(Beam.A, Beam.B)))) _ <- s.setNodachopc(encode(c.getNodChop.get(NodChop(Beam.A, Beam.C)))) _ <- s.setNodbchopa(encode(c.getNodChop.get(NodChop(Beam.B, Beam.A)))) _ <- s.setNodbchopb(encode(c.getNodChop.get(NodChop(Beam.B, Beam.B)))) _ <- s.setNodbchopc(encode(c.getNodChop.get(NodChop(Beam.B, Beam.C)))) _ <- s.setNodcchopa(encode(c.getNodChop.get(NodChop(Beam.C, Beam.A)))) _ <- s.setNodcchopb(encode(c.getNodChop.get(NodChop(Beam.C, Beam.B)))) _ <- s.setNodcchopc(encode(c.getNodChop.get(NodChop(Beam.C, Beam.C)))) } yield TrySeq(()) private def setGuiderWfs(on: TcsEpics.WfsObserveCmd, off: EpicsCommand, c: GuiderSensorOption): SeqAction[Unit] = c match { case GuiderSensorOff => off.mark case GuiderSensorOn => on.setNoexp(-1) // Set number of exposures to non-stop (-1) } private def setGuidersWfs(c: GuidersEnabled): SeqAction[Unit] = for { _ <- setGuiderWfs(TcsEpics.pwfs1ObserveCmd, TcsEpics.pwfs1StopObserveCmd, c.pwfs1.self) _ <- setGuiderWfs(TcsEpics.pwfs2ObserveCmd, TcsEpics.pwfs2StopObserveCmd, c.pwfs2.self) _ <- setGuiderWfs(TcsEpics.oiwfsObserveCmd, TcsEpics.oiwfsStopObserveCmd, c.oiwfs.self) } yield TrySeq(()) private def setProbesTrackingConfig(c: GuidersTrackingConfig): SeqAction[Unit] = for { _ <- setProbeTrackingConfig(TcsEpics.pwfs1ProbeGuideCmd, c.pwfs1.self) _ <- setProbeTrackingConfig(TcsEpics.pwfs2ProbeGuideCmd, c.pwfs2.self) _ <- setProbeTrackingConfig(TcsEpics.oiwfsProbeGuideCmd, c.oiwfs.self) } yield TrySeq(()) def setScienceFoldConfig(sfPos: ScienceFoldPosition): SeqAction[Unit] = sfPos match { case ScienceFoldPosition.Parked => TcsEpics.scienceFoldParkCmd.mark case p: ScienceFoldPosition.Position => TcsEpics.scienceFoldPosCmd.setScfold(encode(p)) } implicit private val encodeHrwfsPickupPosition: EncodeEpicsValue[HrwfsPickupPosition, String] = EncodeEpicsValue((op: HrwfsPickupPosition) => op match { case HrwfsPickupPosition.IN => "IN" case HrwfsPickupPosition.OUT => "OUT" case HrwfsPickupPosition.Parked => "park-pos." 
}) def setHRPickupConfig(hrwfsPos: HrwfsPickupPosition): SeqAction[Unit] = hrwfsPos match { case HrwfsPickupPosition.Parked => TcsEpics.hrwfsParkCmd.mark case _ => TcsEpics.hrwfsPosCmd.setHrwfsPos(encode(hrwfsPos)) } private def setAGConfig(c: AGConfig): SeqAction[Unit] = for { _ <- setScienceFoldConfig(c.sfPos) _ <- setHRPickupConfig(c.hrwfsPos) } yield TrySeq(()) implicit private val encodeMountGuideConfig: EncodeEpicsValue[MountGuideOption, String] = EncodeEpicsValue((op: MountGuideOption) => op match { case MountGuideOn => "on" case MountGuideOff => "off" }) private def setMountGuide(c: MountGuideOption): SeqAction[Unit] = TcsEpics.mountGuideCmd.setMode(encode(c)) implicit private val encodeM1GuideConfig: EncodeEpicsValue[M1GuideConfig, String] = EncodeEpicsValue((op: M1GuideConfig) => op match { case M1GuideOn(_) => "on" case M1GuideOff => "off" }) private def setM1Guide(c: M1GuideConfig): SeqAction[Unit] = TcsEpics.m1GuideCmd.setState(encode(c)) implicit private val encodeM2GuideConfig: EncodeEpicsValue[M2GuideConfig, String] = EncodeEpicsValue((op: M2GuideConfig) => op match { case M2GuideOn(_, _) => "on" case M2GuideOff => "off" }) private def setM2Guide(c: M2GuideConfig): SeqAction[Unit] = TcsEpics.m2GuideCmd.setState(encode(c)) implicit private val decodeInPosition: DecodeEpicsValue[String, Boolean] = DecodeEpicsValue(x => x.trim == "TRUE") override def applyConfig(tc: TelescopeConfig, gtc: GuidersTrackingConfig, ge: GuidersEnabled, agc: AGConfig): SeqAction[Unit] = for { _ <- setTelescopeConfig(tc) _ <- setProbesTrackingConfig(gtc) _ <- setGuidersWfs(ge) _ <- setAGConfig(agc) _ <- TcsEpics.post _ <- EitherT(Task(Log.info("TCS configuration command post").right)) _ <- TcsEpics.waitInPosition(Seconds(30)) _ <- EitherT(Task(Log.info("TCS inposition").right)) } yield TrySeq(()) override def guide(gc: GuideConfig): SeqAction[Unit] = for { _ <- setMountGuide(gc.mountGuide) _ <- setM1Guide(gc.m1Guide) _ <- setM2Guide(gc.m2Guide) _ <- TcsEpics.post } yield TrySeq(()) }
arturog8m/ocs
bundle/edu.gemini.seqexec.server/src/main/scala/edu/gemini/seqexec/server/TcsControllerEpics.scala
Scala
bsd-3-clause
17,158
package org.allenai.common

import scala.collection.Iterator
import scala.io.{ Codec, Source }

import java.io.InputStream
import java.nio.{ ByteBuffer, CharBuffer }

/** Input stream wrapping a Source object, using the codec to convert characters to bytes. Not
  * thread-safe.
  */
class SourceInputStream(val source: Source)(implicit codec: Codec) extends InputStream {

  /** Buffer to write (potentially multi-byte) character encodings to. */
  private val outputBuffer = ByteBuffer.allocate(codec.encoder.maxBytesPerChar.ceil.toInt)

  /** Number of bytes left in our output buffer. */
  private var availableBytes = 0

  /** Buffer to re-use when passing characters to our encoder. */
  private val charBuffer = Array[Char](1)

  override def read: Int = {
    // If we have no available bytes read, but we have characters in our Source, read the next
    // character into our byte array.
    if (availableBytes <= 0 && source.hasNext) {
      readNextChar()
    }
    // At this point, if we have no bytes, we are at the end of our stream.
    if (availableBytes <= 0) {
      -1
    } else {
      availableBytes -= 1
      outputBuffer.get()
    }
  }

  /** Reads the next character from the underlying source, encodes it into `outputBuffer`, and sets
    * `availableBytes` to the number of bytes written.
    */
  private def readNextChar(): Unit = {
    // Reset the buffer before writing.
    outputBuffer.rewind()

    // Read & encode the result.
    charBuffer(0) = source.next()
    val result = codec.encoder.encode(CharBuffer.wrap(charBuffer), outputBuffer, false)
    if (result.isOverflow) {
      // Shouldn't happen unless there's a bug in the codec (the output buffer should always have
      // enough room).
      result.throwException()
    }

    // Set the available bytes & reset the buffer for read.
    availableBytes = outputBuffer.position
    outputBuffer.rewind()
  }
}
ryanai3/common
core/src/main/scala/org/allenai/common/SourceInputStream.scala
Scala
apache-2.0
1,905
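A small usage sketch for SourceInputStream: wrap an in-memory Source and read it back as UTF-8 bytes. Everything here follows directly from the constructor and read method shown above; the sample string is arbitrary.

// --- usage sketch (not part of the repository above) ---
import scala.io.{Codec, Source}
import org.allenai.common.SourceInputStream

object SourceInputStreamSketch extends App {
  implicit val codec: Codec = Codec.UTF8

  val in = new SourceInputStream(Source.fromString("héllo"))

  // Drain the stream one byte at a time, exactly as InputStream.read() is defined.
  val bytes = Iterator.continually(in.read).takeWhile(_ != -1).map(_.toByte).toArray

  println(bytes.length)               // 6: the accented character needs two UTF-8 bytes
  println(new String(bytes, "UTF-8")) // héllo
}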
import scala.language.existentials import scala.reflect.runtime.universe._ import internal._ object Test { trait ToS { final override def toString = getClass.getName } def f1 = { case class Bar() extends ToS; Bar } def f2 = { case class Bar() extends ToS; Bar() } def f3 = { class Bar() extends ToS; object Bar extends ToS; Bar } def f4 = { class Bar() extends ToS; new Bar() } def f5 = { object Bar extends ToS; Bar } def f6 = { () => { object Bar extends ToS ; Bar } } def f7 = { val f = { () => { object Bar extends ToS ; Bar } } ; f } def f8 = { trait A ; trait B extends A ; class C extends B with ToS; new C { } } def f9 = { trait A ; trait B ; class C extends B with A with ToS; new C { } } def f10 = { class A { type T1 } ; List[A#T1]() } def f11 = { abstract class A extends Seq[Int] ; List[A]() } def f12 = { abstract class A extends Seq[U forSome { type U <: Int }] ; List[A]() } val g1 = { case class Bar() extends ToS; Bar } val g2 = { case class Bar() extends ToS; Bar() } val g3 = { class Bar() extends ToS; object Bar extends ToS; Bar } val g4 = { class Bar() extends ToS; new Bar() } val g5 = { object Bar extends ToS; Bar } val g6 = { () => { object Bar extends ToS ; Bar } } val g7 = { val f = { () => { object Bar extends ToS ; Bar } } ; f } val g8 = { trait A ; trait B extends A ; class C extends B with ToS; new C { } } val g9 = { trait A ; trait B ; class C extends B with A with ToS; new C { } } val g10 = { class A { type T1 } ; List[A#T1]() } val g11 = { abstract class A extends Seq[Int] ; List[A]() } val g12 = { abstract class A extends Seq[U forSome { type U <: Int }] ; List[A]() } def printTpe(t: Type) = { val s = if (isFreeType(t.typeSymbol)) t.typeSymbol.info.toString else t.typeSymbol.toString println("%s, t=%s, s=%s".format(t, t.asInstanceOf[Product].productPrefix, s)) } def m[T: TypeTag](x: T) = printTpe(typeOf[T]) def m2[T: WeakTypeTag](x: T) = printTpe(implicitly[WeakTypeTag[T]].tpe) // tags do work for f10/g10 def main(args: Array[String]): Unit = { m2(f1) m2(f2) m(f3) m(f4) m(f5) m(f6) m(f7) m2(f8) m2(f9) m2(f10) m(f11) m(f12) m2(g1) m2(g2) m(g3) m(g4) m(g5) m(g6) m(g7) m2(g8) m2(g9) m2(g10) m(g11) m(g12) } } object Misc { trait Bippy { def bippy = "I'm Bippy!" } object o1 { def f1 = { trait A extends Seq[U forSome { type U <: Misc.Bippy }] ; abstract class B extends A ; trait C extends B ; (null: C) } def f2 = f1.head.bippy } def g1 = o1.f1 _ def g2 = o1.f2 _ }
yusuke2255/dotty
tests/pending/run/existentials3-new.scala
Scala
bsd-3-clause
2,616
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.flink.table.planner.plan.nodes.physical.batch

import org.apache.flink.table.planner.calcite.FlinkTypeFactory
import org.apache.flink.table.planner.plan.nodes.exec.batch.BatchExecPythonCorrelate
import org.apache.flink.table.planner.plan.nodes.exec.{ExecEdge, ExecNode}
import org.apache.flink.table.planner.plan.nodes.logical.FlinkLogicalTableFunctionScan
import org.apache.flink.table.planner.plan.utils.JoinTypeUtil

import org.apache.calcite.plan.{RelOptCluster, RelTraitSet}
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.core.{Correlate, JoinRelType}
import org.apache.calcite.rex.{RexCall, RexNode}

/**
 * Batch physical RelNode for [[Correlate]] (Python user defined table function).
 */
class BatchPhysicalPythonCorrelate(
    cluster: RelOptCluster,
    traitSet: RelTraitSet,
    inputRel: RelNode,
    scan: FlinkLogicalTableFunctionScan,
    condition: Option[RexNode],
    outputRowType: RelDataType,
    joinType: JoinRelType)
  extends BatchPhysicalCorrelateBase(
    cluster,
    traitSet,
    inputRel,
    scan,
    condition,
    outputRowType,
    joinType) {

  def copy(
      traitSet: RelTraitSet,
      child: RelNode,
      outputType: RelDataType): RelNode = {
    new BatchPhysicalPythonCorrelate(
      cluster,
      traitSet,
      child,
      scan,
      condition,
      outputType,
      joinType)
  }

  override def translateToExecNode(): ExecNode[_] = {
    new BatchExecPythonCorrelate(
      JoinTypeUtil.getFlinkJoinType(joinType),
      scan.getCall.asInstanceOf[RexCall],
      condition.orNull,
      ExecEdge.DEFAULT,
      FlinkTypeFactory.toLogicalRowType(getRowType),
      getRelDetailedDescription
    )
  }
}
aljoscha/flink
flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/batch/BatchPhysicalPythonCorrelate.scala
Scala
apache-2.0
2,559
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark import java.util.{Properties, Timer, TimerTask} import scala.concurrent.TimeoutException import scala.concurrent.duration._ import org.apache.spark.annotation.{Experimental, Since} import org.apache.spark.executor.TaskMetrics import org.apache.spark.internal.Logging import org.apache.spark.memory.TaskMemoryManager import org.apache.spark.metrics.source.Source import org.apache.spark.resource.ResourceInformation import org.apache.spark.rpc.{RpcEndpointRef, RpcTimeout} import org.apache.spark.shuffle.FetchFailedException import org.apache.spark.util._ /** * :: Experimental :: * A [[TaskContext]] with extra contextual info and tooling for tasks in a barrier stage. * Use [[BarrierTaskContext#get]] to obtain the barrier context for a running barrier task. */ @Experimental @Since("2.4.0") class BarrierTaskContext private[spark] ( taskContext: TaskContext) extends TaskContext with Logging { import BarrierTaskContext._ // Find the driver side RPCEndpointRef of the coordinator that handles all the barrier() calls. private val barrierCoordinator: RpcEndpointRef = { val env = SparkEnv.get RpcUtils.makeDriverRef("barrierSync", env.conf, env.rpcEnv) } // Local barrierEpoch that identify a barrier() call from current task, it shall be identical // with the driver side epoch. private var barrierEpoch = 0 // Number of tasks of the current barrier stage, a barrier() call must collect enough requests // from different tasks within the same barrier stage attempt to succeed. private lazy val numTasks = getTaskInfos().size /** * :: Experimental :: * Sets a global barrier and waits until all tasks in this stage hit this barrier. Similar to * MPI_Barrier function in MPI, the barrier() function call blocks until all tasks in the same * stage have reached this routine. * * CAUTION! In a barrier stage, each task must have the same number of barrier() calls, in all * possible code branches. Otherwise, you may get the job hanging or a SparkException after * timeout. Some examples of '''misuses''' are listed below: * 1. Only call barrier() function on a subset of all the tasks in the same barrier stage, it * shall lead to timeout of the function call. * {{{ * rdd.barrier().mapPartitions { iter => * val context = BarrierTaskContext.get() * if (context.partitionId() == 0) { * // Do nothing. * } else { * context.barrier() * } * iter * } * }}} * * 2. Include barrier() function in a try-catch code block, this may lead to timeout of the * second function call. * {{{ * rdd.barrier().mapPartitions { iter => * val context = BarrierTaskContext.get() * try { * // Do something that might throw an Exception. 
* doSomething() * context.barrier() * } catch { * case e: Exception => logWarning("...", e) * } * context.barrier() * iter * } * }}} */ @Experimental @Since("2.4.0") def barrier(): Unit = { logInfo(s"Task $taskAttemptId from Stage $stageId(Attempt $stageAttemptNumber) has entered " + s"the global sync, current barrier epoch is $barrierEpoch.") logTrace("Current callSite: " + Utils.getCallSite()) val startTime = System.currentTimeMillis() val timerTask = new TimerTask { override def run(): Unit = { logInfo(s"Task $taskAttemptId from Stage $stageId(Attempt $stageAttemptNumber) waiting " + s"under the global sync since $startTime, has been waiting for " + s"${MILLISECONDS.toSeconds(System.currentTimeMillis() - startTime)} seconds, " + s"current barrier epoch is $barrierEpoch.") } } // Log the update of global sync every 60 seconds. timer.schedule(timerTask, 60000, 60000) try { val abortableRpcFuture = barrierCoordinator.askAbortable[Unit]( message = RequestToSync(numTasks, stageId, stageAttemptNumber, taskAttemptId, barrierEpoch), // Set a fixed timeout for RPC here, so users shall get a SparkException thrown by // BarrierCoordinator on timeout, instead of RPCTimeoutException from the RPC framework. timeout = new RpcTimeout(365.days, "barrierTimeout")) // Wait the RPC future to be completed, but every 1 second it will jump out waiting // and check whether current spark task is killed. If killed, then throw // a `TaskKilledException`, otherwise continue wait RPC until it completes. try { while (!abortableRpcFuture.toFuture.isCompleted) { // wait RPC future for at most 1 second try { ThreadUtils.awaitResult(abortableRpcFuture.toFuture, 1.second) } catch { case _: TimeoutException | _: InterruptedException => // If `TimeoutException` thrown, waiting RPC future reach 1 second. // If `InterruptedException` thrown, it is possible this task is killed. // So in this two cases, we should check whether task is killed and then // throw `TaskKilledException` taskContext.killTaskIfInterrupted() } } } finally { abortableRpcFuture.abort(taskContext.getKillReason().getOrElse("Unknown reason.")) } barrierEpoch += 1 logInfo(s"Task $taskAttemptId from Stage $stageId(Attempt $stageAttemptNumber) finished " + "global sync successfully, waited for " + s"${MILLISECONDS.toSeconds(System.currentTimeMillis() - startTime)} seconds, " + s"current barrier epoch is $barrierEpoch.") } catch { case e: SparkException => logInfo(s"Task $taskAttemptId from Stage $stageId(Attempt $stageAttemptNumber) failed " + "to perform global sync, waited for " + s"${MILLISECONDS.toSeconds(System.currentTimeMillis() - startTime)} seconds, " + s"current barrier epoch is $barrierEpoch.") throw e } finally { timerTask.cancel() timer.purge() } } /** * :: Experimental :: * Returns [[BarrierTaskInfo]] for all tasks in this barrier stage, ordered by partition ID. 
*/ @Experimental @Since("2.4.0") def getTaskInfos(): Array[BarrierTaskInfo] = { val addressesStr = Option(taskContext.getLocalProperty("addresses")).getOrElse("") addressesStr.split(",").map(_.trim()).map(new BarrierTaskInfo(_)) } // delegate methods override def isCompleted(): Boolean = taskContext.isCompleted() override def isInterrupted(): Boolean = taskContext.isInterrupted() override def addTaskCompletionListener(listener: TaskCompletionListener): this.type = { taskContext.addTaskCompletionListener(listener) this } override def addTaskFailureListener(listener: TaskFailureListener): this.type = { taskContext.addTaskFailureListener(listener) this } override def stageId(): Int = taskContext.stageId() override def stageAttemptNumber(): Int = taskContext.stageAttemptNumber() override def partitionId(): Int = taskContext.partitionId() override def attemptNumber(): Int = taskContext.attemptNumber() override def taskAttemptId(): Long = taskContext.taskAttemptId() override def getLocalProperty(key: String): String = taskContext.getLocalProperty(key) override def taskMetrics(): TaskMetrics = taskContext.taskMetrics() override def getMetricsSources(sourceName: String): Seq[Source] = { taskContext.getMetricsSources(sourceName) } override def resources(): Map[String, ResourceInformation] = taskContext.resources() override private[spark] def killTaskIfInterrupted(): Unit = taskContext.killTaskIfInterrupted() override private[spark] def getKillReason(): Option[String] = taskContext.getKillReason() override private[spark] def taskMemoryManager(): TaskMemoryManager = { taskContext.taskMemoryManager() } override private[spark] def registerAccumulator(a: AccumulatorV2[_, _]): Unit = { taskContext.registerAccumulator(a) } override private[spark] def setFetchFailed(fetchFailed: FetchFailedException): Unit = { taskContext.setFetchFailed(fetchFailed) } override private[spark] def markInterrupted(reason: String): Unit = { taskContext.markInterrupted(reason) } override private[spark] def markTaskFailed(error: Throwable): Unit = { taskContext.markTaskFailed(error) } override private[spark] def markTaskCompleted(error: Option[Throwable]): Unit = { taskContext.markTaskCompleted(error) } override private[spark] def fetchFailed: Option[FetchFailedException] = { taskContext.fetchFailed } override private[spark] def getLocalProperties: Properties = taskContext.getLocalProperties } @Experimental @Since("2.4.0") object BarrierTaskContext { /** * :: Experimental :: * Returns the currently active BarrierTaskContext. This can be called inside of user functions to * access contextual information about running barrier tasks. */ @Experimental @Since("2.4.0") def get(): BarrierTaskContext = TaskContext.get().asInstanceOf[BarrierTaskContext] private val timer = new Timer("Barrier task timer for barrier() calls.") }
pgandhi999/spark
core/src/main/scala/org/apache/spark/BarrierTaskContext.scala
Scala
apache-2.0
10,073
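The barrier() scaladoc above describes the calling pattern; the sketch below restates it as a runnable job. The local master, partition count and data are illustrative only.

// --- usage sketch (not part of the repository above) ---
import org.apache.spark.BarrierTaskContext
import org.apache.spark.sql.SparkSession

object BarrierSketch extends App {
  val spark = SparkSession.builder().master("local[4]").appName("barrier-sketch").getOrCreate()

  val doubled = spark.sparkContext
    .parallelize(1 to 8, numSlices = 4)
    .barrier()                               // ask for barrier scheduling of this stage
    .mapPartitions { iter =>
      val context = BarrierTaskContext.get()
      context.barrier()                      // every task must reach this point before any continues
      iter.map(_ * 2)
    }
    .collect()

  println(doubled.mkString(", "))
  spark.stop()
}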
package com.soteradefense.dga.graphx.louvain import org.apache.spark.SparkContext import org.apache.spark.SparkContext._ import org.apache.spark.graphx._ import scala.reflect.ClassTag import org.apache.spark.broadcast.Broadcast import org.apache.spark.graphx.Graph.graphToGraphOps import scala.math.BigDecimal.double2bigDecimal /** * Provides low level louvain community detection algorithm functions. Generally used by LouvainHarness * to coordinate the correct execution of the algorithm though its several stages. * * For details on the sequential algorithm see: Fast unfolding of communities in large networks, Blondel 2008 */ object LouvainCore { /** * Generates a new graph of type Graph[VertexState,Long] based on an input graph of type. * Graph[VD,Long]. The resulting graph can be used for louvain computation. * */ def createLouvainGraph[VD: ClassTag](graph: Graph[VD,Long]) : Graph[VertexState,Long]= { // Create the initial Louvain graph. val nodeWeightMapFunc = (e:EdgeTriplet[VD,Long]) => Iterator((e.srcId,e.attr), (e.dstId,e.attr)) val nodeWeightReduceFunc = (e1:Long,e2:Long) => e1+e2 val nodeWeights = graph.mapReduceTriplets(nodeWeightMapFunc,nodeWeightReduceFunc) val louvainGraph = graph.outerJoinVertices(nodeWeights)((vid,data,weightOption)=> { val weight = weightOption.getOrElse(0L) val state = new VertexState() state.community = vid state.changed = false state.communitySigmaTot = weight state.internalWeight = 0L state.nodeWeight = weight state }).partitionBy(PartitionStrategy.EdgePartition2D).groupEdges(_+_) return louvainGraph } /** * Transform a graph from [VD,Long] to a a [VertexState,Long] graph and label each vertex with a community * to maximize global modularity (without compressing the graph) */ def louvainFromStandardGraph[VD: ClassTag](sc:SparkContext,graph:Graph[VD,Long], minProgress:Int=1,progressCounter:Int=1) : (Double,Graph[VertexState,Long],Int) = { val louvainGraph = createLouvainGraph(graph) return louvain(sc,louvainGraph,minProgress,progressCounter) } /** * For a graph of type Graph[VertexState,Long] label each vertex with a community to maximize global modularity. * (without compressing the graph) */ def louvain(sc:SparkContext, graph:Graph[VertexState,Long], minProgress:Int=1,progressCounter:Int=1) : (Double,Graph[VertexState,Long],Int)= { var louvainGraph = graph.cache() val graphWeight = louvainGraph.vertices.values.map(vdata=> vdata.internalWeight+vdata.nodeWeight).reduce(_+_) var totalGraphWeight = sc.broadcast(graphWeight) println("totalEdgeWeight: "+totalGraphWeight.value) // gather community information from each vertex's local neighborhood var msgRDD = louvainGraph.mapReduceTriplets(sendMsg,mergeMsg) var activeMessages = msgRDD.count() //materializes the msgRDD and caches it in memory var updated = 0L - minProgress var even = false var count = 0 val maxIter = 100000 var stop = 0 var updatedLastPhase = 0L do { count += 1 even = ! 
even // label each vertex with its best community based on neighboring community information val labeledVerts = louvainVertJoin(louvainGraph,msgRDD,totalGraphWeight,even).cache() // calculate new sigma total value for each community (total weight of each community) val communtiyUpdate = labeledVerts .map( {case (vid,vdata) => (vdata.community,vdata.nodeWeight+vdata.internalWeight)}) .reduceByKey(_+_).cache() // map each vertex ID to its updated community information val communityMapping = labeledVerts .map( {case (vid,vdata) => (vdata.community,vid)}) .join(communtiyUpdate) .map({case (community,(vid,sigmaTot)) => (vid,(community,sigmaTot)) }) .cache() // join the community labeled vertices with the updated community info val updatedVerts = labeledVerts.join(communityMapping).map({ case (vid,(vdata,communityTuple) ) => vdata.community = communityTuple._1 vdata.communitySigmaTot = communityTuple._2 (vid,vdata) }).cache() updatedVerts.count() labeledVerts.unpersist(blocking = false) communtiyUpdate.unpersist(blocking=false) communityMapping.unpersist(blocking=false) val prevG = louvainGraph louvainGraph = louvainGraph.outerJoinVertices(updatedVerts)((vid, old, newOpt) => newOpt.getOrElse(old)) louvainGraph.cache() // gather community information from each vertex's local neighborhood val oldMsgs = msgRDD msgRDD = louvainGraph.mapReduceTriplets(sendMsg, mergeMsg).cache() activeMessages = msgRDD.count() // materializes the graph by forcing computation oldMsgs.unpersist(blocking=false) updatedVerts.unpersist(blocking=false) prevG.unpersistVertices(blocking=false) // half of the communites can swtich on even cycles // and the other half on odd cycles (to prevent deadlocks) // so we only want to look for progess on odd cycles (after all vertcies have had a chance to move) if (even) updated = 0 updated = updated + louvainGraph.vertices.filter(_._2.changed).count if (!even) { println(" # vertices moved: "+java.text.NumberFormat.getInstance().format(updated)) if (updated >= updatedLastPhase - minProgress) stop += 1 updatedLastPhase = updated } } while ( stop <= progressCounter && (even || (updated > 0 && count < maxIter))) println("\\nCompleted in "+count+" cycles") // Use each vertex's neighboring community data to calculate the global modularity of the graph val newVerts = louvainGraph.vertices.innerJoin(msgRDD)((vid,vdata,msgs)=> { // sum the nodes internal weight and all of its edges that are in its community val community = vdata.community var k_i_in = vdata.internalWeight var sigmaTot = vdata.communitySigmaTot.toDouble msgs.foreach({ case( (communityId,sigmaTotal),communityEdgeWeight ) => if (vdata.community == communityId) k_i_in += communityEdgeWeight}) val M = totalGraphWeight.value val k_i = vdata.nodeWeight + vdata.internalWeight var q = (k_i_in.toDouble / M) - ( ( sigmaTot *k_i) / math.pow(M, 2) ) //println(s"vid: $vid community: $community $q = ($k_i_in / $M) - ( ($sigmaTot * $k_i) / math.pow($M, 2) )") if (q < 0) 0 else q }) val actualQ = newVerts.values.reduce(_+_) // return the modularity value of the graph along with the // graph. vertices are labeled with their community return (actualQ,louvainGraph,count/2) } /** * Creates the messages passed between each vertex to convey neighborhood community data. 
*/ private def sendMsg(et:EdgeTriplet[VertexState,Long]) = { val m1 = (et.dstId,Map((et.srcAttr.community,et.srcAttr.communitySigmaTot)->et.attr)) val m2 = (et.srcId,Map((et.dstAttr.community,et.dstAttr.communitySigmaTot)->et.attr)) Iterator(m1, m2) } /** * Merge neighborhood community data into a single message for each vertex */ private def mergeMsg(m1:Map[(Long,Long),Long],m2:Map[(Long,Long),Long]) ={ val newMap = scala.collection.mutable.HashMap[(Long,Long),Long]() m1.foreach({case (k,v)=> if (newMap.contains(k)) newMap(k) = newMap(k) + v else newMap(k) = v }) m2.foreach({case (k,v)=> if (newMap.contains(k)) newMap(k) = newMap(k) + v else newMap(k) = v }) newMap.toMap } /** * Join vertices with community data form their neighborhood and select the best community for each vertex to maximize change in modularity. * Returns a new set of vertices with the updated vertex state. */ private def louvainVertJoin(louvainGraph:Graph[VertexState,Long], msgRDD:VertexRDD[Map[(Long,Long),Long]], totalEdgeWeight:Broadcast[Long], even:Boolean) = { louvainGraph.vertices.innerJoin(msgRDD)( (vid, vdata, msgs)=> { var bestCommunity = vdata.community var startingCommunityId = bestCommunity var maxDeltaQ = BigDecimal(0.0); var bestSigmaTot = 0L msgs.foreach({ case( (communityId,sigmaTotal),communityEdgeWeight ) => val deltaQ = q(startingCommunityId, communityId, sigmaTotal, communityEdgeWeight, vdata.nodeWeight, vdata.internalWeight,totalEdgeWeight.value) //println(" communtiy: "+communityId+" sigma:"+sigmaTotal+" edgeweight:"+communityEdgeWeight+" q:"+deltaQ) if (deltaQ > maxDeltaQ || (deltaQ > 0 && (deltaQ == maxDeltaQ && communityId > bestCommunity))){ maxDeltaQ = deltaQ bestCommunity = communityId bestSigmaTot = sigmaTotal } }) // only allow changes from low to high communties on even cyces and high to low on odd cycles if ( vdata.community != bestCommunity && ( (even && vdata.community > bestCommunity) || (!even && vdata.community < bestCommunity) ) ){ //println(" "+vid+" SWITCHED from "+vdata.community+" to "+bestCommunity) vdata.community = bestCommunity vdata.communitySigmaTot = bestSigmaTot vdata.changed = true } else{ vdata.changed = false } vdata }) } /** * Returns the change in modularity that would result from a vertex moving to a specified community. */ private def q(currCommunityId:Long, testCommunityId:Long, testSigmaTot:Long, edgeWeightInCommunity:Long, nodeWeight:Long, internalWeight:Long, totalEdgeWeight:Long) : BigDecimal = { val isCurrentCommunity = (currCommunityId.equals(testCommunityId)); val M = BigDecimal(totalEdgeWeight); val k_i_in_L = if (isCurrentCommunity) edgeWeightInCommunity + internalWeight else edgeWeightInCommunity; val k_i_in = BigDecimal(k_i_in_L); val k_i = BigDecimal(nodeWeight + internalWeight); val sigma_tot = if (isCurrentCommunity) BigDecimal(testSigmaTot) - k_i else BigDecimal(testSigmaTot); var deltaQ = BigDecimal(0.0); if (!(isCurrentCommunity && sigma_tot.equals(0.0))) { deltaQ = k_i_in - ( k_i * sigma_tot / M) //println(s" $deltaQ = $k_i_in - ( $k_i * $sigma_tot / $M") } return deltaQ; } /** * Compress a graph by its communities, aggregate both internal node weights and edge * weights within communities. */ def compressGraph(graph:Graph[VertexState,Long],debug:Boolean=true) : Graph[VertexState,Long] = { // aggregate the edge weights of self loops. edges with both src and dst in the same community. 
// WARNING can not use graph.mapReduceTriplets because we are mapping to new vertexIds val internalEdgeWeights = graph.triplets.flatMap(et=>{ if (et.srcAttr.community == et.dstAttr.community){ Iterator( ( et.srcAttr.community, 2*et.attr) ) // count the weight from both nodes // count the weight from both nodes } else Iterator.empty }).reduceByKey(_+_) // aggregate the internal weights of all nodes in each community var internalWeights = graph.vertices.values.map(vdata=> (vdata.community,vdata.internalWeight)).reduceByKey(_+_) // join internal weights and self edges to find new interal weight of each community val newVerts = internalWeights.leftOuterJoin(internalEdgeWeights).map({case (vid,(weight1,weight2Option)) => val weight2 = weight2Option.getOrElse(0L) val state = new VertexState() state.community = vid state.changed = false state.communitySigmaTot = 0L state.internalWeight = weight1+weight2 state.nodeWeight = 0L (vid,state) }).cache() // translate each vertex edge to a community edge val edges = graph.triplets.flatMap(et=> { val src = math.min(et.srcAttr.community,et.dstAttr.community) val dst = math.max(et.srcAttr.community,et.dstAttr.community) if (src != dst) Iterator(new Edge(src, dst, et.attr)) else Iterator.empty }).cache() // generate a new graph where each community of the previous // graph is now represented as a single vertex val compressedGraph = Graph(newVerts,edges) .partitionBy(PartitionStrategy.EdgePartition2D).groupEdges(_+_) // calculate the weighted degree of each node val nodeWeightMapFunc = (e:EdgeTriplet[VertexState,Long]) => Iterator((e.srcId,e.attr), (e.dstId,e.attr)) val nodeWeightReduceFunc = (e1:Long,e2:Long) => e1+e2 val nodeWeights = compressedGraph.mapReduceTriplets(nodeWeightMapFunc,nodeWeightReduceFunc) // fill in the weighted degree of each node // val louvainGraph = compressedGraph.joinVertices(nodeWeights)((vid,data,weight)=> { val louvainGraph = compressedGraph.outerJoinVertices(nodeWeights)((vid,data,weightOption)=> { val weight = weightOption.getOrElse(0L) data.communitySigmaTot = weight +data.internalWeight data.nodeWeight = weight data }).cache() louvainGraph.vertices.count() louvainGraph.triplets.count() // materialize the graph newVerts.unpersist(blocking=false) edges.unpersist(blocking=false) return louvainGraph } // debug printing private def printlouvain(graph:Graph[VertexState,Long]) = { print("\\ncommunity label snapshot\\n(vid,community,sigmaTot)\\n") graph.vertices.mapValues((vid,vdata)=> (vdata.community,vdata.communitySigmaTot)).collect().foreach(f=>println(" "+f)) } // debug printing private def printedgetriplets(graph:Graph[VertexState,Long]) = { print("\\ncommunity label snapshot FROM TRIPLETS\\n(vid,community,sigmaTot)\\n") (graph.triplets.flatMap(e=> Iterator((e.srcId,e.srcAttr.community,e.srcAttr.communitySigmaTot), (e.dstId,e.dstAttr.community,e.dstAttr.communitySigmaTot))).collect()).foreach(f=>println(" "+f)) } }
623401157/spark-distributed-louvain-modularity
dga-graphx/src/main/scala/com/soteradefense/dga/graphx/louvain/LouvainCore.scala
Scala
apache-2.0
13,983
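A hypothetical driver for the functions above, based on the louvainFromStandardGraph signature. The toy edge list and the local master are made up; in the project, LouvainHarness normally coordinates the repeated louvain/compress passes.

// --- usage sketch (not part of the repository above) ---
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.graphx.{Edge, Graph}
import com.soteradefense.dga.graphx.louvain.LouvainCore

object LouvainSketch extends App {
  val sc = new SparkContext(new SparkConf().setMaster("local[*]").setAppName("louvain-sketch"))

  // Two triangles joined by a single bridge edge; all edge weights are 1.
  val edges = sc.parallelize(Seq(
    Edge(1L, 2L, 1L), Edge(2L, 3L, 1L), Edge(1L, 3L, 1L),
    Edge(4L, 5L, 1L), Edge(5L, 6L, 1L), Edge(4L, 6L, 1L),
    Edge(3L, 4L, 1L)))
  val graph = Graph.fromEdges(edges, defaultValue = 0)

  // One labelling pass: returns the modularity, the community-labelled graph and the pass count.
  val (modularity, louvainGraph, passes) = LouvainCore.louvainFromStandardGraph(sc, graph)
  println(s"modularity = $modularity after $passes passes")

  sc.stop()
}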
package io.youi.font

import io.youi.drawable.{Context, TextDrawable}
import io.youi.paint.{Paint, Stroke}
import io.youi.spatial.BoundingBox

trait Text {
  def font: Font
  def text: String
  def size: Double
  def maxWidth: Double
  def kerning: Boolean
  def boundingBox: BoundingBox

  def lineHeight: Double = font.lineHeight(size)

  def draw(context: Context, x: Double, y: Double, fill: Paint, stroke: Stroke): Unit

  def toDrawable(fill: Paint = Paint.none, stroke: Stroke = Stroke.none): TextDrawable =
    new TextDrawable(this, fill, stroke)
}

object Text {
  case object empty extends Text {
    override def font: Font = Font.empty
    override def text: String = ""
    override def size: Double = 0.0
    override def maxWidth: Double = 0.0
    override def kerning: Boolean = false
    override def boundingBox: BoundingBox = BoundingBox.zero

    override def draw(context: Context, x: Double, y: Double, fill: Paint, stroke: Stroke): Unit = {}
  }
}
outr/youi
gui/src/main/scala/io/youi/font/Text.scala
Scala
mit
984
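A short usage sketch for the Text trait above, assuming the youi artifact is on the classpath: Text.empty is the inert placeholder, and toDrawable falls back to the Paint.none/Stroke.none defaults when no fill or stroke is supplied.

object TextSketch extends App {
  import io.youi.font.Text

  // Text.empty renders nothing and reports a zero-sized bounding box.
  val placeholder = Text.empty
  assert(placeholder.text.isEmpty && placeholder.size == 0.0)

  // Any Text can be lifted into a TextDrawable; the trait's defaults
  // (Paint.none / Stroke.none) apply when no fill or stroke is given.
  val drawable = placeholder.toDrawable()
  println(drawable)
}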
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License */ package org.apache.toree.kernel.interpreter.sparkr import org.apache.toree.interpreter.broker.BrokerProcess import scala.collection.JavaConverters._ /** * Represents the R process used to evaluate SparkR code. * * @param processName The name of the Rscript process to run. * @param sparkRBridge The bridge to use to retrieve kernel output streams * and the Spark version to be verified * @param sparkRProcessHandler The handler to use when the process fails or * completes * @param port The port to provide to the SparkR process to use to connect * back to the JVM */ class SparkRProcess( processName: String, private val sparkRBridge: SparkRBridge, private val sparkRProcessHandler: SparkRProcessHandler, private val port: Int ) extends BrokerProcess( processName = processName, entryResource = "kernelR/sparkr_runner.R", otherResources = Seq("kernelR/sparkr_runner_utils.R"), brokerBridge = sparkRBridge, brokerProcessHandler = sparkRProcessHandler, arguments = Seq( "--default-packages=datasets,utils,grDevices,graphics,stats,methods" ) ) { override val brokerName: String = "SparkR" private val sparkHome = Option(System.getenv("SPARK_HOME")) .orElse(Option(System.getProperty("spark.home"))) assert(sparkHome.nonEmpty, "SparkR process requires Spark Home to be set!") /** * Creates a new process environment to be used for environment variable * retrieval by the new process. * * @return The map of environment variables and their respective values */ override protected def newProcessEnvironment(): Map[String, String] = { val baseEnvironment = super.newProcessEnvironment() // Note: Adding the new map values should override the old ones baseEnvironment ++ Map( "SPARK_HOME" -> sparkHome.get, "EXISTING_SPARKR_BACKEND_PORT" -> port.toString ) } }
Myllyenko/incubator-toree
sparkr-interpreter/src/main/scala/org/apache/toree/kernel/interpreter/sparkr/SparkRProcess.scala
Scala
apache-2.0
2,748
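The newProcessEnvironment override above relies on Map's ++ keeping the right-hand operand's value on key collisions, which is what lets SPARK_HOME and the backend port override anything inherited from the base broker environment. A tiny self-contained sketch of that semantics (the paths and port are made up):

object EnvOverrideSketch extends App {
  val baseEnvironment = Map("SPARK_HOME" -> "/opt/old-spark", "PATH" -> "/usr/bin")

  // ++ keeps the right-hand map's value when a key exists in both maps,
  // which is how the subclass overrides SPARK_HOME in the merged environment.
  val merged = baseEnvironment ++ Map(
    "SPARK_HOME" -> "/opt/spark",
    "EXISTING_SPARKR_BACKEND_PORT" -> "8999"
  )

  assert(merged("SPARK_HOME") == "/opt/spark")
  println(merged)
}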
package com.github.j5ik2o.dddbase.dynamodb import monix.eval.Task trait AggregateBaseReadFeature extends AggregateIOBaseFeature { protected def convertToAggregate: RecordType => Task[AggregateType] }
j5ik2o/scala-ddd-base-functional
nosql/dynamodb/src/main/scala/com/github/j5ik2o/dddbase/dynamodb/AggregateBaseReadFeature.scala
Scala
mit
206
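The trait above only asks implementers for an effectful, total conversion from a persistence record to a domain aggregate. A standalone sketch of that shape with Monix Task, using hypothetical record and aggregate types and not extending the library's traits:

import monix.eval.Task

object ConvertToAggregateSketch {
  // Hypothetical persistence record and domain aggregate (not library types).
  final case class UserRecord(id: String, name: String)
  final case class User(id: String, name: String)

  // Same shape as convertToAggregate: RecordType => Task[AggregateType].
  // Task.now is enough here; a real conversion might validate or decode fields.
  val convertToAggregate: UserRecord => Task[User] =
    record => Task.now(User(record.id, record.name))
}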
import bio._ import org.scalatest.FlatSpec import org.scalatest.matchers.ShouldMatchers package bio.test { import bio.attribute._ import org.scalatest.Matchers class GappedSequenceSpec extends FlatSpec with Matchers { import bio.DNA._ "A DNA GappedSequence" should "instantiate from a String" in { val s = new GappedSequence("agc--taacg---") s.toString should equal ("agc--taacg---") } "A DNA GappedSequence" should "instantiate with an ID" in { val s = new GappedSequence("ID456","agctaacg") s.id should equal ("ID456") } "A DNA GappedSequence" should "instantiate with an ID+Description" in { val s = new GappedSequence("ID456","Gene 456","agctaacg") s.description should equal ("Gene 456") } "An AA GappedSequence" should "instantiate from a String" in { val s = new Protein.GappedSequence("SSI-ISNS---FSRP") s.toString should equal ("SSI-ISNS---FSRP") } "An AA GappedSequence" should "instantiate with an ID" in { val s = new Protein.GappedSequence("ID456","SSI-ISNS---FSRP") s.id should equal ("ID456") } "An AA GappedSequence" should "instantiate with an ID+Description" in { val s = new Protein.GappedSequence("ID456","Gene 456","SSI-ISNS---FSRP") s.description should equal ("Gene 456") } "A GappedCodonSequence" should "instantiate from a String" in { val s = new Protein.GappedCodonSequence("agc---taacgt") s.toString should equal ("S-*R") } "A GappedCodonSequence" should "instantiate with an ID" in { val s = new Protein.GappedCodonSequence("ID456","agctaacg") s.id should equal ("ID456") } "A GappedCodonSequence" should "instantiate with an ID+Description" in { val s = new Protein.GappedCodonSequence("ID456","Gene 456","agctaacg") s.description should equal ("Gene 456") } "A misaligned GappedCodonSequence" should "do something" in { val s = new Protein.GappedCodonSequence("agc----taacgt") s.toString should equal ("S--T") } } }
bioscala/bioscala
src/test/scala/bio/sequence/gappedsequence_spec.scala
Scala
bsd-2-clause
2,059
package com.twitter.finagle.mysql import com.twitter.concurrent.AsyncStream import com.twitter.finagle.Service import com.twitter.finagle.stats.StatsReceiver import com.twitter.logging.Logger import com.twitter.util.{Closable, Future, Time} /** * A closable async stream of projected rows from a CursoredStatement. */ trait CursorResult[T] extends Closable { /** * Initiate the streaming result set. * * @note once `stream` has been called it is critical to either * consume the stream to the end or explicitly call `close()` to * avoid resource leaking. */ def stream: AsyncStream[T] } /** * A CursoredStatement represents a parameterized * sql statement which when applied to parameters yields * a lazy stream of rows. Can be used concurrently. */ trait CursoredStatement { /** * Executes the cursored statement with the given `params` and lazily maps `f` * over the rows as they are streamed from the database. * * @note `rowsPerFetch` should be carefully picked to balance the minimum number of round * trips to the database, and the maximum amount of memory used by an individual fetch. For * example, consider estimating the whole result size with a `select count(...)` first, * and then setting `rowsPerFetch` to `Math.log(count)`. */ def apply[T](rowsPerFetch: Int, params: Parameter*)(f: Row => T): Future[CursorResult[T]] } private object StdCursorResult { val logger = Logger(getClass.getName) val CursorClosedException = new Exception("request attempted against already closed cursor") } private class StdCursorResult[T]( stats: CursorStats, svc: Service[Request, Result], sql: String, rowsPerFetch: Int, params: Seq[Parameter], f: (Row) => T, supportUnsigned: Boolean ) extends CursorResult[T] { self => import StdCursorResult._ // We store the stream state outside of an AsyncStream instance to avoid storing the // head of the stream as a member field. That is, each operation on the `stream` // inside `StdCursorResult` will construct a new AsyncStream (e.g. no vals). This is // important to avoid OOMing during large or infinite streams. sealed trait StreamState case object Init extends StreamState case object Closed extends StreamState case class Preparing(s: AsyncStream[T]) extends StreamState case class Prepared(ok: PrepareOK) extends StreamState case class Fetching(fs: () => AsyncStream[T]) extends StreamState // Thread safety is provided by synchronization on `this`. The assumption is that it's // okay to use a coarse grained lock for to manage state since operations on the // stream should have no (or low) concurrency in the common case. 
private[this] var state: StreamState = Init private[this] val closeFn: Throwable => Unit = _ => close() private[this] def invoke(req: Request): Future[Result] = self.synchronized { state match { case Closed => Future.exception(CursorClosedException) case _ => svc(req).onFailure(closeFn) } } private[this] def prepare(): AsyncStream[Result] = AsyncStream.fromFuture(invoke(PrepareRequest(sql))) private[this] def execute(ok: PrepareOK): AsyncStream[Result] = { val execReq = new ExecuteRequest( stmtId = ok.id, params = params.toIndexedSeq, hasNewParams = true, flags = ExecuteRequest.FLAG_CURSOR_READ_ONLY ) AsyncStream.fromFuture(invoke(execReq)) } private[this] def fetch(ok: PrepareOK): () => AsyncStream[T] = { val columns = ok.columns.toIndexedSeq val indexMap = columns.map(_.id).zipWithIndex.toMap val fetchRequest = new FetchRequest(ok, rowsPerFetch) def go(): AsyncStream[T] = { stats.fetchStarted() AsyncStream.fromFuture(invoke(fetchRequest)).flatMap { result => stats.fetchFinished() result match { case fetchResult: FetchResult => // This is somewhat awkward reaching across the abstraction // of Results to touching Packets, but there are future // refactorings that can help clean this up. val rows = fetchResult.rowPackets.map { p => new BinaryEncodedRow(p.body, columns, indexMap, !supportUnsigned) } val asyncSeq = AsyncStream.fromSeq(rows.map(f)) if (!fetchResult.containsLastRow) asyncSeq ++ go() else { AsyncStream.fromFuture(close()).flatMap(_ => asyncSeq) } case r => closeAndLog(s"unexpected reply $r when fetching an element.") } } } go } override def stream: AsyncStream[T] = self.synchronized { state match { case Preparing(s) => s case Fetching(fs) => fs() case Closed => AsyncStream.empty case Init => val s = prepare().flatMap { case ok: PrepareOK => self.synchronized { state = Prepared(ok) }; stream case r => closeAndLog(s"unexpected reply $r when preparing stream.") } // Although unlikely, we want to make sure we don't race // with the closure on `prepare`. if (state == Init) state = Preparing(s) s case prepared: Prepared => val s = execute(prepared.ok).flatMap { case _: ResultSet => stats.streamStarted() val fs = fetch(prepared.ok) self.synchronized { state = Fetching(fs) } fs() case r => closeAndLog(s"unexpected reply $r when executing stream.") } // Although unlikely, we want to make sure we don't race // with the closure on `execute`. 
if (state == prepared) state = Preparing(s) s } } private[this] def closeAndLog(msg: String): AsyncStream[T] = { logger.error(msg) AsyncStream.fromFuture(close()).flatMap(_ => AsyncStream.empty) } override def close(deadline: Time): Future[Unit] = self.synchronized { state match { case Closed => Future.Unit case _ => stats.streamFinished() state = Closed svc.close(deadline) } } } private class CursorStats(statsReceiver: StatsReceiver) { private[this] val sr = statsReceiver.scope("cursor") private[this] val timePerStreamMsStat = sr.stat("time_per_stream_ms") private[this] val timePerFetchMsStat = sr.stat("time_per_fetch_ms") private[this] val timeBetweenFetchMsStat = sr.stat("time_between_fetch_ms") private[this] val cursorsOpenedCounter = sr.counter("opened") private[this] val cursorsClosedCounter = sr.counter("closed") // used to export stats about the stream life-cycle @volatile private[this] var streamStartTime = Time.Bottom @volatile private[this] var fetchStartTime = Time.Bottom @volatile private[this] var lastFetchEndTime = Time.Bottom def streamStarted(): Unit = { cursorsOpenedCounter.incr() streamStartTime = Time.now } def streamFinished(): Unit = { cursorsClosedCounter.incr() timePerStreamMsStat.add((Time.now - streamStartTime).inMillis) } def fetchStarted(): Unit = { fetchStartTime = Time.now } def fetchFinished(): Time = { val fetchEndTime = Time.now timePerFetchMsStat.add((fetchEndTime - fetchStartTime).inMillis) if (lastFetchEndTime != Time.Bottom) { timeBetweenFetchMsStat.add((fetchStartTime - lastFetchEndTime).inMillis) } lastFetchEndTime = fetchEndTime fetchEndTime } }
mkhq/finagle
finagle-mysql/src/main/scala/com/twitter/finagle/mysql/CursoredStatement.scala
Scala
apache-2.0
7,364
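A hedged usage sketch for the cursor API above, assuming a CursoredStatement obtained elsewhere (for example from a finagle-mysql client). As the stream scaladoc warns, the result must either be consumed to the end or closed explicitly, so the fold below always releases the cursor:

import com.twitter.finagle.mysql.{CursorResult, CursoredStatement, Parameter, Row}
import com.twitter.util.Future

object CursorUsageSketch {
  // Hypothetical driver: count rows by mapping each Row to 1L, fold the whole
  // AsyncStream, and close the cursor whether the fold succeeds or fails.
  def countRows(stmt: CursoredStatement, params: Parameter*): Future[Long] =
    stmt(100, params: _*) { (_: Row) => 1L }.flatMap { cursor: CursorResult[Long] =>
      cursor.stream
        .foldLeft(0L)(_ + _)      // consume the stream to the end...
        .ensure(cursor.close())   // ...and release the cursor either way
    }
}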
// Copyright: 2010 - 2017 https://github.com/ensime/ensime-server/graphs // License: http://www.gnu.org/licenses/gpl-3.0.en.html package org.ensime.util import java.io.File class DiffUtilSpec extends EnsimeSpec { "DiffUtil" should "compare original and revised contents and produce a diff in the unified format" in { val originalContent = """|line1 |line2 |line3""".stripMargin.lines.toSeq val revisedContent = """|line1 |new-line2 |line3""".stripMargin.lines.toSeq val a = new File("a").getAbsolutePath() val b = new File("b").getAbsolutePath() val expectedDiff = s"""|--- $a 1970-01-01 12:00:00 +0000 |+++ $b 1970-01-01 12:00:00 +0000 |@@ -1,3 +1,3 @@ | line1 |-line2 |+new-line2 | line3 |""".stripMargin val diff = DiffUtil.compareContents(originalContent, revisedContent) diff should ===(expectedDiff) } }
fommil/ensime-server
core/src/test/scala/org/ensime/util/DiffUtilSpec.scala
Scala
gpl-3.0
971
/* sbt -- Simple Build Tool * Copyright 2010 Mark Harrah */ package sbt package inc import java.io.File import Relations.Source import Relations.SourceDependencies import xsbti.api.{ Source => APISource } import xsbti.DependencyContext import xsbti.DependencyContext._ /** * Provides mappings between source files, generated classes (products), and binaries. * Dependencies that are tracked include internal: a dependency on a source in the same compilation group (project), * external: a dependency on a source in another compilation group (tracked as the name of the class), * binary: a dependency on a class or jar file not generated by a source file in any tracked compilation group, * inherited: a dependency that resulted from a public template inheriting, * direct: any type of dependency, including inheritance. */ trait Relations { /** All sources _with at least one product_ . */ def allSources: collection.Set[File] /** All products associated with sources. */ def allProducts: collection.Set[File] /** All files that are recorded as a binary dependency of a source file.*/ def allBinaryDeps: collection.Set[File] /** All files in this compilation group (project) that are recorded as a source dependency of a source file in this group.*/ def allInternalSrcDeps: collection.Set[File] /** All files in another compilation group (project) that are recorded as a source dependency of a source file in this group.*/ def allExternalDeps: collection.Set[String] /** Fully qualified names of classes generated from source file `src`. */ def classNames(src: File): Set[String] /** Source files that generated a class with the given fully qualified `name`. This is typically a set containing a single file. */ def definesClass(name: String): Set[File] /** The classes that were generated for source file `src`. */ def products(src: File): Set[File] /** The source files that generated class file `prod`. This is typically a set containing a single file. */ def produced(prod: File): Set[File] /** The binary dependencies for the source file `src`. */ def binaryDeps(src: File): Set[File] /** The source files that depend on binary file `dep`. */ def usesBinary(dep: File): Set[File] /** Internal source dependencies for `src`. This includes both direct and inherited dependencies. */ def internalSrcDeps(src: File): Set[File] /** Internal source files that depend on internal source `dep`. This includes both direct and inherited dependencies. */ def usesInternalSrc(dep: File): Set[File] /** External source dependencies that internal source file `src` depends on. This includes both direct and inherited dependencies. */ def externalDeps(src: File): Set[String] /** Internal source dependencies that depend on external source file `dep`. This includes both direct and inherited dependencies. */ def usesExternal(dep: String): Set[File] private[inc] def usedNames(src: File): Set[String] /** Records internal source file `src` as generating class file `prod` with top-level class `name`. */ @deprecated("Record all products using `addProducts`.", "0.13.8") def addProduct(src: File, prod: File, name: String): Relations /** * Records internal source file `src` as dependending on `dependsOn`. If this dependency is introduced * by an inheritance relation, `inherited` is set to true. Note that in this case, the dependency is * also registered as a direct dependency. 
*/ @deprecated("Record all external dependencies using `addExternalDeps`.", "0.13.8") def addExternalDep(src: File, dependsOn: String, inherited: Boolean): Relations /** Records internal source file `src` depending on a dependency binary dependency `dependsOn`.*/ @deprecated("Record all binary dependencies using `addBinaryDeps`.", "0.13.8") def addBinaryDep(src: File, dependsOn: File): Relations /** * Records internal source file `src` as having direct dependencies on internal source files `directDependsOn` * and inheritance dependencies on `inheritedDependsOn`. Everything in `inheritedDependsOn` must be included in `directDependsOn`; * this method does not automatically record direct dependencies like `addExternalDep` does. */ @deprecated("Record all internal dependencies using `addInternalSrcDeps(File, Iterable[InternalDependencies])`.", "0.13.8") def addInternalSrcDeps(src: File, directDependsOn: Iterable[File], inheritedDependsOn: Iterable[File]): Relations /** * Records that the file `src` generates products `products`, has internal dependencies `internalDeps`, * has external dependencies `externalDeps` and binary dependencies `binaryDeps`. */ def addSource(src: File, products: Iterable[(File, String)], internalDeps: Iterable[InternalDependency], externalDeps: Iterable[ExternalDependency], binaryDeps: Iterable[(File, String, Stamp)]): Relations = addProducts(src, products).addInternalSrcDeps(src, internalDeps).addExternalDeps(src, externalDeps).addBinaryDeps(src, binaryDeps) /** * Records all the products `prods` generated by `src` */ private[inc] def addProducts(src: File, prods: Iterable[(File, String)]): Relations /** * Records all the internal source dependencies `deps` of `src` */ private[inc] def addInternalSrcDeps(src: File, deps: Iterable[InternalDependency]): Relations /** * Records all the external dependencies `deps` of `src` */ private[inc] def addExternalDeps(src: File, deps: Iterable[ExternalDependency]): Relations /** * Records all the binary dependencies `deps` of `src` */ private[inc] def addBinaryDeps(src: File, deps: Iterable[(File, String, Stamp)]): Relations private[inc] def addUsedName(src: File, name: String): Relations /** Concatenates the two relations. Acts naively, i.e., doesn't internalize external deps on added files. */ def ++(o: Relations): Relations /** Drops all dependency mappings a->b where a is in `sources`. Acts naively, i.e., doesn't externalize internal deps on removed files. */ def --(sources: Iterable[File]): Relations @deprecated("OK to remove in 0.14", "0.13.1") def groupBy[K](f: (File => K)): Map[K, Relations] /** The relation between internal sources and generated class files. */ def srcProd: Relation[File, File] /** The dependency relation between internal sources and binaries. */ def binaryDep: Relation[File, File] /** The dependency relation between internal sources. This includes both direct and inherited dependencies.*/ def internalSrcDep: Relation[File, File] /** The dependency relation between internal and external sources. This includes both direct and inherited dependencies.*/ def externalDep: Relation[File, String] /** All the internal dependencies */ private[inc] def internalDependencies: InternalDependencies /** All the external dependencies */ private[inc] def externalDependencies: ExternalDependencies /** * The source dependency relation between source files introduced by member reference. * * NOTE: All inheritance dependencies are included in this relation because in order to * inherit from a member you have to refer to it. 
If you check documentation of `inheritance` * you'll see that there's small oddity related to traits being the first parent of a * class/trait that results in additional parents being introduced due to normalization. * This relation properly accounts for that so the invariant that `memberRef` is a superset * of `inheritance` is preserved. */ private[inc] def memberRef: SourceDependencies /** * The source dependency relation between source files introduced by inheritance. * The dependency by inheritance is introduced when a template (class or trait) mentions * a given type in a parent position. * * NOTE: Due to an oddity in how Scala's type checker works there's one unexpected dependency * on a class being introduced. An example illustrates the best the problem. Let's consider * the following structure: * * trait A extends B * trait B extends C * trait C extends D * class D * * We are interested in dependencies by inheritance of `A`. One would expect it to be just `B` * but the answer is `B` and `D`. The reason is because Scala's type checker performs a certain * normalization so the first parent of a type is a class. Therefore the example above is normalized * to the following form: * * trait A extends D with B * trait B extends D with C * trait C extends D * class D * * Therefore if you inherit from a trait you'll get an additional dependency on a class that is * resolved transitively. You should not rely on this behavior, though. * */ private[inc] def inheritance: SourceDependencies /** The dependency relations between sources. These include both direct and inherited dependencies.*/ def direct: Source /** The inheritance dependency relations between sources.*/ def publicInherited: Source /** The relation between a source file and the fully qualified names of classes generated from it.*/ def classes: Relation[File, String] /** * Flag which indicates whether given Relations object supports operations needed by name hashing algorithm. * * At the moment the list includes the following operations: * * - memberRef: SourceDependencies * - inheritance: SourceDependencies * * The `memberRef` and `inheritance` implement a new style source dependency tracking. When this flag is * enabled access to `direct` and `publicInherited` relations is illegal and will cause runtime exception * being thrown. That is done as an optimization that prevents from storing two overlapping sets of * dependencies. * * Conversely, when `nameHashing` flag is disabled access to `memberRef` and `inheritance` * relations is illegal and will cause runtime exception being thrown. */ private[inc] def nameHashing: Boolean /** * Relation between source files and _unqualified_ term and type names used in given source file. */ private[inc] def names: Relation[File, String] /** * Lists of all the pairs (header, relation) that sbt knows of. * Used by TextAnalysisFormat to persist relations. * This cannot be stored as a Map because the order is important. */ private[inc] def allRelations: List[(String, Relation[File, _])] } object Relations { /** * Represents all the relations that sbt knows of along with a way to recreate each * of their elements from their string representation. 
*/ private[inc] val existingRelations = { val string2File: String => File = new File(_) List( ("products", string2File), ("binary dependencies", string2File), ("direct source dependencies", string2File), ("direct external dependencies", identity[String] _), ("public inherited source dependencies", string2File), ("public inherited external dependencies", identity[String] _), ("member reference internal dependencies", string2File), ("member reference external dependencies", identity[String] _), ("inheritance internal dependencies", string2File), ("inheritance external dependencies", identity[String] _), ("class names", identity[String] _), ("used names", identity[String] _)) } /** * Reconstructs a Relations from a list of Relation * The order in which the relations are read matters and is defined by `existingRelations`. */ def construct(nameHashing: Boolean, relations: List[Relation[_, _]]) = relations match { case p :: bin :: di :: de :: pii :: pie :: mri :: mre :: ii :: ie :: cn :: un :: Nil => val srcProd = p.asInstanceOf[Relation[File, File]] val binaryDep = bin.asInstanceOf[Relation[File, File]] val directSrcDeps = makeSource(di.asInstanceOf[Relation[File, File]], de.asInstanceOf[Relation[File, String]]) val publicInheritedSrcDeps = makeSource(pii.asInstanceOf[Relation[File, File]], pie.asInstanceOf[Relation[File, String]]) val memberRefSrcDeps = makeSourceDependencies(mri.asInstanceOf[Relation[File, File]], mre.asInstanceOf[Relation[File, String]]) val inheritanceSrcDeps = makeSourceDependencies(ii.asInstanceOf[Relation[File, File]], ie.asInstanceOf[Relation[File, String]]) val classes = cn.asInstanceOf[Relation[File, String]] val names = un.asInstanceOf[Relation[File, String]] // we don't check for emptiness of publicInherited/inheritance relations because // we assume that invariant that says they are subsets of direct/memberRef holds assert(nameHashing || (memberRefSrcDeps == emptySourceDependencies), "When name hashing is disabled the `memberRef` relation should be empty.") assert(!nameHashing || (directSrcDeps == emptySource), "When name hashing is enabled the `direct` relation should be empty.") if (nameHashing) { val internal = InternalDependencies(Map(DependencyByMemberRef -> mri.asInstanceOf[Relation[File, File]], DependencyByInheritance -> ii.asInstanceOf[Relation[File, File]])) val external = ExternalDependencies(Map(DependencyByMemberRef -> mre.asInstanceOf[Relation[File, String]], DependencyByInheritance -> ie.asInstanceOf[Relation[File, String]])) Relations.make(srcProd, binaryDep, internal, external, classes, names) } else { assert(names.all.isEmpty, s"When `nameHashing` is disabled `names` relation should be empty: $names") Relations.make(srcProd, binaryDep, directSrcDeps, publicInheritedSrcDeps, classes) } case _ => throw new java.io.IOException(s"Expected to read ${existingRelations.length} relations but read ${relations.length}.") } /** Tracks internal and external source dependencies for a specific dependency type, such as direct or inherited.*/ final class Source private[sbt] (val internal: Relation[File, File], val external: Relation[File, String]) { def addInternal(source: File, dependsOn: Iterable[File]): Source = new Source(internal + (source, dependsOn), external) @deprecated("Use addExternal(File, Iterable[String])", "0.13.8") def addExternal(source: File, dependsOn: String): Source = new Source(internal, external + (source, dependsOn)) def addExternal(source: File, dependsOn: Iterable[String]): Source = new Source(internal, external + (source, dependsOn)) 
/** Drops all dependency mappings from `sources`. Acts naively, i.e., doesn't externalize internal deps on removed files.*/ def --(sources: Iterable[File]): Source = new Source(internal -- sources, external -- sources) def ++(o: Source): Source = new Source(internal ++ o.internal, external ++ o.external) @deprecated("Broken implementation. OK to remove in 0.14", "0.13.1") def groupBySource[K](f: File => K): Map[K, Source] = { val i = internal.groupBy { case (a, b) => f(a) } val e = external.groupBy { case (a, b) => f(a) } val pairs = for (k <- i.keySet ++ e.keySet) yield (k, new Source(getOrEmpty(i, k), getOrEmpty(e, k))) pairs.toMap } override def equals(other: Any) = other match { case o: Source => internal == o.internal && external == o.external case _ => false } override def hashCode = (internal, external).hashCode } /** Tracks internal and external source dependencies for a specific dependency type, such as direct or inherited.*/ private[inc] final class SourceDependencies(val internal: Relation[File, File], val external: Relation[File, String]) { def addInternal(source: File, dependsOn: Iterable[File]): SourceDependencies = new SourceDependencies(internal + (source, dependsOn), external) @deprecated("Use addExternal(File, Iterable[String])", "0.13.8") def addExternal(source: File, dependsOn: String): SourceDependencies = new SourceDependencies(internal, external + (source, dependsOn)) def addExternal(source: File, dependsOn: Iterable[String]): SourceDependencies = new SourceDependencies(internal, external + (source, dependsOn)) /** Drops all dependency mappings from `sources`. Acts naively, i.e., doesn't externalize internal deps on removed files.*/ def --(sources: Iterable[File]): SourceDependencies = new SourceDependencies(internal -- sources, external -- sources) def ++(o: SourceDependencies): SourceDependencies = new SourceDependencies(internal ++ o.internal, external ++ o.external) override def equals(other: Any) = other match { case o: SourceDependencies => internal == o.internal && external == o.external case _ => false } override def hashCode = (internal, external).hashCode } private[sbt] def getOrEmpty[A, B, K](m: Map[K, Relation[A, B]], k: K): Relation[A, B] = m.getOrElse(k, Relation.empty) private[this] lazy val e = Relation.empty[File, File] private[this] lazy val estr = Relation.empty[File, String] private[this] lazy val es = new Source(e, estr) def emptySource: Source = es private[inc] lazy val emptySourceDependencies: SourceDependencies = new SourceDependencies(e, estr) def empty: Relations = empty(nameHashing = IncOptions.nameHashingDefault) private[inc] def empty(nameHashing: Boolean): Relations = if (nameHashing) new MRelationsNameHashing(e, e, InternalDependencies.empty, ExternalDependencies.empty, estr, estr) else new MRelationsDefaultImpl(e, e, es, es, estr) def make(srcProd: Relation[File, File], binaryDep: Relation[File, File], direct: Source, publicInherited: Source, classes: Relation[File, String]): Relations = new MRelationsDefaultImpl(srcProd, binaryDep, direct = direct, publicInherited = publicInherited, classes) private[inc] def make(srcProd: Relation[File, File], binaryDep: Relation[File, File], internalDependencies: InternalDependencies, externalDependencies: ExternalDependencies, classes: Relation[File, String], names: Relation[File, String]): Relations = new MRelationsNameHashing(srcProd, binaryDep, internalDependencies = internalDependencies, externalDependencies = externalDependencies, classes, names) def makeSource(internal: Relation[File, File], 
external: Relation[File, String]): Source = new Source(internal, external) private[inc] def makeSourceDependencies(internal: Relation[File, File], external: Relation[File, String]): SourceDependencies = new SourceDependencies(internal, external) } private object DependencyCollection { /** * Combine `m1` and `m2` such that the result contains all the dependencies they represent. * `m1` is expected to be smaller than `m2`. */ def joinMaps[T](m1: Map[DependencyContext, Relation[File, T]], m2: Map[DependencyContext, Relation[File, T]]) = m1.foldLeft(m2) { case (tmp, (key, values)) => tmp.updated(key, tmp.getOrElse(key, Relation.empty) ++ values) } } private object InternalDependencies { /** * Constructs an empty `InteralDependencies` */ def empty = InternalDependencies(Map.empty) } private case class InternalDependencies(dependencies: Map[DependencyContext, Relation[File, File]]) { /** * Adds `dep` to the dependencies */ def +(dep: InternalDependency): InternalDependencies = InternalDependencies(dependencies.updated(dep.context, dependencies.getOrElse(dep.context, Relation.empty) + (dep.sourceFile, dep.targetFile))) /** * Adds all `deps` to the dependencies */ def ++(deps: Iterable[InternalDependency]): InternalDependencies = deps.foldLeft(this)(_ + _) def ++(deps: InternalDependencies): InternalDependencies = InternalDependencies(DependencyCollection.joinMaps(dependencies, deps.dependencies)) /** * Removes all dependencies from `sources` to another file from the dependencies */ def --(sources: Iterable[File]): InternalDependencies = InternalDependencies(dependencies.mapValues(_ -- sources).filter(_._2.size > 0)) } private object ExternalDependencies { /** * Constructs an empty `ExternalDependencies` */ def empty = ExternalDependencies(Map.empty) } private case class ExternalDependencies(dependencies: Map[DependencyContext, Relation[File, String]]) { /** * Adds `dep` to the dependencies */ def +(dep: ExternalDependency): ExternalDependencies = ExternalDependencies(dependencies.updated(dep.context, dependencies.getOrElse(dep.context, Relation.empty) + (dep.sourceFile, dep.targetClassName))) /** * Adds all `deps` to the dependencies */ def ++(deps: Iterable[ExternalDependency]): ExternalDependencies = deps.foldLeft(this)(_ + _) def ++(deps: ExternalDependencies): ExternalDependencies = ExternalDependencies(DependencyCollection.joinMaps(dependencies, deps.dependencies)) /** * Removes all dependencies from `sources` to another file from the dependencies */ def --(sources: Iterable[File]): ExternalDependencies = ExternalDependencies(dependencies.mapValues(_ -- sources).filter(_._2.size > 0)) } /** * An abstract class that contains common functionality inherited by two implementations of Relations trait. * * A little note why we have two different implementations of Relations trait. This is needed for the time * being when we are slowly migrating to the new invalidation algorithm called "name hashing" which requires * some subtle changes to dependency tracking. For some time we plan to keep both algorithms side-by-side * and have a runtime switch which allows to pick one. So we need logic for both old and new dependency * tracking to be available. That's exactly what two subclasses of MRelationsCommon implement. Once name * hashing is proven to be stable and reliable we'll phase out the old algorithm and the old dependency tracking * logic. * * `srcProd` is a relation between a source file and a product: (source, product). 
* Note that some source files may not have a product and will not be included in this relation. * * `binaryDeps` is a relation between a source file and a binary dependency: (source, binary dependency). * This only includes dependencies on classes and jars that do not have a corresponding source/API to track instead. * A class or jar with a corresponding source should only be tracked in one of the source dependency relations. * * `classes` is a relation between a source file and its generated fully-qualified class names. */ private abstract class MRelationsCommon(val srcProd: Relation[File, File], val binaryDep: Relation[File, File], val classes: Relation[File, String]) extends Relations { def allSources: collection.Set[File] = srcProd._1s def allProducts: collection.Set[File] = srcProd._2s def allBinaryDeps: collection.Set[File] = binaryDep._2s def allInternalSrcDeps: collection.Set[File] = internalSrcDep._2s def allExternalDeps: collection.Set[String] = externalDep._2s def classNames(src: File): Set[String] = classes.forward(src) def definesClass(name: String): Set[File] = classes.reverse(name) def products(src: File): Set[File] = srcProd.forward(src) def produced(prod: File): Set[File] = srcProd.reverse(prod) def binaryDeps(src: File): Set[File] = binaryDep.forward(src) def usesBinary(dep: File): Set[File] = binaryDep.reverse(dep) def internalSrcDeps(src: File): Set[File] = internalSrcDep.forward(src) def usesInternalSrc(dep: File): Set[File] = internalSrcDep.reverse(dep) def externalDeps(src: File): Set[String] = externalDep.forward(src) def usesExternal(dep: String): Set[File] = externalDep.reverse(dep) def usedNames(src: File): Set[String] = names.forward(src) /** Making large Relations a little readable. */ private val userDir = sys.props("user.dir").stripSuffix("/") + "/" private def nocwd(s: String) = s stripPrefix userDir private def line_s(kv: (Any, Any)) = " " + nocwd("" + kv._1) + " -> " + nocwd("" + kv._2) + "\n" protected def relation_s(r: Relation[_, _]) = ( if (r.forwardMap.isEmpty) "Relation [ ]" else (r.all.toSeq map line_s sorted) mkString ("Relation [\n", "", "]") ) } /** * This class implements Relations trait with support for tracking of `direct` and `publicInherited` source * dependencies. Therefore this class preserves the "old" (from sbt 0.13.0) dependency tracking logic and it's * a default implementation. * * `direct` defines relations for dependencies between internal and external source dependencies. It includes all types of * dependencies, including inheritance. * * `publicInherited` defines relations for internal and external source dependencies, only including dependencies * introduced by inheritance. 
* */ private class MRelationsDefaultImpl(srcProd: Relation[File, File], binaryDep: Relation[File, File], // direct should include everything in inherited val direct: Source, val publicInherited: Source, classes: Relation[File, String]) extends MRelationsCommon(srcProd, binaryDep, classes) { def internalSrcDep: Relation[File, File] = direct.internal def externalDep: Relation[File, String] = direct.external def nameHashing: Boolean = false def memberRef: SourceDependencies = throw new UnsupportedOperationException("The `memberRef` source dependencies relation is not supported " + "when `nameHashing` flag is disabled.") def inheritance: SourceDependencies = throw new UnsupportedOperationException("The `memberRef` source dependencies relation is not supported " + "when `nameHashing` flag is disabled.") def addProduct(src: File, prod: File, name: String): Relations = new MRelationsDefaultImpl(srcProd + (src, prod), binaryDep, direct = direct, publicInherited = publicInherited, classes + (src, name)) def addProducts(src: File, products: Iterable[(File, String)]): Relations = new MRelationsDefaultImpl(srcProd ++ products.map(p => (src, p._1)), binaryDep, direct = direct, publicInherited = publicInherited, classes ++ products.map(p => (src, p._2))) def addInternalSrcDeps(src: File, deps: Iterable[InternalDependency]) = { val depsByInheritance = deps.collect { case InternalDependency(_, targetFile, DependencyByInheritance) => targetFile } val newD = direct.addInternal(src, deps.map(_.targetFile)) val newI = publicInherited.addInternal(src, depsByInheritance) new MRelationsDefaultImpl(srcProd, binaryDep, direct = newD, publicInherited = newI, classes) } def addInternalSrcDeps(src: File, directDependsOn: Iterable[File], inheritedDependsOn: Iterable[File]): Relations = { val directDeps = directDependsOn.map(d => InternalDependency(src, d, DependencyByMemberRef)) val inheritedDeps = inheritedDependsOn.map(d => InternalDependency(src, d, DependencyByInheritance)) addInternalSrcDeps(src, directDeps ++ inheritedDeps) } def addExternalDeps(src: File, deps: Iterable[ExternalDependency]) = { val depsByInheritance = deps.collect { case ExternalDependency(_, targetClassName, _, DependencyByInheritance) => targetClassName } val newD = direct.addExternal(src, deps.map(_.targetClassName)) val newI = publicInherited.addExternal(src, depsByInheritance) new MRelationsDefaultImpl(srcProd, binaryDep, direct = newD, publicInherited = newI, classes) } def addExternalDep(src: File, dependsOn: String, inherited: Boolean): Relations = { val newI = if (inherited) publicInherited.addExternal(src, dependsOn :: Nil) else publicInherited val newD = direct.addExternal(src, dependsOn :: Nil) new MRelationsDefaultImpl(srcProd, binaryDep, direct = newD, publicInherited = newI, classes) } def addBinaryDeps(src: File, deps: Iterable[(File, String, Stamp)]) = new MRelationsDefaultImpl(srcProd, binaryDep + (src, deps.map(_._1)), direct, publicInherited, classes) def addBinaryDep(src: File, dependsOn: File): Relations = new MRelationsDefaultImpl(srcProd, binaryDep + (src, dependsOn), direct = direct, publicInherited = publicInherited, classes) def names: Relation[File, String] = throw new UnsupportedOperationException("Tracking of used names is not supported " + "when `nameHashing` is disabled.") def addUsedName(src: File, name: String): Relations = throw new UnsupportedOperationException("Tracking of used names is not supported " + "when `nameHashing` is disabled.") override def externalDependencies: ExternalDependencies = 
ExternalDependencies(Map(DependencyByMemberRef -> direct.external, DependencyByInheritance -> publicInherited.external)) override def internalDependencies: InternalDependencies = InternalDependencies(Map(DependencyByMemberRef -> direct.internal, DependencyByInheritance -> publicInherited.internal)) def ++(o: Relations): Relations = { if (nameHashing != o.nameHashing) throw new UnsupportedOperationException("The `++` operation is not supported for relations " + "with different values of `nameHashing` flag.") new MRelationsDefaultImpl(srcProd ++ o.srcProd, binaryDep ++ o.binaryDep, direct ++ o.direct, publicInherited ++ o.publicInherited, classes ++ o.classes) } def --(sources: Iterable[File]) = new MRelationsDefaultImpl(srcProd -- sources, binaryDep -- sources, direct = direct -- sources, publicInherited = publicInherited -- sources, classes -- sources) @deprecated("Broken implementation. OK to remove in 0.14", "0.13.1") def groupBy[K](f: File => K): Map[K, Relations] = { type MapRel[T] = Map[K, Relation[File, T]] def outerJoin(srcProdMap: MapRel[File], binaryDepMap: MapRel[File], direct: Map[K, Source], inherited: Map[K, Source], classesMap: MapRel[String], namesMap: MapRel[String]): Map[K, Relations] = { def kRelations(k: K): Relations = { def get[T](m: Map[K, Relation[File, T]]) = Relations.getOrEmpty(m, k) def getSrc(m: Map[K, Source]): Source = m.getOrElse(k, Relations.emptySource) def getSrcDeps(m: Map[K, SourceDependencies]): SourceDependencies = m.getOrElse(k, Relations.emptySourceDependencies) new MRelationsDefaultImpl(get(srcProdMap), get(binaryDepMap), getSrc(direct), getSrc(inherited), get(classesMap)) } val keys = (srcProdMap.keySet ++ binaryDepMap.keySet ++ direct.keySet ++ inherited.keySet ++ classesMap.keySet).toList Map(keys.map((k: K) => (k, kRelations(k))): _*) } def f1[B](item: (File, B)): K = f(item._1) outerJoin(srcProd.groupBy(f1), binaryDep.groupBy(f1), direct.groupBySource(f), publicInherited.groupBySource(f), classes.groupBy(f1), names.groupBy(f1)) } override def equals(other: Any) = other match { case o: MRelationsDefaultImpl => srcProd == o.srcProd && binaryDep == o.binaryDep && direct == o.direct && publicInherited == o.publicInherited && classes == o.classes case _ => false } def allRelations = { val rels = List( srcProd, binaryDep, direct.internal, direct.external, publicInherited.internal, publicInherited.external, Relations.emptySourceDependencies.internal, // Default implementation doesn't provide memberRef source deps Relations.emptySourceDependencies.external, // Default implementation doesn't provide memberRef source deps Relations.emptySourceDependencies.internal, // Default implementation doesn't provide inheritance source deps Relations.emptySourceDependencies.external, // Default implementation doesn't provide inheritance source deps classes, Relation.empty[File, String]) // Default implementation doesn't provide used names relation Relations.existingRelations map (_._1) zip rels } override def hashCode = (srcProd :: binaryDep :: direct :: publicInherited :: classes :: Nil).hashCode override def toString = ( """ |Relations: | products: %s | bin deps: %s | src deps direct: %s | src deps inherited: %s | ext deps: %s | class names: %s """.trim.stripMargin.format(List(srcProd, binaryDep, internalSrcDep, publicInherited.internal, externalDep, classes) map relation_s: _*) ) } /** * This class implements Relations trait with support for tracking of `memberRef` and `inheritance` source * dependencies. 
Therefore this class implements the new (compared to sbt 0.13.0) dependency tracking logic * needed by the name hashing invalidation algorithm. */ private class MRelationsNameHashing(srcProd: Relation[File, File], binaryDep: Relation[File, File], val internalDependencies: InternalDependencies, val externalDependencies: ExternalDependencies, classes: Relation[File, String], val names: Relation[File, String]) extends MRelationsCommon(srcProd, binaryDep, classes) { def direct: Source = throw new UnsupportedOperationException("The `direct` source dependencies relation is not supported " + "when `nameHashing` flag is enabled.") def publicInherited: Source = throw new UnsupportedOperationException("The `publicInherited` source dependencies relation is not supported " + "when `nameHashing` flag is enabled.") val nameHashing: Boolean = true def internalSrcDep: Relation[File, File] = memberRef.internal def externalDep: Relation[File, String] = memberRef.external def addProduct(src: File, prod: File, name: String): Relations = new MRelationsNameHashing(srcProd + (src, prod), binaryDep, internalDependencies = internalDependencies, externalDependencies = externalDependencies, classes + (src, name), names = names) def addProducts(src: File, products: Iterable[(File, String)]): Relations = new MRelationsNameHashing(srcProd ++ products.map(p => (src, p._1)), binaryDep, internalDependencies = internalDependencies, externalDependencies = externalDependencies, classes ++ products.map(p => (src, p._2)), names = names) def addInternalSrcDeps(src: File, deps: Iterable[InternalDependency]) = new MRelationsNameHashing(srcProd, binaryDep, internalDependencies = internalDependencies ++ deps, externalDependencies = externalDependencies, classes, names) def addInternalSrcDeps(src: File, dependsOn: Iterable[File], inherited: Iterable[File]): Relations = { val memberRefDeps = dependsOn.map(InternalDependency(src, _, DependencyByMemberRef)) val inheritedDeps = inherited.map(InternalDependency(src, _, DependencyByInheritance)) addInternalSrcDeps(src, memberRefDeps ++ inheritedDeps) } def addExternalDeps(src: File, deps: Iterable[ExternalDependency]) = new MRelationsNameHashing(srcProd, binaryDep, internalDependencies = internalDependencies, externalDependencies = externalDependencies ++ deps, classes, names) def addExternalDep(src: File, dependsOn: String, inherited: Boolean): Relations = throw new UnsupportedOperationException("This method is not supported when `nameHashing` flag is enabled.") def addBinaryDeps(src: File, deps: Iterable[(File, String, Stamp)]) = new MRelationsNameHashing(srcProd, binaryDep + (src, deps.map(_._1)), internalDependencies = internalDependencies, externalDependencies = externalDependencies, classes, names) def addBinaryDep(src: File, dependsOn: File): Relations = new MRelationsNameHashing(srcProd, binaryDep + (src, dependsOn), internalDependencies = internalDependencies, externalDependencies = externalDependencies, classes, names = names) def addUsedName(src: File, name: String): Relations = new MRelationsNameHashing(srcProd, binaryDep, internalDependencies = internalDependencies, externalDependencies = externalDependencies, classes, names = names + (src, name)) override def inheritance: SourceDependencies = new SourceDependencies(internalDependencies.dependencies.getOrElse(DependencyByInheritance, Relation.empty), externalDependencies.dependencies.getOrElse(DependencyByInheritance, Relation.empty)) override def memberRef: SourceDependencies = new 
SourceDependencies(internalDependencies.dependencies.getOrElse(DependencyByMemberRef, Relation.empty), externalDependencies.dependencies.getOrElse(DependencyByMemberRef, Relation.empty)) def ++(o: Relations): Relations = { if (!o.nameHashing) throw new UnsupportedOperationException("The `++` operation is not supported for relations " + "with different values of `nameHashing` flag.") new MRelationsNameHashing(srcProd ++ o.srcProd, binaryDep ++ o.binaryDep, internalDependencies = internalDependencies ++ o.internalDependencies, externalDependencies = externalDependencies ++ o.externalDependencies, classes ++ o.classes, names = names ++ o.names) } def --(sources: Iterable[File]) = new MRelationsNameHashing(srcProd -- sources, binaryDep -- sources, internalDependencies = internalDependencies -- sources, externalDependencies = externalDependencies -- sources, classes -- sources, names = names -- sources) def groupBy[K](f: File => K): Map[K, Relations] = { throw new UnsupportedOperationException("Merging of Analyses that have" + "`relations.nameHashing` set to `true` is not supported.") } override def equals(other: Any) = other match { case o: MRelationsNameHashing => srcProd == o.srcProd && binaryDep == o.binaryDep && memberRef == o.memberRef && inheritance == o.inheritance && classes == o.classes case _ => false } def allRelations = { val rels = List( srcProd, binaryDep, Relations.emptySource.internal, // NameHashing doesn't provide direct dependencies Relations.emptySource.external, // NameHashing doesn't provide direct dependencies Relations.emptySource.internal, // NameHashing doesn't provide public inherited dependencies Relations.emptySource.external, // NameHashing doesn't provide public inherited dependencies memberRef.internal, memberRef.external, inheritance.internal, inheritance.external, classes, names) Relations.existingRelations map (_._1) zip rels } override def hashCode = (srcProd :: binaryDep :: memberRef :: inheritance :: classes :: Nil).hashCode override def toString = ( """ |Relations (with name hashing enabled): | products: %s | bin deps: %s | src deps memberRef: %s | src deps inheritance: %s | ext deps: %s | class names: %s | used names: %s """.trim.stripMargin.format(List(srcProd, binaryDep, memberRef.internal, inheritance.internal, externalDep, classes, names) map relation_s: _*) ) }
som-snytt/xsbt
compile/inc/src/main/scala/sbt/inc/Relations.scala
Scala
bsd-3-clause
38,575
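DependencyCollection.joinMaps above merges two per-context dependency maps by unioning, not overwriting, the relations that share a DependencyContext key. A plain-collections analogue of that fold, with strings standing in for contexts and sets of pairs standing in for sbt's Relation type:

object JoinMapsSketch extends App {
  // Stand-in shape: context -> set of (source, dependency) edges.
  type Deps = Map[String, Set[(String, String)]]

  // Same shape as DependencyCollection.joinMaps: start from m2 and fold m1 in,
  // unioning the relations that share a context key.
  def joinMaps(m1: Deps, m2: Deps): Deps =
    m1.foldLeft(m2) { case (acc, (key, values)) =>
      acc.updated(key, acc.getOrElse(key, Set.empty[(String, String)]) ++ values)
    }

  val a: Deps = Map("memberRef" -> Set("A.scala" -> "B.scala"))
  val b: Deps = Map(
    "memberRef" -> Set("A.scala" -> "C.scala"),
    "inheritance" -> Set("A.scala" -> "B.scala"))

  // memberRef ends up with both edges; inheritance is carried over unchanged.
  println(joinMaps(a, b))
}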
/** * ==== * This file is part of SensApp [ http://sensapp.modelbased.net ] * * Copyright (C) 2011- SINTEF ICT * Contact: SINTEF ICT <[email protected]> * * Module: net.modelbased.sensapp * * SensApp is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * SensApp is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General * Public License along with SensApp. If not, see * <http://www.gnu.org/licenses/>. * ==== * * This file is part of SensApp [ http://sensapp.modelbased.net ] * * Copyright (C) 2012- SINTEF ICT * Contact: SINTEF ICT <[email protected]> * * Module: net.modelbased.sensapp.library.system * * SensApp is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * SensApp is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General * Public License along with SensApp. If not, see * <http://www.gnu.org/licenses/>. */ package net.modelbased.sensapp.library.system //import cc.spray.can.HttpClient //import akka.config.Supervision._ import akka.actor.{Props, ActorSystem} import cc.spray.can.client.HttpClient import cc.spray.io.IoWorker trait HttpSpraySupport { def httpClientName: String implicit val system = ActorSystem() protected lazy val ioWorker = new IoWorker(system).start() protected lazy val httpClient = system.actorOf(props = Props(new HttpClient(ioWorker)), name = httpClientName) }
SINTEF-9012/sensapp
net.modelbased.sensapp.library.system/src/main/scala/net/modelbased/sensapp/library/system/HttpSpraySupport.scala
Scala
lgpl-3.0
2,288
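A minimal sketch of mixing in HttpSpraySupport above, assuming the legacy spray-can and Akka artifacts used by this module are on the classpath; the only abstract member is httpClientName, which names the actor backing the shared HTTP client:

import net.modelbased.sensapp.library.system.HttpSpraySupport

object PartnerHttp extends HttpSpraySupport {
  // Names the HttpClient actor created lazily by the trait.
  def httpClientName = "partner-http-client"
}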
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.admin import kafka.common._ import kafka.cluster.{BrokerEndPoint, Broker} import kafka.log.LogConfig import kafka.server.ConfigType import kafka.utils._ import kafka.utils.ZkUtils._ import kafka.api.{TopicMetadata, PartitionMetadata} import java.util.Random import java.util.Properties import org.apache.kafka.common.errors.{ReplicaNotAvailableException, InvalidTopicException, LeaderNotAvailableException} import org.apache.kafka.common.protocol.{Errors, SecurityProtocol} import scala.Predef._ import scala.collection._ import mutable.ListBuffer import scala.collection.mutable import collection.Map import collection.Set import org.I0Itec.zkclient.exception.ZkNodeExistsException object AdminUtils extends Logging { val rand = new Random val AdminClientId = "__admin_client" val EntityConfigChangeZnodePrefix = "config_change_" /** * There are 2 goals of replica assignment: * 1. Spread the replicas evenly among brokers. * 2. For partitions assigned to a particular broker, their other replicas are spread over the other brokers. * * To achieve this goal, we: * 1. Assign the first replica of each partition by round-robin, starting from a random position in the broker list. * 2. Assign the remaining replicas of each partition with an increasing shift. 
* * Here is an example of assigning * broker-0 broker-1 broker-2 broker-3 broker-4 * p0 p1 p2 p3 p4 (1st replica) * p5 p6 p7 p8 p9 (1st replica) * p4 p0 p1 p2 p3 (2nd replica) * p8 p9 p5 p6 p7 (2nd replica) * p3 p4 p0 p1 p2 (3nd replica) * p7 p8 p9 p5 p6 (3nd replica) */ def assignReplicasToBrokers(brokerList: Seq[Int], nPartitions: Int, replicationFactor: Int, fixedStartIndex: Int = -1, startPartitionId: Int = -1) : Map[Int, Seq[Int]] = { if (nPartitions <= 0) throw new AdminOperationException("number of partitions must be larger than 0") if (replicationFactor <= 0) throw new AdminOperationException("replication factor must be larger than 0") if (replicationFactor > brokerList.size) throw new AdminOperationException("replication factor: " + replicationFactor + " larger than available brokers: " + brokerList.size) val ret = new mutable.HashMap[Int, List[Int]]() val startIndex = if (fixedStartIndex >= 0) fixedStartIndex else rand.nextInt(brokerList.size) var currentPartitionId = if (startPartitionId >= 0) startPartitionId else 0 var nextReplicaShift = if (fixedStartIndex >= 0) fixedStartIndex else rand.nextInt(brokerList.size) for (i <- 0 until nPartitions) { if (currentPartitionId > 0 && (currentPartitionId % brokerList.size == 0)) nextReplicaShift += 1 val firstReplicaIndex = (currentPartitionId + startIndex) % brokerList.size var replicaList = List(brokerList(firstReplicaIndex)) for (j <- 0 until replicationFactor - 1) replicaList ::= brokerList(replicaIndex(firstReplicaIndex, nextReplicaShift, j, brokerList.size)) ret.put(currentPartitionId, replicaList.reverse) currentPartitionId = currentPartitionId + 1 } ret.toMap } /** * Add partitions to existing topic with optional replica assignment * * @param zkUtils Zookeeper utilities * @param topic Topic for adding partitions to * @param numPartitions Number of partitions to be set * @param replicaAssignmentStr Manual replica assignment * @param checkBrokerAvailable Ignore checking if assigned replica broker is available. 
Only used for testing */ def addPartitions(zkUtils: ZkUtils, topic: String, numPartitions: Int = 1, replicaAssignmentStr: String = "", checkBrokerAvailable: Boolean = true) { val existingPartitionsReplicaList = zkUtils.getReplicaAssignmentForTopics(List(topic)) if (existingPartitionsReplicaList.size == 0) throw new AdminOperationException("The topic %s does not exist".format(topic)) val existingReplicaList = existingPartitionsReplicaList.head._2 val partitionsToAdd = numPartitions - existingPartitionsReplicaList.size if (partitionsToAdd <= 0) throw new AdminOperationException("The number of partitions for a topic can only be increased") // create the new partition replication list val brokerList = zkUtils.getSortedBrokerList() val newPartitionReplicaList = if (replicaAssignmentStr == null || replicaAssignmentStr == "") AdminUtils.assignReplicasToBrokers(brokerList, partitionsToAdd, existingReplicaList.size, existingReplicaList.head, existingPartitionsReplicaList.size) else getManualReplicaAssignment(replicaAssignmentStr, brokerList.toSet, existingPartitionsReplicaList.size, checkBrokerAvailable) // check if manual assignment has the right replication factor val unmatchedRepFactorList = newPartitionReplicaList.values.filter(p => (p.size != existingReplicaList.size)) if (unmatchedRepFactorList.size != 0) throw new AdminOperationException("The replication factor in manual replication assignment " + " is not equal to the existing replication factor for the topic " + existingReplicaList.size) info("Add partition list for %s is %s".format(topic, newPartitionReplicaList)) val partitionReplicaList = existingPartitionsReplicaList.map(p => p._1.partition -> p._2) // add the new list partitionReplicaList ++= newPartitionReplicaList AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topic, partitionReplicaList, update = true) } def getManualReplicaAssignment(replicaAssignmentList: String, availableBrokerList: Set[Int], startPartitionId: Int, checkBrokerAvailable: Boolean = true): Map[Int, List[Int]] = { var partitionList = replicaAssignmentList.split(",") val ret = new mutable.HashMap[Int, List[Int]]() var partitionId = startPartitionId partitionList = partitionList.takeRight(partitionList.size - partitionId) for (i <- 0 until partitionList.size) { val brokerList = partitionList(i).split(":").map(s => s.trim().toInt) if (brokerList.size <= 0) throw new AdminOperationException("replication factor must be larger than 0") if (brokerList.size != brokerList.toSet.size) throw new AdminOperationException("duplicate brokers in replica assignment: " + brokerList) if (checkBrokerAvailable && !brokerList.toSet.subsetOf(availableBrokerList)) throw new AdminOperationException("some specified brokers not available. 
specified brokers: " + brokerList.toString + "available broker:" + availableBrokerList.toString) ret.put(partitionId, brokerList.toList) if (ret(partitionId).size != ret(startPartitionId).size) throw new AdminOperationException("partition " + i + " has different replication factor: " + brokerList) partitionId = partitionId + 1 } ret.toMap } def deleteTopic(zkUtils: ZkUtils, topic: String) { try { zkUtils.createPersistentPath(getDeleteTopicPath(topic)) } catch { case e1: ZkNodeExistsException => throw new TopicAlreadyMarkedForDeletionException( "topic %s is already marked for deletion".format(topic)) case e2: Throwable => throw new AdminOperationException(e2.toString) } } def isConsumerGroupActive(zkUtils: ZkUtils, group: String) = { zkUtils.getConsumersInGroup(group).nonEmpty } /** * Delete the whole directory of the given consumer group if the group is inactive. * * @param zkUtils Zookeeper utilities * @param group Consumer group * @return whether or not we deleted the consumer group information */ def deleteConsumerGroupInZK(zkUtils: ZkUtils, group: String) = { if (!isConsumerGroupActive(zkUtils, group)) { val dir = new ZKGroupDirs(group) zkUtils.deletePathRecursive(dir.consumerGroupDir) true } else false } /** * Delete the given consumer group's information for the given topic in Zookeeper if the group is inactive. * If the consumer group consumes no other topics, delete the whole consumer group directory. * * @param zkUtils Zookeeper utilities * @param group Consumer group * @param topic Topic of the consumer group information we wish to delete * @return whether or not we deleted the consumer group information for the given topic */ def deleteConsumerGroupInfoForTopicInZK(zkUtils: ZkUtils, group: String, topic: String) = { val topics = zkUtils.getTopicsByConsumerGroup(group) if (topics == Seq(topic)) { deleteConsumerGroupInZK(zkUtils, group) } else if (!isConsumerGroupActive(zkUtils, group)) { val dir = new ZKGroupTopicDirs(group, topic) zkUtils.deletePathRecursive(dir.consumerOwnerDir) zkUtils.deletePathRecursive(dir.consumerOffsetDir) true } else false } /** * Delete every inactive consumer group's information about the given topic in Zookeeper. 
* * @param zkUtils Zookeeper utilities * @param topic Topic of the consumer group information we wish to delete */ def deleteAllConsumerGroupInfoForTopicInZK(zkUtils: ZkUtils, topic: String) { val groups = zkUtils.getAllConsumerGroupsForTopic(topic) groups.foreach(group => deleteConsumerGroupInfoForTopicInZK(zkUtils, group, topic)) } def topicExists(zkUtils: ZkUtils, topic: String): Boolean = zkUtils.zkClient.exists(getTopicPath(topic)) def createTopic(zkUtils: ZkUtils, topic: String, partitions: Int, replicationFactor: Int, topicConfig: Properties = new Properties) { val brokerList = zkUtils.getSortedBrokerList() val replicaAssignment = AdminUtils.assignReplicasToBrokers(brokerList, partitions, replicationFactor) AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topic, replicaAssignment, topicConfig) } def createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils: ZkUtils, topic: String, partitionReplicaAssignment: Map[Int, Seq[Int]], config: Properties = new Properties, update: Boolean = false) { // validate arguments Topic.validate(topic) require(partitionReplicaAssignment.values.map(_.size).toSet.size == 1, "All partitions should have the same number of replicas.") val topicPath = getTopicPath(topic) if (!update) { if (zkUtils.zkClient.exists(topicPath)) throw new TopicExistsException("Topic \\"%s\\" already exists.".format(topic)) else if (Topic.hasCollisionChars(topic)) { val allTopics = zkUtils.getAllTopics() val collidingTopics = allTopics.filter(t => Topic.hasCollision(topic, t)) if (collidingTopics.nonEmpty) { throw new InvalidTopicException("Topic \\"%s\\" collides with existing topics: %s".format(topic, collidingTopics.mkString(", "))) } } } partitionReplicaAssignment.values.foreach(reps => require(reps.size == reps.toSet.size, "Duplicate replica assignment found: " + partitionReplicaAssignment)) // Configs only matter if a topic is being created. Changing configs via AlterTopic is not supported if (!update) { // write out the config if there is any, this isn't transactional with the partition assignments LogConfig.validate(config) writeEntityConfig(zkUtils, ConfigType.Topic, topic, config) } // create the partition assignment writeTopicPartitionAssignment(zkUtils, topic, partitionReplicaAssignment, update) } private def writeTopicPartitionAssignment(zkUtils: ZkUtils, topic: String, replicaAssignment: Map[Int, Seq[Int]], update: Boolean) { try { val zkPath = getTopicPath(topic) val jsonPartitionData = zkUtils.replicaAssignmentZkData(replicaAssignment.map(e => (e._1.toString -> e._2))) if (!update) { info("Topic creation " + jsonPartitionData.toString) zkUtils.createPersistentPath(zkPath, jsonPartitionData) } else { info("Topic update " + jsonPartitionData.toString) zkUtils.updatePersistentPath(zkPath, jsonPartitionData) } debug("Updated path %s with %s for replica assignment".format(zkPath, jsonPartitionData)) } catch { case e: ZkNodeExistsException => throw new TopicExistsException("topic %s already exists".format(topic)) case e2: Throwable => throw new AdminOperationException(e2.toString) } } /** * Update the config for a client and create a change notification so the change will propagate to other brokers * @param zkUtils Zookeeper utilities used to write the config to ZK * @param clientId: The clientId for which configs are being changed * @param configs: The final set of configs that will be applied to the topic. 
If any new configs need to be added or * existing configs need to be deleted, it should be done prior to invoking this API * */ def changeClientIdConfig(zkUtils: ZkUtils, clientId: String, configs: Properties) { changeEntityConfig(zkUtils, ConfigType.Client, clientId, configs) } /** * Update the config for an existing topic and create a change notification so the change will propagate to other brokers * @param zkUtils Zookeeper utilities used to write the config to ZK * @param topic: The topic for which configs are being changed * @param configs: The final set of configs that will be applied to the topic. If any new configs need to be added or * existing configs need to be deleted, it should be done prior to invoking this API * */ def changeTopicConfig(zkUtils: ZkUtils, topic: String, configs: Properties) { if(!topicExists(zkUtils, topic)) throw new AdminOperationException("Topic \\"%s\\" does not exist.".format(topic)) // remove the topic overrides LogConfig.validate(configs) changeEntityConfig(zkUtils, ConfigType.Topic, topic, configs) } private def changeEntityConfig(zkUtils: ZkUtils, entityType: String, entityName: String, configs: Properties) { // write the new config--may not exist if there were previously no overrides writeEntityConfig(zkUtils, entityType, entityName, configs) // create the change notification val seqNode = ZkUtils.EntityConfigChangesPath + "/" + EntityConfigChangeZnodePrefix val content = Json.encode(getConfigChangeZnodeData(entityType, entityName)) zkUtils.zkClient.createPersistentSequential(seqNode, content) } def getConfigChangeZnodeData(entityType: String, entityName: String) : Map[String, Any] = { Map("version" -> 1, "entity_type" -> entityType, "entity_name" -> entityName) } /** * Write out the topic config to zk, if there is any */ private def writeEntityConfig(zkUtils: ZkUtils, entityType: String, entityName: String, config: Properties) { val configMap: mutable.Map[String, String] = { import JavaConversions._ config } val map = Map("version" -> 1, "config" -> configMap) zkUtils.updatePersistentPath(getEntityConfigPath(entityType, entityName), Json.encode(map)) } /** * Read the entity (topic or client) config (if any) from zk */ def fetchEntityConfig(zkUtils: ZkUtils, entityType: String, entity: String): Properties = { val str: String = zkUtils.zkClient.readData(getEntityConfigPath(entityType, entity), true) val props = new Properties() if(str != null) { Json.parseFull(str) match { case None => // there are no config overrides case Some(mapAnon: Map[_, _]) => val map = mapAnon collect { case (k: String, v: Any) => k -> v } require(map("version") == 1) map.get("config") match { case Some(config: Map[_, _]) => for(configTup <- config) configTup match { case (k: String, v: String) => props.setProperty(k, v) case _ => throw new IllegalArgumentException("Invalid " + entityType + " config: " + str) } case _ => throw new IllegalArgumentException("Invalid " + entityType + " config: " + str) } case o => throw new IllegalArgumentException("Unexpected value in config:(%s), entity_type: (%s), entity: (%s)" .format(str, entityType, entity)) } } props } def fetchAllTopicConfigs(zkUtils: ZkUtils): Map[String, Properties] = zkUtils.getAllTopics().map(topic => (topic, fetchEntityConfig(zkUtils, ConfigType.Topic, topic))).toMap def fetchAllEntityConfigs(zkUtils: ZkUtils, entityType: String): Map[String, Properties] = zkUtils.getAllEntitiesWithConfig(entityType).map(entity => (entity, fetchEntityConfig(zkUtils, entityType, entity))).toMap def fetchTopicMetadataFromZk(topic: 
String, zkUtils: ZkUtils): TopicMetadata = fetchTopicMetadataFromZk(topic, zkUtils, new mutable.HashMap[Int, Broker]) def fetchTopicMetadataFromZk(topics: Set[String], zkUtils: ZkUtils): Set[TopicMetadata] = { val cachedBrokerInfo = new mutable.HashMap[Int, Broker]() topics.map(topic => fetchTopicMetadataFromZk(topic, zkUtils, cachedBrokerInfo)) } private def fetchTopicMetadataFromZk(topic: String, zkUtils: ZkUtils, cachedBrokerInfo: mutable.HashMap[Int, Broker], protocol: SecurityProtocol = SecurityProtocol.PLAINTEXT): TopicMetadata = { if(zkUtils.pathExists(getTopicPath(topic))) { val topicPartitionAssignment = zkUtils.getPartitionAssignmentForTopics(List(topic)).get(topic).get val sortedPartitions = topicPartitionAssignment.toList.sortWith((m1, m2) => m1._1 < m2._1) val partitionMetadata = sortedPartitions.map { partitionMap => val partition = partitionMap._1 val replicas = partitionMap._2 val inSyncReplicas = zkUtils.getInSyncReplicasForPartition(topic, partition) val leader = zkUtils.getLeaderForPartition(topic, partition) debug("replicas = " + replicas + ", in sync replicas = " + inSyncReplicas + ", leader = " + leader) var leaderInfo: Option[BrokerEndPoint] = None var replicaInfo: Seq[BrokerEndPoint] = Nil var isrInfo: Seq[BrokerEndPoint] = Nil try { leaderInfo = leader match { case Some(l) => try { Some(getBrokerInfoFromCache(zkUtils, cachedBrokerInfo, List(l)).head.getBrokerEndPoint(protocol)) } catch { case e: Throwable => throw new LeaderNotAvailableException("Leader not available for partition [%s,%d]".format(topic, partition), e) } case None => throw new LeaderNotAvailableException("No leader exists for partition " + partition) } try { replicaInfo = getBrokerInfoFromCache(zkUtils, cachedBrokerInfo, replicas).map(_.getBrokerEndPoint(protocol)) isrInfo = getBrokerInfoFromCache(zkUtils, cachedBrokerInfo, inSyncReplicas).map(_.getBrokerEndPoint(protocol)) } catch { case e: Throwable => throw new ReplicaNotAvailableException(e) } if(replicaInfo.size < replicas.size) throw new ReplicaNotAvailableException("Replica information not available for following brokers: " + replicas.filterNot(replicaInfo.map(_.id).contains(_)).mkString(",")) if(isrInfo.size < inSyncReplicas.size) throw new ReplicaNotAvailableException("In Sync Replica information not available for following brokers: " + inSyncReplicas.filterNot(isrInfo.map(_.id).contains(_)).mkString(",")) new PartitionMetadata(partition, leaderInfo, replicaInfo, isrInfo, Errors.NONE.code) } catch { case e: Throwable => debug("Error while fetching metadata for partition [%s,%d]".format(topic, partition), e) new PartitionMetadata(partition, leaderInfo, replicaInfo, isrInfo, Errors.forException(e).code) } } new TopicMetadata(topic, partitionMetadata) } else { // topic doesn't exist, send appropriate error code new TopicMetadata(topic, Seq.empty[PartitionMetadata], Errors.UNKNOWN_TOPIC_OR_PARTITION.code) } } private def getBrokerInfoFromCache(zkUtils: ZkUtils, cachedBrokerInfo: scala.collection.mutable.Map[Int, Broker], brokerIds: Seq[Int]): Seq[Broker] = { var failedBrokerIds: ListBuffer[Int] = new ListBuffer() val brokerMetadata = brokerIds.map { id => val optionalBrokerInfo = cachedBrokerInfo.get(id) optionalBrokerInfo match { case Some(brokerInfo) => Some(brokerInfo) // return broker info from the cache case None => // fetch it from zookeeper zkUtils.getBrokerInfo(id) match { case Some(brokerInfo) => cachedBrokerInfo += (id -> brokerInfo) Some(brokerInfo) case None => failedBrokerIds += id None } } } 
brokerMetadata.filter(_.isDefined).map(_.get) } private def replicaIndex(firstReplicaIndex: Int, secondReplicaShift: Int, replicaIndex: Int, nBrokers: Int): Int = { val shift = 1 + (secondReplicaShift + replicaIndex) % (nBrokers - 1) (firstReplicaIndex + shift) % nBrokers } }
Mszak/kafka
core/src/main/scala/kafka/admin/AdminUtils.scala
Scala
apache-2.0
22,620
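The replica-assignment table in the AdminUtils comment above can be reproduced with a small, self-contained sketch of the same round-robin scheme (start index and replica shift pinned to 0 so the output is deterministic). Names here are illustrative; the real entry point is AdminUtils.assignReplicasToBrokers.

object ReplicaAssignmentSketch extends App {

  // Mirrors AdminUtils.replicaIndex: place the j-th follower of a partition on a shifted broker.
  def replicaIndex(firstReplicaIndex: Int, shift: Int, replica: Int, nBrokers: Int): Int = {
    val s = 1 + (shift + replica) % (nBrokers - 1)
    (firstReplicaIndex + s) % nBrokers
  }

  // Deterministic variant of assignReplicasToBrokers (startIndex and initial shift fixed to 0).
  def assign(brokers: IndexedSeq[Int], nPartitions: Int, replicationFactor: Int): Map[Int, Seq[Int]] = {
    var shift = 0
    (0 until nPartitions).map { p =>
      if (p > 0 && p % brokers.size == 0) shift += 1
      val first = p % brokers.size
      val followers = (0 until replicationFactor - 1).map(j => replicaIndex(first, shift, j, brokers.size))
      p -> (first +: followers).map(brokers)
    }.toMap
  }

  // 10 partitions, 3 replicas over brokers 0..4 reproduces the table in the comment above,
  // e.g. p0 -> Vector(0, 1, 2) and p5 -> Vector(0, 2, 3).
  assign((0 to 4).toVector, 10, 3).toSeq.sortBy(_._1).foreach(println)
}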
import sbt.Keys._ import sbt._ object HmrcBuild extends Build { import uk.gov.hmrc.DefaultBuildSettings._ import uk.gov.hmrc.SbtAutoBuildPlugin import uk.gov.hmrc.versioning.SbtGitVersioning val nameApp = "accessibility-driver" val appDependencies = Seq( "org.seleniumhq.selenium" % "selenium-java" % "2.45.0", "org.seleniumhq.selenium" % "selenium-firefox-driver" % "2.45.0", "org.littleshoot" % "littleproxy" % "1.0.0-beta8", "joda-time" % "joda-time" % "2.7", "org.joda" % "joda-convert" % "1.7", "commons-codec" % "commons-codec" % "1.10", "commons-io" % "commons-io" % "2.4", "org.jsoup" % "jsoup" % "1.8.2", "org.pegdown" % "pegdown" % "1.4.2" % "test", "org.scalatest" %% "scalatest" % "2.2.4" % "test" ) lazy val project = Project(nameApp, file(".")) .enablePlugins(SbtAutoBuildPlugin, SbtGitVersioning) .settings( targetJvm := "jvm-1.7", libraryDependencies ++= appDependencies, scalaVersion := "2.11.5", crossScalaVersions := Seq("2.10.4", "2.11.5"), resolvers := Seq( "typesafe-releases" at "http://repo.typesafe.com/typesafe/releases/", Resolver.bintrayRepo("hmrc", "releases") ) ) }
kristapsmelderis/accessibility-driver
project/HmrcBuild.scala
Scala
apache-2.0
1,218
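A consumer-side sketch for the library built above; the organisation and version are assumptions rather than facts from the build definition, and the resolver simply matches the Bintray releases repository it publishes to.

// Hypothetical sbt settings in a project that depends on accessibility-driver.
resolvers += Resolver.bintrayRepo("hmrc", "releases")
libraryDependencies += "uk.gov.hmrc" %% "accessibility-driver" % "x.x.x" % Test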
/* * Wire * Copyright (C) 2016 Wire Swiss GmbH * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package com.waz.utils.wrappers import android.database.sqlite.SQLiteProgram import scala.language.implicitConversions trait DBProgram { def bindBlob(index: Int, value: Array[Byte]): Unit def bindNull(index: Int): Unit def bindDouble(index: Int, value: Double): Unit def bindLong(index: Int, value: Long): Unit def bindLong(index: Int, value: Int): Unit def bindString(index: Int, value: String): Unit } class SQLiteProgramWrapper(val program: SQLiteProgram) extends DBProgram { override def bindBlob(index: Int, value: Array[Byte]): Unit = program.bindBlob(index, value) override def bindNull(index: Int): Unit = program.bindNull(index) override def bindDouble(index: Int, value: Double): Unit = program.bindDouble(index, value) override def bindLong(index: Int, value: Long): Unit = program.bindLong(index, value) override def bindLong(index: Int, value: Int): Unit = program.bindLong(index, value) override def bindString(index: Int, value: String): Unit = program.bindString(index, value) } object DBProgram { def apply(program: SQLiteProgram): DBProgram = new SQLiteProgramWrapper(program) implicit def fromAndroid(program: SQLiteProgram): DBProgram = apply(program) }
wireapp/wire-android-sync-engine
zmessaging/src/main/scala/com/waz/utils/wrappers/DBProgram.scala
Scala
gpl-3.0
1,907
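A minimal sketch of code written against the DBProgram abstraction above. bindNullableString is an illustrative helper; the implicit conversion in the companion object lets an android.database.sqlite.SQLiteProgram be passed wherever a DBProgram is expected, while tests can substitute a stub.

import com.waz.utils.wrappers.DBProgram

// Bind an optional value, falling back to SQL NULL when it is absent.
def bindNullableString(program: DBProgram, index: Int, value: Option[String]): Unit =
  value match {
    case Some(s) => program.bindString(index, s)
    case None    => program.bindNull(index)
  }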
/* * Copyright 2008-2010 WorldWide Conferencing, LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package net.liftweb { package jpademo { package model { import _root_.javax.persistence._ /** An author is someone who writes books. */ @Entity class Author { @Id @GeneratedValue(){val strategy = GenerationType.AUTO} var id : Long = _ @Column{val unique = true, val nullable = false} var name : String = "" @OneToMany(){val mappedBy = "author", val targetEntity = classOf[Book]} var books : _root_.java.util.Set[Book] = new _root_.java.util.HashSet[Book]() } } } }
jeppenejsum/liftweb
examples/JPADemo/JPADemo-spa/src/main/scala/net/liftweb/jpademo/model/Author.scala
Scala
apache-2.0
1,103
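The @OneToMany(mappedBy = "author") mapping above implies a Book entity owning the association. A hypothetical sketch of that entity in the same annotation style follows; the real Book class lives elsewhere in the demo, so the field names here are assumptions.

package net.liftweb.jpademo.model

import _root_.javax.persistence._

/** Hypothetical owning side of the Author/Book association. */
@Entity
class Book {
  @Id
  @GeneratedValue(){val strategy = GenerationType.AUTO}
  var id : Long = _

  @Column{val nullable = false}
  var title : String = ""

  // Maps back to Author.books via mappedBy = "author".
  @ManyToOne
  var author : Author = _
}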
package epic.sequences import epic.framework.Example import epic.trees.Span /** * A tagged sequence has a sequence of tags and a sequence of words that are in * one-to-one correspondence. think POS tags etc. * @author dlwh */ case class TaggedSequence[+L, +W](tags: IndexedSeq[L], words: IndexedSeq[W], id: String = "") extends Example[IndexedSeq[L], IndexedSeq[W]] { require(tags.length == words.length) def render = { (tags zip words map { case (t, w) => w +"/" + t}).mkString(" ") } def pairs = tags zip words def features = words def length: Int = words.length def label: IndexedSeq[L] = tags def asSegmentation = Segmentation(tags.zipWithIndex.map{case (l, i) => (l -> Span(i, i+1))}, words, id+"-seg") }
maxim-rabinovich/epic
src/main/scala/epic/sequences/TaggedSequence.scala
Scala
apache-2.0
807
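A minimal usage sketch of TaggedSequence with POS tags, showing the rendered word/tag form.

import epic.sequences.TaggedSequence

val sent = TaggedSequence(
  tags  = IndexedSeq("DT", "NN", "VBZ"),
  words = IndexedSeq("the", "cat", "sleeps"))

println(sent.render) // the/DT cat/NN sleeps/VBZ
println(sent.length) // 3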
package edu.uchicago.cs.encsel.query.tpch

import java.io.File

import edu.uchicago.cs.encsel.query.{HColumnPredicate, RowTempTable}
import edu.uchicago.cs.encsel.query.operator.HorizontalSelect

object HorizontalScan extends App {

  val schema = TPCHSchema.lineitemSchema
  // val inputFolder = "/home/harper/TPCH/"
  val inputFolder = args(0)
  val colIndex = 5
  val suffix = ".parquet"
  val file = new File("%s%s%s".format(inputFolder, schema.getName, suffix)).toURI
  val recorder = new RowTempTable(schema);

  val thresholds = Array(6000, 8000, 17000, 36000, 50000, 53000, 63000, 69000)

  println(thresholds.map(scan(_)).mkString("\n"))

  def scan(threshold: Long): Long = {
    val predicate = new HColumnPredicate((value: Any) => value.asInstanceOf[Double] < threshold, colIndex)
    val start = System.currentTimeMillis()
    new HorizontalSelect().select(file, predicate, schema, Array(0, 1, 2, 3, 4))
    System.currentTimeMillis() - start
  }
}
harperjiang/enc-selector
src/main/scala/edu/uchicago/cs/encsel/query/tpch/HorizontalScan.scala
Scala
apache-2.0
965
package domala.jdbc.models import domala._ case class Person( @Id @GeneratedValue(strategy = GenerationType.IDENTITY) id: ID[Person] = ID.notAssigned, name: Option[Name], age: Option[Int], address: Address, departmentId: Option[Int], @Version version: Int = -1 )
bakenezumi/domala
core/src/test/scala/domala/jdbc/models/Person.scala
Scala
apache-2.0
283
package com.mesosphere.cosmos.converter // TODO(version): Can this be given more structure (e.g. name and type of field that failed, package, etc.)? final case class ConversionFailure(message: String) extends RuntimeException(message)
movicha/cosmos
cosmos-json/src/main/scala/com/mesosphere/cosmos/converter/ConversionFailure.scala
Scala
apache-2.0
236
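A minimal usage sketch; ConversionFailure is a plain RuntimeException carrying only a message.

import com.mesosphere.cosmos.converter.ConversionFailure

val failure = ConversionFailure("unable to convert package definition")
assert(failure.getMessage == "unable to convert package definition")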
package com.krux.hyperion.dataformat

import com.krux.hyperion.aws.AdpDynamoDBExportDataFormat
import com.krux.hyperion.common.{ BaseFields, PipelineObjectId }

/**
 * Applies a schema to a DynamoDB table to make it accessible by a Hive query. Use
 * DynamoDBExportDataFormat with a HiveCopyActivity object and DynamoDBDataNode or S3DataNode input
 * and output. DynamoDBExportDataFormat has the following benefits:
 *
 *   - Provides both DynamoDB and Amazon S3 support
 *
 *   - Allows you to filter data by certain columns in your Hive query
 *
 *   - Exports all attributes from DynamoDB even if you have a sparse schema
 */
case class DynamoDBExportDataFormat private (
  baseFields: BaseFields,
  dataFormatFields: DataFormatFields
) extends DataFormat {

  type Self = DynamoDBExportDataFormat

  def updateBaseFields(fields: BaseFields) = copy(baseFields = fields)
  def updateDataFormatFields(fields: DataFormatFields) = copy(dataFormatFields = fields)

  lazy val serialize = AdpDynamoDBExportDataFormat(
    id = id,
    name = name,
    column = columns.map(_.serialize)
  )

}

object DynamoDBExportDataFormat {

  def apply() = new DynamoDBExportDataFormat(
    baseFields = BaseFields(PipelineObjectId(DynamoDBExportDataFormat.getClass)),
    dataFormatFields = DataFormatFields()
  )

}
hoangelos/hyperion
core/src/main/scala/com/krux/hyperion/dataformat/DynamoDBExportDataFormat.scala
Scala
apache-2.0
1,304
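A minimal construction sketch based on the companion object above; attaching the format to a HiveCopyActivity with DynamoDBDataNode or S3DataNode endpoints is pipeline-specific and omitted here.

import com.krux.hyperion.dataformat.DynamoDBExportDataFormat

// Default export format; columns and other fields are configured through the usual builder methods.
val exportFormat = DynamoDBExportDataFormat()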
package com.twitter.finagle.http.exp.routing import scala.util.control.NonFatal /** * A named parameter for matching an HTTP Request. A [[Parameter]] may be associated with * different discriminators that make up a route - a URI path, query params, header params, and * cookie params. * * @see [[https://swagger.io/specification/#parameter-object Open API Parameter Spec]] for the * inspiration and expected feature set of [[Parameter Parameters]]. * @see [[Segment.Parameterized Parameterized HTTP Path segments]]. * @note We currently only support defining a parameter for a URI path. Query params, * header params, and cookie params are not currently supported. */ private[http] sealed abstract class Parameter { def name: String // TODO: missing Parameter properties (https://swagger.io/specification/#parameter-object): // - in (enum) {query, header, path, cookie}. for now we assume everything is for path // - required (boolean) // - description (string) // - deprecated (boolean) def parse(value: String): Option[ParameterValue] override def toString: String = s"{$name}" } private[http] final case class StringParam(name: String) extends Parameter { override def parse(value: String): Option[StringValue] = Some(StringValue(value)) } private[http] final case class BooleanParam(name: String) extends Parameter { /** * @note This currently matches spec laid out by OpenAPI and only supports case insensitive * "true" and "false" values. We may need to expand this to support "t" and "1". * @see [[com.twitter.finagle.http.util.StringUtil.toBoolean]] */ override def parse(value: String): Option[BooleanValue] = try { val b = value.toBoolean Some(BooleanValue(value, b)) } catch { case NonFatal(_) => None } } /** See [[https://swagger.io/docs/specification/data-models/data-types/#numbers]] */ // TODO - minimum, maximum, exclusive minimum, exclusive maximum, multipleOf private[http] sealed abstract class NumberParameter extends Parameter private[http] sealed abstract class IntegerParameter extends NumberParameter private[http] final case class IntParam(name: String) extends IntegerParameter { override def parse(value: String): Option[IntValue] = try { val i = value.toInt Some(IntValue(value, i)) } catch { case NonFatal(_) => None } } private[http] final case class LongParam(name: String) extends IntegerParameter { override def parse(value: String): Option[LongValue] = try { val l = value.toLong Some(LongValue(value, l)) } catch { case NonFatal(_) => None } } private[http] final case class FloatParam( name: String) extends NumberParameter { override def parse(value: String): Option[FloatValue] = try { val f = value.toFloat Some(FloatValue(value, f)) } catch { case NonFatal(_) => None } } private[http] final case class DoubleParam( name: String) extends NumberParameter { override def parse(value: String): Option[DoubleValue] = try { val d = value.toDouble Some(DoubleValue(value, d)) } catch { case NonFatal(_) => None } } // TODO - add support for exploding path styles at https://swagger.io/docs/specification/serialization/ // which determines how to parse Array and Object parameters // TODO - array support // TODO - object support (https://swagger.io/specification/#schema-object)
twitter/finagle
finagle-http/src/main/scala/com/twitter/finagle/http/exp/routing/Parameter.scala
Scala
apache-2.0
3,400
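Because the Parameter hierarchy above is private[http], the following standalone sketch only mirrors its parse contract: return None on malformed input rather than throwing, so the router can treat a non-matching path segment as "no match".

import scala.util.control.NonFatal

// Illustrative equivalent of LongParam("id").parse(...) for a single path segment.
def parseLongPathParam(raw: String): Option[Long] =
  try Some(raw.toLong)
  catch { case NonFatal(_) => None }

assert(parseLongPathParam("42") == Some(42L))
assert(parseLongPathParam("4x2") == None)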
package org.http4s package server package blaze import cats.effect.{ConcurrentEffect, Timer} import java.nio.ByteBuffer import javax.net.ssl.SSLEngine import org.http4s.blaze.http.http2.{DefaultFlowStrategy, Http2Settings} import org.http4s.blaze.http.http2.server.{ALPNServerSelector, ServerPriorKnowledgeHandshaker} import org.http4s.blaze.pipeline.{LeafBuilder, TailStage} import org.http4s.blaze.util.TickWheelExecutor import scala.concurrent.ExecutionContext import scala.concurrent.duration.Duration import io.chrisdavenport.vault._ /** Facilitates the use of ALPN when using blaze http2 support */ private[blaze] object ProtocolSelector { def apply[F[_]]( engine: SSLEngine, httpApp: HttpApp[F], maxRequestLineLen: Int, maxHeadersLen: Int, chunkBufferMaxSize: Int, requestAttributes: () => Vault, executionContext: ExecutionContext, serviceErrorHandler: ServiceErrorHandler[F], responseHeaderTimeout: Duration, idleTimeout: Duration, scheduler: TickWheelExecutor)( implicit F: ConcurrentEffect[F], timer: Timer[F]): ALPNServerSelector = { def http2Stage(): TailStage[ByteBuffer] = { val newNode = { streamId: Int => LeafBuilder( new Http2NodeStage( streamId, Duration.Inf, executionContext, requestAttributes, httpApp, serviceErrorHandler, responseHeaderTimeout, idleTimeout, scheduler )) } val localSettings = Http2Settings.default.copy( maxConcurrentStreams = 100, // TODO: configurable? maxHeaderListSize = maxHeadersLen) new ServerPriorKnowledgeHandshaker( localSettings = localSettings, flowStrategy = new DefaultFlowStrategy(localSettings), nodeBuilder = newNode) } def http1Stage(): TailStage[ByteBuffer] = Http1ServerStage[F]( httpApp, requestAttributes, executionContext, enableWebSockets = false, maxRequestLineLen, maxHeadersLen, chunkBufferMaxSize, serviceErrorHandler, responseHeaderTimeout, idleTimeout, scheduler ) def preference(protos: Set[String]): String = protos .find { case "h2" | "h2-14" | "h2-15" => true case _ => false } .getOrElse("undefined") def select(s: String): LeafBuilder[ByteBuffer] = LeafBuilder(s match { case "h2" | "h2-14" | "h2-15" => http2Stage() case _ => http1Stage() }) new ALPNServerSelector(engine, preference, select) } }
aeons/http4s
blaze-server/src/main/scala/org/http4s/server/blaze/ProtocolSelector.scala
Scala
apache-2.0
2,681
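A standalone copy of the ALPN preference rule used by ProtocolSelector above: favour an h2 variant when the client advertises one, otherwise report "undefined", which the selector maps to the HTTP/1 pipeline.

def preference(protos: Set[String]): String =
  protos.find {
    case "h2" | "h2-14" | "h2-15" => true
    case _ => false
  }.getOrElse("undefined")

assert(preference(Set("http/1.1", "h2")) == "h2")
assert(preference(Set("http/1.1")) == "undefined")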
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.datasources.v2.jdbc import java.sql.{Connection, SQLException} import scala.collection.JavaConverters._ import org.apache.spark.internal.Logging import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.catalyst.analysis.{NoSuchNamespaceException, NoSuchTableException} import org.apache.spark.sql.connector.catalog.{Identifier, Table, TableCatalog, TableChange} import org.apache.spark.sql.connector.expressions.Transform import org.apache.spark.sql.execution.datasources.jdbc.{JDBCOptions, JdbcOptionsInWrite, JDBCRDD, JdbcUtils} import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.jdbc.{JdbcDialect, JdbcDialects} import org.apache.spark.sql.types.StructType import org.apache.spark.sql.util.CaseInsensitiveStringMap class JDBCTableCatalog extends TableCatalog with Logging { private var catalogName: String = null private var options: JDBCOptions = _ private var dialect: JdbcDialect = _ override def name(): String = { require(catalogName != null, "The JDBC table catalog is not initialed") catalogName } override def initialize(name: String, options: CaseInsensitiveStringMap): Unit = { assert(catalogName == null, "The JDBC table catalog is already initialed") catalogName = name val map = options.asCaseSensitiveMap().asScala.toMap // The `JDBCOptions` checks the existence of the table option. This is required by JDBC v1, but // JDBC V2 only knows the table option when loading a table. Here we put a table option with a // fake value, so that it can pass the check of `JDBCOptions`. 
this.options = new JDBCOptions(map + (JDBCOptions.JDBC_TABLE_NAME -> "__invalid_dbtable")) dialect = JdbcDialects.get(this.options.url) } override def listTables(namespace: Array[String]): Array[Identifier] = { checkNamespace(namespace) withConnection { conn => val schemaPattern = if (namespace.length == 1) namespace.head else null val rs = conn.getMetaData .getTables(null, schemaPattern, "%", Array("TABLE")); new Iterator[Identifier] { def hasNext = rs.next() def next() = Identifier.of(namespace, rs.getString("TABLE_NAME")) }.toArray } } override def tableExists(ident: Identifier): Boolean = { checkNamespace(ident.namespace()) val writeOptions = new JdbcOptionsInWrite( options.parameters + (JDBCOptions.JDBC_TABLE_NAME -> getTableName(ident))) classifyException(s"Failed table existence check: $ident") { withConnection(JdbcUtils.tableExists(_, writeOptions)) } } override def dropTable(ident: Identifier): Boolean = { checkNamespace(ident.namespace()) withConnection { conn => try { JdbcUtils.dropTable(conn, getTableName(ident), options) true } catch { case _: SQLException => false } } } override def renameTable(oldIdent: Identifier, newIdent: Identifier): Unit = { checkNamespace(oldIdent.namespace()) withConnection { conn => classifyException(s"Failed table renaming from $oldIdent to $newIdent") { JdbcUtils.renameTable(conn, getTableName(oldIdent), getTableName(newIdent), options) } } } override def loadTable(ident: Identifier): Table = { checkNamespace(ident.namespace()) val optionsWithTableName = new JDBCOptions( options.parameters + (JDBCOptions.JDBC_TABLE_NAME -> getTableName(ident))) try { val schema = JDBCRDD.resolveTable(optionsWithTableName) JDBCTable(ident, schema, optionsWithTableName) } catch { case _: SQLException => throw new NoSuchTableException(ident) } } override def createTable( ident: Identifier, schema: StructType, partitions: Array[Transform], properties: java.util.Map[String, String]): Table = { checkNamespace(ident.namespace()) if (partitions.nonEmpty) { throw new UnsupportedOperationException("Cannot create JDBC table with partition") } var tableOptions = options.parameters + (JDBCOptions.JDBC_TABLE_NAME -> getTableName(ident)) var tableComment: String = "" var tableProperties: String = "" if (!properties.isEmpty) { properties.asScala.map { case (k, v) => k match { case "comment" => tableComment = v // ToDo: have a follow up to fail provider once unify create table syntax PR is merged case "provider" => case "owner" => // owner is ignored. It is default to current user name. case "location" => throw new AnalysisException("CREATE TABLE ... LOCATION ... is not supported in" + " JDBC catalog.") case _ => tableProperties = tableProperties + " " + s"$k $v" } } } if (tableComment != "") { tableOptions = tableOptions + (JDBCOptions.JDBC_TABLE_COMMENT -> tableComment) } if (tableProperties != "") { // table property is set in JDBC_CREATE_TABLE_OPTIONS, which will be appended // to CREATE TABLE statement. // E.g., "CREATE TABLE t (name string) ENGINE InnoDB DEFAULT CHARACTER SET utf8" // Spark doesn't check if these table properties are supported by databases. If // table property is invalid, database will fail the table creation. 
tableOptions = tableOptions + (JDBCOptions.JDBC_CREATE_TABLE_OPTIONS -> tableProperties) } val writeOptions = new JdbcOptionsInWrite(tableOptions) val caseSensitive = SQLConf.get.caseSensitiveAnalysis withConnection { conn => classifyException(s"Failed table creation: $ident") { JdbcUtils.createTable(conn, getTableName(ident), schema, caseSensitive, writeOptions) } } JDBCTable(ident, schema, writeOptions) } override def alterTable(ident: Identifier, changes: TableChange*): Table = { checkNamespace(ident.namespace()) withConnection { conn => classifyException(s"Failed table altering: $ident") { JdbcUtils.alterTable(conn, getTableName(ident), changes, options) } loadTable(ident) } } private def checkNamespace(namespace: Array[String]): Unit = { // In JDBC there is no nested database/schema if (namespace.length > 1) { throw new NoSuchNamespaceException(namespace) } } private def withConnection[T](f: Connection => T): T = { val conn = JdbcUtils.createConnectionFactory(options)() try { f(conn) } finally { conn.close() } } private def getTableName(ident: Identifier): String = { (ident.namespace() :+ ident.name()).map(dialect.quoteIdentifier).mkString(".") } private def classifyException[T](message: String)(f: => T): T = { try { f } catch { case e: Throwable => throw dialect.classifyException(message, e) } } }
shuangshuangwang/spark
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/jdbc/JDBCTableCatalog.scala
Scala
apache-2.0
7,621
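A hedged wiring sketch showing how a TableCatalog such as JDBCTableCatalog is typically registered through spark.sql.catalog.* settings; the catalog name "h2", the in-memory H2 URL, and the driver on the classpath are illustrative assumptions, not taken from the source above.

import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder()
  .appName("jdbc-catalog-demo")
  .master("local[*]")
  .config("spark.sql.catalog.h2", "org.apache.spark.sql.execution.datasources.v2.jdbc.JDBCTableCatalog")
  .config("spark.sql.catalog.h2.url", "jdbc:h2:mem:testdb")
  .config("spark.sql.catalog.h2.driver", "org.h2.Driver")
  .getOrCreate()

// Tables in the catalog are then addressed as <catalog>.<schema>.<table>.
spark.sql("SHOW TABLES IN h2.PUBLIC").show()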
// Copyright (c) 2013-2020 Rob Norris and Contributors // This software is licensed under the MIT License (MIT). // For more information see LICENSE or https://opensource.org/licenses/MIT package doobie.free import cats.~> import cats.effect.kernel.{ CancelScope, Poll, Sync } import cats.free.{ Free => FF } // alias because some algebras have an op called Free import doobie.WeakAsync import scala.concurrent.Future import scala.concurrent.duration.FiniteDuration import java.io.InputStream import java.io.Reader import java.lang.Class import java.lang.String import java.math.BigDecimal import java.net.URL import java.sql.Blob import java.sql.Clob import java.sql.Connection import java.sql.Date import java.sql.NClob import java.sql.ParameterMetaData import java.sql.PreparedStatement import java.sql.Ref import java.sql.ResultSet import java.sql.ResultSetMetaData import java.sql.RowId import java.sql.SQLType import java.sql.SQLWarning import java.sql.SQLXML import java.sql.Time import java.sql.Timestamp import java.sql.{ Array => SqlArray } import java.util.Calendar object preparedstatement { module => // Algebra of operations for PreparedStatement. Each accepts a visitor as an alternative to pattern-matching. sealed trait PreparedStatementOp[A] { def visit[F[_]](v: PreparedStatementOp.Visitor[F]): F[A] } // Free monad over PreparedStatementOp. type PreparedStatementIO[A] = FF[PreparedStatementOp, A] // Module of instances and constructors of PreparedStatementOp. object PreparedStatementOp { // Given a PreparedStatement we can embed a PreparedStatementIO program in any algebra that understands embedding. implicit val PreparedStatementOpEmbeddable: Embeddable[PreparedStatementOp, PreparedStatement] = new Embeddable[PreparedStatementOp, PreparedStatement] { def embed[A](j: PreparedStatement, fa: FF[PreparedStatementOp, A]) = Embedded.PreparedStatement(j, fa) } // Interface for a natural transformation PreparedStatementOp ~> F encoded via the visitor pattern. // This approach is much more efficient than pattern-matching for large algebras. 
trait Visitor[F[_]] extends (PreparedStatementOp ~> F) { final def apply[A](fa: PreparedStatementOp[A]): F[A] = fa.visit(this) // Common def raw[A](f: PreparedStatement => A): F[A] def embed[A](e: Embedded[A]): F[A] def raiseError[A](e: Throwable): F[A] def handleErrorWith[A](fa: PreparedStatementIO[A])(f: Throwable => PreparedStatementIO[A]): F[A] def monotonic: F[FiniteDuration] def realTime: F[FiniteDuration] def delay[A](thunk: => A): F[A] def suspend[A](hint: Sync.Type)(thunk: => A): F[A] def forceR[A, B](fa: PreparedStatementIO[A])(fb: PreparedStatementIO[B]): F[B] def uncancelable[A](body: Poll[PreparedStatementIO] => PreparedStatementIO[A]): F[A] def poll[A](poll: Any, fa: PreparedStatementIO[A]): F[A] def canceled: F[Unit] def onCancel[A](fa: PreparedStatementIO[A], fin: PreparedStatementIO[Unit]): F[A] def fromFuture[A](fut: PreparedStatementIO[Future[A]]): F[A] // PreparedStatement def addBatch: F[Unit] def addBatch(a: String): F[Unit] def cancel: F[Unit] def clearBatch: F[Unit] def clearParameters: F[Unit] def clearWarnings: F[Unit] def close: F[Unit] def closeOnCompletion: F[Unit] def execute: F[Boolean] def execute(a: String): F[Boolean] def execute(a: String, b: Array[Int]): F[Boolean] def execute(a: String, b: Array[String]): F[Boolean] def execute(a: String, b: Int): F[Boolean] def executeBatch: F[Array[Int]] def executeLargeBatch: F[Array[Long]] def executeLargeUpdate: F[Long] def executeLargeUpdate(a: String): F[Long] def executeLargeUpdate(a: String, b: Array[Int]): F[Long] def executeLargeUpdate(a: String, b: Array[String]): F[Long] def executeLargeUpdate(a: String, b: Int): F[Long] def executeQuery: F[ResultSet] def executeQuery(a: String): F[ResultSet] def executeUpdate: F[Int] def executeUpdate(a: String): F[Int] def executeUpdate(a: String, b: Array[Int]): F[Int] def executeUpdate(a: String, b: Array[String]): F[Int] def executeUpdate(a: String, b: Int): F[Int] def getConnection: F[Connection] def getFetchDirection: F[Int] def getFetchSize: F[Int] def getGeneratedKeys: F[ResultSet] def getLargeMaxRows: F[Long] def getLargeUpdateCount: F[Long] def getMaxFieldSize: F[Int] def getMaxRows: F[Int] def getMetaData: F[ResultSetMetaData] def getMoreResults: F[Boolean] def getMoreResults(a: Int): F[Boolean] def getParameterMetaData: F[ParameterMetaData] def getQueryTimeout: F[Int] def getResultSet: F[ResultSet] def getResultSetConcurrency: F[Int] def getResultSetHoldability: F[Int] def getResultSetType: F[Int] def getUpdateCount: F[Int] def getWarnings: F[SQLWarning] def isCloseOnCompletion: F[Boolean] def isClosed: F[Boolean] def isPoolable: F[Boolean] def isWrapperFor(a: Class[_]): F[Boolean] def setArray(a: Int, b: SqlArray): F[Unit] def setAsciiStream(a: Int, b: InputStream): F[Unit] def setAsciiStream(a: Int, b: InputStream, c: Int): F[Unit] def setAsciiStream(a: Int, b: InputStream, c: Long): F[Unit] def setBigDecimal(a: Int, b: BigDecimal): F[Unit] def setBinaryStream(a: Int, b: InputStream): F[Unit] def setBinaryStream(a: Int, b: InputStream, c: Int): F[Unit] def setBinaryStream(a: Int, b: InputStream, c: Long): F[Unit] def setBlob(a: Int, b: Blob): F[Unit] def setBlob(a: Int, b: InputStream): F[Unit] def setBlob(a: Int, b: InputStream, c: Long): F[Unit] def setBoolean(a: Int, b: Boolean): F[Unit] def setByte(a: Int, b: Byte): F[Unit] def setBytes(a: Int, b: Array[Byte]): F[Unit] def setCharacterStream(a: Int, b: Reader): F[Unit] def setCharacterStream(a: Int, b: Reader, c: Int): F[Unit] def setCharacterStream(a: Int, b: Reader, c: Long): F[Unit] def setClob(a: Int, 
b: Clob): F[Unit] def setClob(a: Int, b: Reader): F[Unit] def setClob(a: Int, b: Reader, c: Long): F[Unit] def setCursorName(a: String): F[Unit] def setDate(a: Int, b: Date): F[Unit] def setDate(a: Int, b: Date, c: Calendar): F[Unit] def setDouble(a: Int, b: Double): F[Unit] def setEscapeProcessing(a: Boolean): F[Unit] def setFetchDirection(a: Int): F[Unit] def setFetchSize(a: Int): F[Unit] def setFloat(a: Int, b: Float): F[Unit] def setInt(a: Int, b: Int): F[Unit] def setLargeMaxRows(a: Long): F[Unit] def setLong(a: Int, b: Long): F[Unit] def setMaxFieldSize(a: Int): F[Unit] def setMaxRows(a: Int): F[Unit] def setNCharacterStream(a: Int, b: Reader): F[Unit] def setNCharacterStream(a: Int, b: Reader, c: Long): F[Unit] def setNClob(a: Int, b: NClob): F[Unit] def setNClob(a: Int, b: Reader): F[Unit] def setNClob(a: Int, b: Reader, c: Long): F[Unit] def setNString(a: Int, b: String): F[Unit] def setNull(a: Int, b: Int): F[Unit] def setNull(a: Int, b: Int, c: String): F[Unit] def setObject(a: Int, b: AnyRef): F[Unit] def setObject(a: Int, b: AnyRef, c: Int): F[Unit] def setObject(a: Int, b: AnyRef, c: Int, d: Int): F[Unit] def setObject(a: Int, b: AnyRef, c: SQLType): F[Unit] def setObject(a: Int, b: AnyRef, c: SQLType, d: Int): F[Unit] def setPoolable(a: Boolean): F[Unit] def setQueryTimeout(a: Int): F[Unit] def setRef(a: Int, b: Ref): F[Unit] def setRowId(a: Int, b: RowId): F[Unit] def setSQLXML(a: Int, b: SQLXML): F[Unit] def setShort(a: Int, b: Short): F[Unit] def setString(a: Int, b: String): F[Unit] def setTime(a: Int, b: Time): F[Unit] def setTime(a: Int, b: Time, c: Calendar): F[Unit] def setTimestamp(a: Int, b: Timestamp): F[Unit] def setTimestamp(a: Int, b: Timestamp, c: Calendar): F[Unit] def setURL(a: Int, b: URL): F[Unit] def unwrap[T](a: Class[T]): F[T] } // Common operations for all algebras. 
final case class Raw[A](f: PreparedStatement => A) extends PreparedStatementOp[A] { def visit[F[_]](v: Visitor[F]) = v.raw(f) } final case class Embed[A](e: Embedded[A]) extends PreparedStatementOp[A] { def visit[F[_]](v: Visitor[F]) = v.embed(e) } final case class RaiseError[A](e: Throwable) extends PreparedStatementOp[A] { def visit[F[_]](v: Visitor[F]) = v.raiseError(e) } final case class HandleErrorWith[A](fa: PreparedStatementIO[A], f: Throwable => PreparedStatementIO[A]) extends PreparedStatementOp[A] { def visit[F[_]](v: Visitor[F]) = v.handleErrorWith(fa)(f) } case object Monotonic extends PreparedStatementOp[FiniteDuration] { def visit[F[_]](v: Visitor[F]) = v.monotonic } case object Realtime extends PreparedStatementOp[FiniteDuration] { def visit[F[_]](v: Visitor[F]) = v.realTime } case class Suspend[A](hint: Sync.Type, thunk: () => A) extends PreparedStatementOp[A] { def visit[F[_]](v: Visitor[F]) = v.suspend(hint)(thunk()) } case class ForceR[A, B](fa: PreparedStatementIO[A], fb: PreparedStatementIO[B]) extends PreparedStatementOp[B] { def visit[F[_]](v: Visitor[F]) = v.forceR(fa)(fb) } case class Uncancelable[A](body: Poll[PreparedStatementIO] => PreparedStatementIO[A]) extends PreparedStatementOp[A] { def visit[F[_]](v: Visitor[F]) = v.uncancelable(body) } case class Poll1[A](poll: Any, fa: PreparedStatementIO[A]) extends PreparedStatementOp[A] { def visit[F[_]](v: Visitor[F]) = v.poll(poll, fa) } case object Canceled extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.canceled } case class OnCancel[A](fa: PreparedStatementIO[A], fin: PreparedStatementIO[Unit]) extends PreparedStatementOp[A] { def visit[F[_]](v: Visitor[F]) = v.onCancel(fa, fin) } case class FromFuture[A](fut: PreparedStatementIO[Future[A]]) extends PreparedStatementOp[A] { def visit[F[_]](v: Visitor[F]) = v.fromFuture(fut) } // PreparedStatement-specific operations. 
case object AddBatch extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.addBatch } final case class AddBatch1(a: String) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.addBatch(a) } case object Cancel extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.cancel } case object ClearBatch extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.clearBatch } case object ClearParameters extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.clearParameters } case object ClearWarnings extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.clearWarnings } case object Close extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.close } case object CloseOnCompletion extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.closeOnCompletion } case object Execute extends PreparedStatementOp[Boolean] { def visit[F[_]](v: Visitor[F]) = v.execute } final case class Execute1(a: String) extends PreparedStatementOp[Boolean] { def visit[F[_]](v: Visitor[F]) = v.execute(a) } final case class Execute2(a: String, b: Array[Int]) extends PreparedStatementOp[Boolean] { def visit[F[_]](v: Visitor[F]) = v.execute(a, b) } final case class Execute3(a: String, b: Array[String]) extends PreparedStatementOp[Boolean] { def visit[F[_]](v: Visitor[F]) = v.execute(a, b) } final case class Execute4(a: String, b: Int) extends PreparedStatementOp[Boolean] { def visit[F[_]](v: Visitor[F]) = v.execute(a, b) } case object ExecuteBatch extends PreparedStatementOp[Array[Int]] { def visit[F[_]](v: Visitor[F]) = v.executeBatch } case object ExecuteLargeBatch extends PreparedStatementOp[Array[Long]] { def visit[F[_]](v: Visitor[F]) = v.executeLargeBatch } case object ExecuteLargeUpdate extends PreparedStatementOp[Long] { def visit[F[_]](v: Visitor[F]) = v.executeLargeUpdate } final case class ExecuteLargeUpdate1(a: String) extends PreparedStatementOp[Long] { def visit[F[_]](v: Visitor[F]) = v.executeLargeUpdate(a) } final case class ExecuteLargeUpdate2(a: String, b: Array[Int]) extends PreparedStatementOp[Long] { def visit[F[_]](v: Visitor[F]) = v.executeLargeUpdate(a, b) } final case class ExecuteLargeUpdate3(a: String, b: Array[String]) extends PreparedStatementOp[Long] { def visit[F[_]](v: Visitor[F]) = v.executeLargeUpdate(a, b) } final case class ExecuteLargeUpdate4(a: String, b: Int) extends PreparedStatementOp[Long] { def visit[F[_]](v: Visitor[F]) = v.executeLargeUpdate(a, b) } case object ExecuteQuery extends PreparedStatementOp[ResultSet] { def visit[F[_]](v: Visitor[F]) = v.executeQuery } final case class ExecuteQuery1(a: String) extends PreparedStatementOp[ResultSet] { def visit[F[_]](v: Visitor[F]) = v.executeQuery(a) } case object ExecuteUpdate extends PreparedStatementOp[Int] { def visit[F[_]](v: Visitor[F]) = v.executeUpdate } final case class ExecuteUpdate1(a: String) extends PreparedStatementOp[Int] { def visit[F[_]](v: Visitor[F]) = v.executeUpdate(a) } final case class ExecuteUpdate2(a: String, b: Array[Int]) extends PreparedStatementOp[Int] { def visit[F[_]](v: Visitor[F]) = v.executeUpdate(a, b) } final case class ExecuteUpdate3(a: String, b: Array[String]) extends PreparedStatementOp[Int] { def visit[F[_]](v: Visitor[F]) = v.executeUpdate(a, b) } final case class ExecuteUpdate4(a: String, b: Int) extends PreparedStatementOp[Int] { def visit[F[_]](v: Visitor[F]) = v.executeUpdate(a, b) } case object GetConnection extends PreparedStatementOp[Connection] { def visit[F[_]](v: 
Visitor[F]) = v.getConnection } case object GetFetchDirection extends PreparedStatementOp[Int] { def visit[F[_]](v: Visitor[F]) = v.getFetchDirection } case object GetFetchSize extends PreparedStatementOp[Int] { def visit[F[_]](v: Visitor[F]) = v.getFetchSize } case object GetGeneratedKeys extends PreparedStatementOp[ResultSet] { def visit[F[_]](v: Visitor[F]) = v.getGeneratedKeys } case object GetLargeMaxRows extends PreparedStatementOp[Long] { def visit[F[_]](v: Visitor[F]) = v.getLargeMaxRows } case object GetLargeUpdateCount extends PreparedStatementOp[Long] { def visit[F[_]](v: Visitor[F]) = v.getLargeUpdateCount } case object GetMaxFieldSize extends PreparedStatementOp[Int] { def visit[F[_]](v: Visitor[F]) = v.getMaxFieldSize } case object GetMaxRows extends PreparedStatementOp[Int] { def visit[F[_]](v: Visitor[F]) = v.getMaxRows } case object GetMetaData extends PreparedStatementOp[ResultSetMetaData] { def visit[F[_]](v: Visitor[F]) = v.getMetaData } case object GetMoreResults extends PreparedStatementOp[Boolean] { def visit[F[_]](v: Visitor[F]) = v.getMoreResults } final case class GetMoreResults1(a: Int) extends PreparedStatementOp[Boolean] { def visit[F[_]](v: Visitor[F]) = v.getMoreResults(a) } case object GetParameterMetaData extends PreparedStatementOp[ParameterMetaData] { def visit[F[_]](v: Visitor[F]) = v.getParameterMetaData } case object GetQueryTimeout extends PreparedStatementOp[Int] { def visit[F[_]](v: Visitor[F]) = v.getQueryTimeout } case object GetResultSet extends PreparedStatementOp[ResultSet] { def visit[F[_]](v: Visitor[F]) = v.getResultSet } case object GetResultSetConcurrency extends PreparedStatementOp[Int] { def visit[F[_]](v: Visitor[F]) = v.getResultSetConcurrency } case object GetResultSetHoldability extends PreparedStatementOp[Int] { def visit[F[_]](v: Visitor[F]) = v.getResultSetHoldability } case object GetResultSetType extends PreparedStatementOp[Int] { def visit[F[_]](v: Visitor[F]) = v.getResultSetType } case object GetUpdateCount extends PreparedStatementOp[Int] { def visit[F[_]](v: Visitor[F]) = v.getUpdateCount } case object GetWarnings extends PreparedStatementOp[SQLWarning] { def visit[F[_]](v: Visitor[F]) = v.getWarnings } case object IsCloseOnCompletion extends PreparedStatementOp[Boolean] { def visit[F[_]](v: Visitor[F]) = v.isCloseOnCompletion } case object IsClosed extends PreparedStatementOp[Boolean] { def visit[F[_]](v: Visitor[F]) = v.isClosed } case object IsPoolable extends PreparedStatementOp[Boolean] { def visit[F[_]](v: Visitor[F]) = v.isPoolable } final case class IsWrapperFor(a: Class[_]) extends PreparedStatementOp[Boolean] { def visit[F[_]](v: Visitor[F]) = v.isWrapperFor(a) } final case class SetArray(a: Int, b: SqlArray) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setArray(a, b) } final case class SetAsciiStream(a: Int, b: InputStream) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setAsciiStream(a, b) } final case class SetAsciiStream1(a: Int, b: InputStream, c: Int) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setAsciiStream(a, b, c) } final case class SetAsciiStream2(a: Int, b: InputStream, c: Long) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setAsciiStream(a, b, c) } final case class SetBigDecimal(a: Int, b: BigDecimal) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setBigDecimal(a, b) } final case class SetBinaryStream(a: Int, b: InputStream) extends PreparedStatementOp[Unit] { def 
visit[F[_]](v: Visitor[F]) = v.setBinaryStream(a, b) } final case class SetBinaryStream1(a: Int, b: InputStream, c: Int) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setBinaryStream(a, b, c) } final case class SetBinaryStream2(a: Int, b: InputStream, c: Long) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setBinaryStream(a, b, c) } final case class SetBlob(a: Int, b: Blob) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setBlob(a, b) } final case class SetBlob1(a: Int, b: InputStream) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setBlob(a, b) } final case class SetBlob2(a: Int, b: InputStream, c: Long) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setBlob(a, b, c) } final case class SetBoolean(a: Int, b: Boolean) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setBoolean(a, b) } final case class SetByte(a: Int, b: Byte) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setByte(a, b) } final case class SetBytes(a: Int, b: Array[Byte]) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setBytes(a, b) } final case class SetCharacterStream(a: Int, b: Reader) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setCharacterStream(a, b) } final case class SetCharacterStream1(a: Int, b: Reader, c: Int) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setCharacterStream(a, b, c) } final case class SetCharacterStream2(a: Int, b: Reader, c: Long) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setCharacterStream(a, b, c) } final case class SetClob(a: Int, b: Clob) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setClob(a, b) } final case class SetClob1(a: Int, b: Reader) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setClob(a, b) } final case class SetClob2(a: Int, b: Reader, c: Long) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setClob(a, b, c) } final case class SetCursorName(a: String) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setCursorName(a) } final case class SetDate(a: Int, b: Date) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setDate(a, b) } final case class SetDate1(a: Int, b: Date, c: Calendar) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setDate(a, b, c) } final case class SetDouble(a: Int, b: Double) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setDouble(a, b) } final case class SetEscapeProcessing(a: Boolean) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setEscapeProcessing(a) } final case class SetFetchDirection(a: Int) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setFetchDirection(a) } final case class SetFetchSize(a: Int) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setFetchSize(a) } final case class SetFloat(a: Int, b: Float) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setFloat(a, b) } final case class SetInt(a: Int, b: Int) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setInt(a, b) } final case class SetLargeMaxRows(a: Long) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setLargeMaxRows(a) } final case class SetLong(a: Int, b: Long) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = 
v.setLong(a, b) } final case class SetMaxFieldSize(a: Int) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setMaxFieldSize(a) } final case class SetMaxRows(a: Int) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setMaxRows(a) } final case class SetNCharacterStream(a: Int, b: Reader) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setNCharacterStream(a, b) } final case class SetNCharacterStream1(a: Int, b: Reader, c: Long) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setNCharacterStream(a, b, c) } final case class SetNClob(a: Int, b: NClob) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setNClob(a, b) } final case class SetNClob1(a: Int, b: Reader) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setNClob(a, b) } final case class SetNClob2(a: Int, b: Reader, c: Long) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setNClob(a, b, c) } final case class SetNString(a: Int, b: String) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setNString(a, b) } final case class SetNull(a: Int, b: Int) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setNull(a, b) } final case class SetNull1(a: Int, b: Int, c: String) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setNull(a, b, c) } final case class SetObject(a: Int, b: AnyRef) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setObject(a, b) } final case class SetObject1(a: Int, b: AnyRef, c: Int) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setObject(a, b, c) } final case class SetObject2(a: Int, b: AnyRef, c: Int, d: Int) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setObject(a, b, c, d) } final case class SetObject3(a: Int, b: AnyRef, c: SQLType) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setObject(a, b, c) } final case class SetObject4(a: Int, b: AnyRef, c: SQLType, d: Int) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setObject(a, b, c, d) } final case class SetPoolable(a: Boolean) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setPoolable(a) } final case class SetQueryTimeout(a: Int) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setQueryTimeout(a) } final case class SetRef(a: Int, b: Ref) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setRef(a, b) } final case class SetRowId(a: Int, b: RowId) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setRowId(a, b) } final case class SetSQLXML(a: Int, b: SQLXML) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setSQLXML(a, b) } final case class SetShort(a: Int, b: Short) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setShort(a, b) } final case class SetString(a: Int, b: String) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setString(a, b) } final case class SetTime(a: Int, b: Time) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setTime(a, b) } final case class SetTime1(a: Int, b: Time, c: Calendar) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setTime(a, b, c) } final case class SetTimestamp(a: Int, b: Timestamp) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setTimestamp(a, b) } final case class SetTimestamp1(a: Int, b: 
Timestamp, c: Calendar) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setTimestamp(a, b, c) } final case class SetURL(a: Int, b: URL) extends PreparedStatementOp[Unit] { def visit[F[_]](v: Visitor[F]) = v.setURL(a, b) } final case class Unwrap[T](a: Class[T]) extends PreparedStatementOp[T] { def visit[F[_]](v: Visitor[F]) = v.unwrap(a) } } import PreparedStatementOp._ // Smart constructors for operations common to all algebras. val unit: PreparedStatementIO[Unit] = FF.pure[PreparedStatementOp, Unit](()) def pure[A](a: A): PreparedStatementIO[A] = FF.pure[PreparedStatementOp, A](a) def raw[A](f: PreparedStatement => A): PreparedStatementIO[A] = FF.liftF(Raw(f)) def embed[F[_], J, A](j: J, fa: FF[F, A])(implicit ev: Embeddable[F, J]): FF[PreparedStatementOp, A] = FF.liftF(Embed(ev.embed(j, fa))) def raiseError[A](err: Throwable): PreparedStatementIO[A] = FF.liftF[PreparedStatementOp, A](RaiseError(err)) def handleErrorWith[A](fa: PreparedStatementIO[A])(f: Throwable => PreparedStatementIO[A]): PreparedStatementIO[A] = FF.liftF[PreparedStatementOp, A](HandleErrorWith(fa, f)) val monotonic = FF.liftF[PreparedStatementOp, FiniteDuration](Monotonic) val realtime = FF.liftF[PreparedStatementOp, FiniteDuration](Realtime) def delay[A](thunk: => A) = FF.liftF[PreparedStatementOp, A](Suspend(Sync.Type.Delay, () => thunk)) def suspend[A](hint: Sync.Type)(thunk: => A) = FF.liftF[PreparedStatementOp, A](Suspend(hint, () => thunk)) def forceR[A, B](fa: PreparedStatementIO[A])(fb: PreparedStatementIO[B]) = FF.liftF[PreparedStatementOp, B](ForceR(fa, fb)) def uncancelable[A](body: Poll[PreparedStatementIO] => PreparedStatementIO[A]) = FF.liftF[PreparedStatementOp, A](Uncancelable(body)) def capturePoll[M[_]](mpoll: Poll[M]) = new Poll[PreparedStatementIO] { def apply[A](fa: PreparedStatementIO[A]) = FF.liftF[PreparedStatementOp, A](Poll1(mpoll, fa)) } val canceled = FF.liftF[PreparedStatementOp, Unit](Canceled) def onCancel[A](fa: PreparedStatementIO[A], fin: PreparedStatementIO[Unit]) = FF.liftF[PreparedStatementOp, A](OnCancel(fa, fin)) def fromFuture[A](fut: PreparedStatementIO[Future[A]]) = FF.liftF[PreparedStatementOp, A](FromFuture(fut)) // Smart constructors for PreparedStatement-specific operations. 
val addBatch: PreparedStatementIO[Unit] = FF.liftF(AddBatch) def addBatch(a: String): PreparedStatementIO[Unit] = FF.liftF(AddBatch1(a)) val cancel: PreparedStatementIO[Unit] = FF.liftF(Cancel) val clearBatch: PreparedStatementIO[Unit] = FF.liftF(ClearBatch) val clearParameters: PreparedStatementIO[Unit] = FF.liftF(ClearParameters) val clearWarnings: PreparedStatementIO[Unit] = FF.liftF(ClearWarnings) val close: PreparedStatementIO[Unit] = FF.liftF(Close) val closeOnCompletion: PreparedStatementIO[Unit] = FF.liftF(CloseOnCompletion) val execute: PreparedStatementIO[Boolean] = FF.liftF(Execute) def execute(a: String): PreparedStatementIO[Boolean] = FF.liftF(Execute1(a)) def execute(a: String, b: Array[Int]): PreparedStatementIO[Boolean] = FF.liftF(Execute2(a, b)) def execute(a: String, b: Array[String]): PreparedStatementIO[Boolean] = FF.liftF(Execute3(a, b)) def execute(a: String, b: Int): PreparedStatementIO[Boolean] = FF.liftF(Execute4(a, b)) val executeBatch: PreparedStatementIO[Array[Int]] = FF.liftF(ExecuteBatch) val executeLargeBatch: PreparedStatementIO[Array[Long]] = FF.liftF(ExecuteLargeBatch) val executeLargeUpdate: PreparedStatementIO[Long] = FF.liftF(ExecuteLargeUpdate) def executeLargeUpdate(a: String): PreparedStatementIO[Long] = FF.liftF(ExecuteLargeUpdate1(a)) def executeLargeUpdate(a: String, b: Array[Int]): PreparedStatementIO[Long] = FF.liftF(ExecuteLargeUpdate2(a, b)) def executeLargeUpdate(a: String, b: Array[String]): PreparedStatementIO[Long] = FF.liftF(ExecuteLargeUpdate3(a, b)) def executeLargeUpdate(a: String, b: Int): PreparedStatementIO[Long] = FF.liftF(ExecuteLargeUpdate4(a, b)) val executeQuery: PreparedStatementIO[ResultSet] = FF.liftF(ExecuteQuery) def executeQuery(a: String): PreparedStatementIO[ResultSet] = FF.liftF(ExecuteQuery1(a)) val executeUpdate: PreparedStatementIO[Int] = FF.liftF(ExecuteUpdate) def executeUpdate(a: String): PreparedStatementIO[Int] = FF.liftF(ExecuteUpdate1(a)) def executeUpdate(a: String, b: Array[Int]): PreparedStatementIO[Int] = FF.liftF(ExecuteUpdate2(a, b)) def executeUpdate(a: String, b: Array[String]): PreparedStatementIO[Int] = FF.liftF(ExecuteUpdate3(a, b)) def executeUpdate(a: String, b: Int): PreparedStatementIO[Int] = FF.liftF(ExecuteUpdate4(a, b)) val getConnection: PreparedStatementIO[Connection] = FF.liftF(GetConnection) val getFetchDirection: PreparedStatementIO[Int] = FF.liftF(GetFetchDirection) val getFetchSize: PreparedStatementIO[Int] = FF.liftF(GetFetchSize) val getGeneratedKeys: PreparedStatementIO[ResultSet] = FF.liftF(GetGeneratedKeys) val getLargeMaxRows: PreparedStatementIO[Long] = FF.liftF(GetLargeMaxRows) val getLargeUpdateCount: PreparedStatementIO[Long] = FF.liftF(GetLargeUpdateCount) val getMaxFieldSize: PreparedStatementIO[Int] = FF.liftF(GetMaxFieldSize) val getMaxRows: PreparedStatementIO[Int] = FF.liftF(GetMaxRows) val getMetaData: PreparedStatementIO[ResultSetMetaData] = FF.liftF(GetMetaData) val getMoreResults: PreparedStatementIO[Boolean] = FF.liftF(GetMoreResults) def getMoreResults(a: Int): PreparedStatementIO[Boolean] = FF.liftF(GetMoreResults1(a)) val getParameterMetaData: PreparedStatementIO[ParameterMetaData] = FF.liftF(GetParameterMetaData) val getQueryTimeout: PreparedStatementIO[Int] = FF.liftF(GetQueryTimeout) val getResultSet: PreparedStatementIO[ResultSet] = FF.liftF(GetResultSet) val getResultSetConcurrency: PreparedStatementIO[Int] = FF.liftF(GetResultSetConcurrency) val getResultSetHoldability: PreparedStatementIO[Int] = FF.liftF(GetResultSetHoldability) val getResultSetType: 
PreparedStatementIO[Int] = FF.liftF(GetResultSetType) val getUpdateCount: PreparedStatementIO[Int] = FF.liftF(GetUpdateCount) val getWarnings: PreparedStatementIO[SQLWarning] = FF.liftF(GetWarnings) val isCloseOnCompletion: PreparedStatementIO[Boolean] = FF.liftF(IsCloseOnCompletion) val isClosed: PreparedStatementIO[Boolean] = FF.liftF(IsClosed) val isPoolable: PreparedStatementIO[Boolean] = FF.liftF(IsPoolable) def isWrapperFor(a: Class[_]): PreparedStatementIO[Boolean] = FF.liftF(IsWrapperFor(a)) def setArray(a: Int, b: SqlArray): PreparedStatementIO[Unit] = FF.liftF(SetArray(a, b)) def setAsciiStream(a: Int, b: InputStream): PreparedStatementIO[Unit] = FF.liftF(SetAsciiStream(a, b)) def setAsciiStream(a: Int, b: InputStream, c: Int): PreparedStatementIO[Unit] = FF.liftF(SetAsciiStream1(a, b, c)) def setAsciiStream(a: Int, b: InputStream, c: Long): PreparedStatementIO[Unit] = FF.liftF(SetAsciiStream2(a, b, c)) def setBigDecimal(a: Int, b: BigDecimal): PreparedStatementIO[Unit] = FF.liftF(SetBigDecimal(a, b)) def setBinaryStream(a: Int, b: InputStream): PreparedStatementIO[Unit] = FF.liftF(SetBinaryStream(a, b)) def setBinaryStream(a: Int, b: InputStream, c: Int): PreparedStatementIO[Unit] = FF.liftF(SetBinaryStream1(a, b, c)) def setBinaryStream(a: Int, b: InputStream, c: Long): PreparedStatementIO[Unit] = FF.liftF(SetBinaryStream2(a, b, c)) def setBlob(a: Int, b: Blob): PreparedStatementIO[Unit] = FF.liftF(SetBlob(a, b)) def setBlob(a: Int, b: InputStream): PreparedStatementIO[Unit] = FF.liftF(SetBlob1(a, b)) def setBlob(a: Int, b: InputStream, c: Long): PreparedStatementIO[Unit] = FF.liftF(SetBlob2(a, b, c)) def setBoolean(a: Int, b: Boolean): PreparedStatementIO[Unit] = FF.liftF(SetBoolean(a, b)) def setByte(a: Int, b: Byte): PreparedStatementIO[Unit] = FF.liftF(SetByte(a, b)) def setBytes(a: Int, b: Array[Byte]): PreparedStatementIO[Unit] = FF.liftF(SetBytes(a, b)) def setCharacterStream(a: Int, b: Reader): PreparedStatementIO[Unit] = FF.liftF(SetCharacterStream(a, b)) def setCharacterStream(a: Int, b: Reader, c: Int): PreparedStatementIO[Unit] = FF.liftF(SetCharacterStream1(a, b, c)) def setCharacterStream(a: Int, b: Reader, c: Long): PreparedStatementIO[Unit] = FF.liftF(SetCharacterStream2(a, b, c)) def setClob(a: Int, b: Clob): PreparedStatementIO[Unit] = FF.liftF(SetClob(a, b)) def setClob(a: Int, b: Reader): PreparedStatementIO[Unit] = FF.liftF(SetClob1(a, b)) def setClob(a: Int, b: Reader, c: Long): PreparedStatementIO[Unit] = FF.liftF(SetClob2(a, b, c)) def setCursorName(a: String): PreparedStatementIO[Unit] = FF.liftF(SetCursorName(a)) def setDate(a: Int, b: Date): PreparedStatementIO[Unit] = FF.liftF(SetDate(a, b)) def setDate(a: Int, b: Date, c: Calendar): PreparedStatementIO[Unit] = FF.liftF(SetDate1(a, b, c)) def setDouble(a: Int, b: Double): PreparedStatementIO[Unit] = FF.liftF(SetDouble(a, b)) def setEscapeProcessing(a: Boolean): PreparedStatementIO[Unit] = FF.liftF(SetEscapeProcessing(a)) def setFetchDirection(a: Int): PreparedStatementIO[Unit] = FF.liftF(SetFetchDirection(a)) def setFetchSize(a: Int): PreparedStatementIO[Unit] = FF.liftF(SetFetchSize(a)) def setFloat(a: Int, b: Float): PreparedStatementIO[Unit] = FF.liftF(SetFloat(a, b)) def setInt(a: Int, b: Int): PreparedStatementIO[Unit] = FF.liftF(SetInt(a, b)) def setLargeMaxRows(a: Long): PreparedStatementIO[Unit] = FF.liftF(SetLargeMaxRows(a)) def setLong(a: Int, b: Long): PreparedStatementIO[Unit] = FF.liftF(SetLong(a, b)) def setMaxFieldSize(a: Int): PreparedStatementIO[Unit] = FF.liftF(SetMaxFieldSize(a)) 
def setMaxRows(a: Int): PreparedStatementIO[Unit] = FF.liftF(SetMaxRows(a)) def setNCharacterStream(a: Int, b: Reader): PreparedStatementIO[Unit] = FF.liftF(SetNCharacterStream(a, b)) def setNCharacterStream(a: Int, b: Reader, c: Long): PreparedStatementIO[Unit] = FF.liftF(SetNCharacterStream1(a, b, c)) def setNClob(a: Int, b: NClob): PreparedStatementIO[Unit] = FF.liftF(SetNClob(a, b)) def setNClob(a: Int, b: Reader): PreparedStatementIO[Unit] = FF.liftF(SetNClob1(a, b)) def setNClob(a: Int, b: Reader, c: Long): PreparedStatementIO[Unit] = FF.liftF(SetNClob2(a, b, c)) def setNString(a: Int, b: String): PreparedStatementIO[Unit] = FF.liftF(SetNString(a, b)) def setNull(a: Int, b: Int): PreparedStatementIO[Unit] = FF.liftF(SetNull(a, b)) def setNull(a: Int, b: Int, c: String): PreparedStatementIO[Unit] = FF.liftF(SetNull1(a, b, c)) def setObject(a: Int, b: AnyRef): PreparedStatementIO[Unit] = FF.liftF(SetObject(a, b)) def setObject(a: Int, b: AnyRef, c: Int): PreparedStatementIO[Unit] = FF.liftF(SetObject1(a, b, c)) def setObject(a: Int, b: AnyRef, c: Int, d: Int): PreparedStatementIO[Unit] = FF.liftF(SetObject2(a, b, c, d)) def setObject(a: Int, b: AnyRef, c: SQLType): PreparedStatementIO[Unit] = FF.liftF(SetObject3(a, b, c)) def setObject(a: Int, b: AnyRef, c: SQLType, d: Int): PreparedStatementIO[Unit] = FF.liftF(SetObject4(a, b, c, d)) def setPoolable(a: Boolean): PreparedStatementIO[Unit] = FF.liftF(SetPoolable(a)) def setQueryTimeout(a: Int): PreparedStatementIO[Unit] = FF.liftF(SetQueryTimeout(a)) def setRef(a: Int, b: Ref): PreparedStatementIO[Unit] = FF.liftF(SetRef(a, b)) def setRowId(a: Int, b: RowId): PreparedStatementIO[Unit] = FF.liftF(SetRowId(a, b)) def setSQLXML(a: Int, b: SQLXML): PreparedStatementIO[Unit] = FF.liftF(SetSQLXML(a, b)) def setShort(a: Int, b: Short): PreparedStatementIO[Unit] = FF.liftF(SetShort(a, b)) def setString(a: Int, b: String): PreparedStatementIO[Unit] = FF.liftF(SetString(a, b)) def setTime(a: Int, b: Time): PreparedStatementIO[Unit] = FF.liftF(SetTime(a, b)) def setTime(a: Int, b: Time, c: Calendar): PreparedStatementIO[Unit] = FF.liftF(SetTime1(a, b, c)) def setTimestamp(a: Int, b: Timestamp): PreparedStatementIO[Unit] = FF.liftF(SetTimestamp(a, b)) def setTimestamp(a: Int, b: Timestamp, c: Calendar): PreparedStatementIO[Unit] = FF.liftF(SetTimestamp1(a, b, c)) def setURL(a: Int, b: URL): PreparedStatementIO[Unit] = FF.liftF(SetURL(a, b)) def unwrap[T](a: Class[T]): PreparedStatementIO[T] = FF.liftF(Unwrap(a)) // Typeclass instances for PreparedStatementIO implicit val WeakAsyncPreparedStatementIO: WeakAsync[PreparedStatementIO] = new WeakAsync[PreparedStatementIO] { val monad = FF.catsFreeMonadForFree[PreparedStatementOp] override val applicative = monad override val rootCancelScope = CancelScope.Cancelable override def pure[A](x: A): PreparedStatementIO[A] = monad.pure(x) override def flatMap[A, B](fa: PreparedStatementIO[A])(f: A => PreparedStatementIO[B]): PreparedStatementIO[B] = monad.flatMap(fa)(f) override def tailRecM[A, B](a: A)(f: A => PreparedStatementIO[Either[A, B]]): PreparedStatementIO[B] = monad.tailRecM(a)(f) override def raiseError[A](e: Throwable): PreparedStatementIO[A] = module.raiseError(e) override def handleErrorWith[A](fa: PreparedStatementIO[A])(f: Throwable => PreparedStatementIO[A]): PreparedStatementIO[A] = module.handleErrorWith(fa)(f) override def monotonic: PreparedStatementIO[FiniteDuration] = module.monotonic override def realTime: PreparedStatementIO[FiniteDuration] = module.realtime override def 
suspend[A](hint: Sync.Type)(thunk: => A): PreparedStatementIO[A] = module.suspend(hint)(thunk) override def forceR[A, B](fa: PreparedStatementIO[A])(fb: PreparedStatementIO[B]): PreparedStatementIO[B] = module.forceR(fa)(fb) override def uncancelable[A](body: Poll[PreparedStatementIO] => PreparedStatementIO[A]): PreparedStatementIO[A] = module.uncancelable(body) override def canceled: PreparedStatementIO[Unit] = module.canceled override def onCancel[A](fa: PreparedStatementIO[A], fin: PreparedStatementIO[Unit]): PreparedStatementIO[A] = module.onCancel(fa, fin) override def fromFuture[A](fut: PreparedStatementIO[Future[A]]): PreparedStatementIO[A] = module.fromFuture(fut) } }
tpolecat/doobie
modules/free/src/main/scala/doobie/free/preparedstatement.scala
Scala
mit
40,055
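The record above only defines the PreparedStatementOp algebra and its smart constructors; the following is a hedged usage sketch that is not taken from the repository. The import path is assumed from the file's package, the parameter indices and values are made up, and a real program would still need an interpreter (the Visitor) to run against a live PreparedStatement.

// Hedged sketch: composing the smart constructors above into one PreparedStatementIO
// program via the Free monad's map/flatMap. Nothing here touches JDBC by itself.
import doobie.free.preparedstatement._   // assumed package, mirroring the file path above

object PreparedStatementSketch {
  val bindAndRun: PreparedStatementIO[Int] =
    for {
      _ <- setString(1, "alice")   // bind parameter 1
      _ <- setInt(2, 42)           // bind parameter 2
      n <- executeUpdate           // run the statement, yielding the update count
    } yield n
}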
package org.scaladebugger.api.lowlevel.requests.filters

import com.sun.jdi.ThreadReference
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalamock.scalatest.MockFactory
import org.scalatest.{FunSpec, Matchers, ParallelTestExecution}

class ThreadFilterSpec extends ParallelMockFunSpec {
  private val mockThreadReference = mock[ThreadReference]
  private val threadFilter = ThreadFilter(threadReference = mockThreadReference)

  describe("ThreadFilter") {
    describe("#toProcessor") {
      it("should return a processor containing the thread filter") {
        threadFilter.toProcessor.argument should be (threadFilter)
      }
    }
  }
}
chipsenkbeil/scala-debugger
scala-debugger-api/src/test/scala/org/scaladebugger/api/lowlevel/requests/filters/ThreadFilterSpec.scala
Scala
apache-2.0
667
/* Copyright 2012 Twitter, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.twitter.scalding import cascading.scheme.NullScheme import cascading.tuple.Fields import org.apache.hadoop.conf.Configuration import org.scalatest.{ Matchers, WordSpec } class MultiTsvInputJob(args: Args) extends Job(args) { try { MultipleTsvFiles(List("input0", "input1"), ('query, 'queryStats)).read.write(Tsv("output0")) } catch { case e: Exception => e.printStackTrace() } } class SequenceFileInputJob(args: Args) extends Job(args) { try { SequenceFile("input0").read.write(SequenceFile("output0")) WritableSequenceFile("input1", ('query, 'queryStats)).read.write(WritableSequenceFile("output1", ('query, 'queryStats))) } catch { case e: Exception => e.printStackTrace() } } class MultipleTextLineFilesJob(args: Args) extends Job(args) { try { MultipleTextLineFiles(args.list("input"): _*).write(Tsv("output0")) } catch { case e: Exception => e.printStackTrace() } } class FileSourceTest extends WordSpec with Matchers { import Dsl._ "A MultipleTsvFile Source" should { JobTest(new MultiTsvInputJob(_)). source(MultipleTsvFiles(List("input0", "input1"), ('query, 'queryStats)), List(("foobar", 1), ("helloworld", 2))). sink[(String, Int)](Tsv("output0")) { outBuf => "take multiple Tsv files as input sources" in { outBuf should have length 2 outBuf.toList shouldBe List(("foobar", 1), ("helloworld", 2)) } } .run .finish() } "A WritableSequenceFile Source" should { JobTest(new SequenceFileInputJob(_)). source(SequenceFile("input0"), List(("foobar0", 1), ("helloworld0", 2))). source(WritableSequenceFile("input1", ('query, 'queryStats)), List(("foobar1", 1), ("helloworld1", 2))). 
sink[(String, Int)](SequenceFile("output0")) { outBuf => "sequence file input" in { outBuf should have length 2 outBuf.toList shouldBe List(("foobar0", 1), ("helloworld0", 2)) } } .sink[(String, Int)](WritableSequenceFile("output1", ('query, 'queryStats))) { outBuf => "writable sequence file input" in { outBuf should have length 2 outBuf.toList shouldBe List(("foobar1", 1), ("helloworld1", 2)) } } .run .finish() } "A MultipleTextLineFiles Source" should { JobTest(new MultipleTextLineFilesJob(_)) .arg("input", List("input0", "input1")) .source(MultipleTextLineFiles("input0", "input1"), List("foobar", "helloworld")) .sink[String](Tsv("output0")) { outBuf => "take multiple text files as input sources" in { outBuf should have length 2 outBuf.toList shouldBe List("foobar", "helloworld") } } .run .finish() } "TextLine.toIterator" should { "correctly read strings" in { TextLine("../tutorial/data/hello.txt").toIterator(Config.default, Local(true)).toList shouldBe List("Hello world", "Goodbye world") } } /** * The layout of the test data looks like this: * /test_data/2013/02 does not exist * * /test_data/2013/03 (dir with a single data file in it) * /test_data/2013/03/2013-03.txt * * /test_data/2013/04 (dir with a single data file and a _SUCCESS file) * /test_data/2013/04/2013-04.txt * /test_data/2013/04/_SUCCESS * * /test_data/2013/05 (logically empty dir: git does not support empty dirs) * * /test_data/2013/06 (dir with only a _SUCCESS file) * /test_data/2013/06/_SUCCESS * * /test_data/2013/07 * /test_data/2013/07/2013-07.txt * /test_data/2013/07/_SUCCESS */ "default pathIsGood" should { import TestFileSource.pathIsGood "reject a non-existing directory" in { pathIsGood("test_data/2013/02/") shouldBe false pathIsGood("test_data/2013/02/*") shouldBe false } "accept a directory with data in it" in { pathIsGood("test_data/2013/03/") shouldBe true pathIsGood("test_data/2013/03/*") shouldBe true } "accept a directory with data and _SUCCESS in it" in { pathIsGood("test_data/2013/04/") shouldBe true pathIsGood("test_data/2013/04/*") shouldBe true } "accept a single directory without glob" in { pathIsGood("test_data/2013/05/") shouldBe true } "reject a single directory glob with ignored files" in { pathIsGood("test_data/2013/05/*") shouldBe false } "reject a directory with only _SUCCESS when specified as a glob" in { pathIsGood("test_data/2013/06/*") shouldBe false } "accept a directory with only _SUCCESS when specified without a glob" in { pathIsGood("test_data/2013/06/") shouldBe true } } "FileSource.globHasSuccessFile" should { import TestFileSource.globHasSuccessFile "accept a directory glob with only _SUCCESS" in { globHasSuccessFile("test_data/2013/06/*") shouldBe true } "accept a directory glob with _SUCCESS and other hidden files" in { globHasSuccessFile("test_data/2013/05/*") shouldBe true } "accept a directory glob with _SUCCESS and other non-hidden files" in { globHasSuccessFile("test_data/2013/04/*") shouldBe true } "reject a path without glob" in { globHasSuccessFile("test_data/2013/04/") shouldBe false } "reject a multi-dir glob without _SUCCESS" in { globHasSuccessFile("test_data/2013/{02,03}/*") shouldBe false } } "success file source pathIsGood" should { import TestSuccessFileSource.pathIsGood "reject a non-existing directory" in { pathIsGood("test_data/2013/02/") shouldBe false pathIsGood("test_data/2013/02/*") shouldBe false } "reject a directory with data in it but no _SUCCESS file" in { pathIsGood("test_data/2013/03/") shouldBe false pathIsGood("test_data/2013/03/*") shouldBe false } 
"reject a single directory without glob" in { pathIsGood("test_data/2013/05/") shouldBe false } "accept a single directory glob with only _SUCCESS and ignored files" in { pathIsGood("test_data/2013/05/*") shouldBe true } "accept a directory with data and _SUCCESS in it when specified as a glob" in { pathIsGood("test_data/2013/04/*") shouldBe true } "reject a directory with data and _SUCCESS in it when specified without a glob" in { pathIsGood("test_data/2013/04/") shouldBe false } "accept a directory with only _SUCCESS when specified as a glob" in { pathIsGood("test_data/2013/06/*") shouldBe true } "reject a directory with only _SUCCESS when specified without a glob" in { pathIsGood("test_data/2013/06/") shouldBe false } "reject a multi-dir glob with only one _SUCCESS" in { pathIsGood("test_data/2013/{03,04}/*") shouldBe false } "accept a multi-dir glob if every dir has _SUCCESS" in { pathIsGood("test_data/2013/{04,08}/*") shouldBe true } "accept a multi-dir glob if all matched non-hidden directories have _SUCCESS files, even when some are empty" in { pathIsGood("test_data/2013/{04,05,06}/*") shouldBe true } // NOTE: this is an undesirable limitation of SuccessFileSource, and is encoded here // as a demonstration. This isn't a great behavior that we'd want to keep. "accept a multi-dir glob if all dirs with non-hidden files have _SUCCESS while other dirs " + "are empty or don't exist" in { pathIsGood("test_data/2013/{02,04,05}/*") shouldBe true } } "FixedPathSource.hdfsWritePath" should { "crib if path == *" in { intercept[AssertionError] { TestFixedPathSource("*").hdfsWritePath } } "crib if path == /*" in { intercept[AssertionError] { TestFixedPathSource("/*").hdfsWritePath } } "remove /* from a path ending in /*" in { TestFixedPathSource("test_data/2013/06/*").hdfsWritePath shouldBe "test_data/2013/06" } "leave path as-is when it ends in a directory name" in { TestFixedPathSource("test_data/2013/06").hdfsWritePath shouldBe "test_data/2013/06" } "leave path as-is when it ends in a directory name/" in { TestFixedPathSource("test_data/2013/06/").hdfsWritePath shouldBe "test_data/2013/06/" } "leave path as-is when it ends in * without a preceeding /" in { TestFixedPathSource("test_data/2013/06*").hdfsWritePath shouldBe "test_data/2013/06*" } } "invalid source input" should { "Throw in validateTaps in strict mode" in { val e = intercept[InvalidSourceException] { TestInvalidFileSource.validateTaps(Hdfs(strict = true, new Configuration())) } assert(e.getMessage.endsWith("Data is missing from one or more paths in: List(invalid_hdfs_path)")) } "Throw in validateTaps in non-strict mode" in { val e = intercept[InvalidSourceException] { TestInvalidFileSource.validateTaps(Hdfs(strict = false, new Configuration())) } assert(e.getMessage.endsWith("No good paths in: List(invalid_hdfs_path)")) } "Throw in toIterator because no data is present in strict mode" in { val e = intercept[InvalidSourceException] { TestInvalidFileSource.toIterator(Config.default, Hdfs(strict = true, new Configuration())) } assert(e.getMessage.endsWith("Data is missing from one or more paths in: List(invalid_hdfs_path)")) } "Throw in toIterator because no data is present in non-strict mode" in { val e = intercept[InvalidSourceException] { TestInvalidFileSource.toIterator(Config.default, Hdfs(strict = false, new Configuration())) } assert(e.getMessage.endsWith("No good paths in: List(invalid_hdfs_path)")) } } } object TestPath { def getCurrentDirectory = new java.io.File(".").getCanonicalPath def prefix = 
getCurrentDirectory.split("/").last match { case "scalding-core" => getCurrentDirectory case _ => getCurrentDirectory + "/scalding-core" } val testfsPathRoot = prefix + "/src/test/resources/com/twitter/scalding/test_filesystem/" } object TestFileSource extends FileSource { import TestPath.testfsPathRoot override def hdfsPaths: Iterable[String] = Iterable.empty override def localPaths: Iterable[String] = Iterable.empty val conf = new Configuration() def pathIsGood(p: String) = super.pathIsGood(testfsPathRoot + p, conf) def globHasSuccessFile(p: String) = FileSource.globHasSuccessFile(testfsPathRoot + p, conf) } object TestSuccessFileSource extends FileSource with SuccessFileSource { import TestPath.testfsPathRoot override def hdfsPaths: Iterable[String] = Iterable.empty override def localPaths: Iterable[String] = Iterable.empty val conf = new Configuration() def pathIsGood(p: String) = super.pathIsGood(testfsPathRoot + p, conf) } object TestInvalidFileSource extends FileSource with Mappable[String] { override def hdfsPaths: Iterable[String] = Iterable("invalid_hdfs_path") override def localPaths: Iterable[String] = Iterable("invalid_local_path") override def hdfsScheme = new NullScheme(Fields.ALL, Fields.NONE) override def converter[U >: String] = TupleConverter.asSuperConverter[String, U](implicitly[TupleConverter[String]]) } case class TestFixedPathSource(path: String*) extends FixedPathSource(path: _*)
cchepelov/scalding
scalding-core/src/test/scala/com/twitter/scalding/FileSourceTest.scala
Scala
apache-2.0
11,997
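TestSuccessFileSource above exercises the _SUCCESS-file semantics through a bare FileSource; as a hedged sketch (the class name and glob are hypothetical, and the scheme mix-in is omitted), a concrete source could opt into the same behavior like this:

// Hypothetical sketch, mirroring TestFixedPathSource/TestSuccessFileSource above:
// a fixed-path source whose glob only counts as "good" when every matched directory
// carries a _SUCCESS file. A real, readable source would also mix in a scheme trait.
import com.twitter.scalding._

case class SuccessGuardedSource(glob: String)
  extends FixedPathSource(glob)
  with SuccessFileSource

// Per the tests above, "test_data/2013/{04,08}/*" would pass pathIsGood for such a
// source, while "test_data/2013/{03,04}/*" would be rejected (03 lacks _SUCCESS).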
package core.sysevents import scala.language.implicitConversions sealed trait Sysevent { def id: String def componentId: String def >>>(f1: => Seq[FieldAndValue])(implicit ctx: WithSyseventPublisher, system: SyseventSystem): Unit = ctx.evtPublisher.publish(system, this, ctx.commonFields match { case x if x.isEmpty => f1 case x => f1 ++ x }) def >>()(implicit ctx: WithSyseventPublisher, system: SyseventSystem): Unit = >>>(Seq()) def >>(f1: => FieldAndValue)(implicit ctx: WithSyseventPublisher, system: SyseventSystem): Unit = >>>(Seq(f1)) def >>(f1: => FieldAndValue, f2: => FieldAndValue)(implicit ctx: WithSyseventPublisher, system: SyseventSystem): Unit = >>>(Seq(f1, f2)) def >>(f1: => FieldAndValue, f2: => FieldAndValue, f3: => FieldAndValue)(implicit ctx: WithSyseventPublisher, system: SyseventSystem): Unit = >>>(Seq(f1, f2, f3)) def >>(f1: => FieldAndValue, f2: => FieldAndValue, f3: => FieldAndValue, f4: => FieldAndValue)(implicit ctx: WithSyseventPublisher, system: SyseventSystem): Unit = >>>(Seq(f1, f2, f3, f4)) def >>(f1: => FieldAndValue, f2: => FieldAndValue, f3: => FieldAndValue, f4: => FieldAndValue, f5: => FieldAndValue)(implicit ctx: WithSyseventPublisher, system: SyseventSystem): Unit = >>>(Seq(f1, f2, f3, f4, f5)) def >>(f1: => FieldAndValue, f2: => FieldAndValue, f3: => FieldAndValue, f4: => FieldAndValue, f5: => FieldAndValue, f6: => FieldAndValue)(implicit ctx: WithSyseventPublisher, system: SyseventSystem): Unit = >>>(Seq(f1, f2, f3, f4, f5, f6)) } object SyseventOps { implicit def stringToSyseventOps(s: String)(implicit component: SyseventComponent): SyseventOps = new SyseventOps(s, component) implicit def symbolToSyseventOps(s: Symbol)(implicit component: SyseventComponent): SyseventOps = new SyseventOps(s.name, component) } class SyseventOps(id: String, component: SyseventComponent) { def trace: Sysevent = TraceSysevent(id, component.componentId) def info: Sysevent = InfoSysevent(id, component.componentId) def warn: Sysevent = WarnSysevent(id, component.componentId) def error: Sysevent = ErrorSysevent(id, component.componentId) } case class TraceSysevent(id: String, componentId: String) extends Sysevent case class InfoSysevent(id: String, componentId: String) extends Sysevent case class WarnSysevent(id: String, componentId: String) extends Sysevent case class ErrorSysevent(id: String, componentId: String) extends Sysevent
intelix/eventstreams
es-core/es-sysevents/src/main/scala/core/sysevents/Sysevent.scala
Scala
apache-2.0
2,452
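Since the publisher wiring lives outside this file, here is a hedged sketch built only from the definitions above: events can be constructed directly from the case classes (or through SyseventOps given an implicit SyseventComponent) and raised with the >> operators once an implicit WithSyseventPublisher and SyseventSystem are in scope. The component id below is made up, and the shape of FieldAndValue is an assumption.

// Hedged sketch using only the types defined above; "MyComponent" is a made-up id.
import core.sysevents._

object SyseventUsageSketch {
  val started: Sysevent = InfoSysevent("ServiceStarted", "MyComponent")
  val failed: Sysevent  = ErrorSysevent("RequestFailed", "MyComponent")

  // With an implicit WithSyseventPublisher and SyseventSystem in scope they would be raised as:
  //   started >> ()
  //   failed >> ('reason -> "timeout")   // assumes FieldAndValue is a (Symbol, Any)-style pair
}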
// Databricks notebook source // MAGIC %md // MAGIC // MAGIC # [SDS-2.2, Scalable Data Science](https://lamastex.github.io/scalable-data-science/sds/2/2/) // COMMAND ---------- // MAGIC %md // MAGIC Archived YouTube video of this live unedited lab-lecture: // MAGIC // MAGIC [![Archived YouTube video of this live unedited lab-lecture](http://img.youtube.com/vi/3vxH9SGt37E/0.jpg)](https://www.youtube.com/embed/3vxH9SGt37E?start=934&end=988&autoplay=1) [![Archived YouTube video of this live unedited lab-lecture](http://img.youtube.com/vi/5-ABjtLR2To/0.jpg)](https://www.youtube.com/embed/5-ABjtLR2To?start=0&end=293&autoplay=1) // COMMAND ---------- // MAGIC %md // MAGIC # HOMEWORK notebook - RDDs Transformations and Actions // MAGIC Just go through the notebook and familiarize yourself with these transformations and actions. // COMMAND ---------- // MAGIC %md // MAGIC ##### 1. Perform the ``takeOrdered`` action on the RDD // MAGIC // MAGIC To illustrate ``take`` and ``takeOrdered`` actions, let's create a bigger RDD named ``rdd0_1000000`` that is made up of a million integers from 0 to 1000000. // MAGIC We will ``sc.parallelize`` the ``Seq`` Scala collection by using its ``.range(startInteger,stopInteger)`` method. // COMMAND ---------- val rdd0_1000000 = sc.parallelize(Seq.range(0, 1000000)) // <Shift+Enter> to create an RDD of million integers: 0,1,2,...,10^6 // COMMAND ---------- rdd0_1000000.take(5) // <Ctrl+Enter> gives the first 5 elements of the RDD, (0, 1, 2, 3, 4) // COMMAND ---------- // MAGIC %md // MAGIC ``takeordered(n)`` returns ``n`` elements ordered in ascending order (by default) or as specified by the optional key function, as shown below. // COMMAND ---------- rdd0_1000000.takeOrdered(5) // <Shift+Enter> is same as rdd0_1000000.take(5) // COMMAND ---------- rdd0_1000000.takeOrdered(5)(Ordering[Int].reverse) // <Ctrl+Enter> to get the last 5 elements of the RDD 999999, 999998, ..., 999995 // COMMAND ---------- // HOMEWORK: edit the numbers below to get the last 20 elements of an RDD made of a sequence of integers from 669966 to 969696 sc.parallelize(Seq.range(0, 10)).takeOrdered(5)(Ordering[Int].reverse) // <Ctrl+Enter> evaluate this cell after editing it for the right answer // COMMAND ---------- // MAGIC %md // MAGIC ##### 2. More examples of `map` // COMMAND ---------- val rdd = sc.parallelize(Seq(1, 2, 3, 4)) // <Shift+Enter> to evaluate this cell (using default number of partitions) // COMMAND ---------- rdd.map( x => x*2) // <Ctrl+Enter> to transform rdd by map that doubles each element // COMMAND ---------- // MAGIC %md // MAGIC To see what's in the transformed RDD, let's perform the actions of ``count`` and ``collect`` on the ``rdd.map( x => x*2)``, the transformation of ``rdd`` by the ``map`` given by the closure ``x => x*2``. // COMMAND ---------- rdd.map( x => x*2).count() // <Shift+Enter> to perform count (action) the element of the RDD = 4 // COMMAND ---------- rdd.map( x => x*2).collect() // <Shift+Enter> to perform collect (action) to show 2, 4, 6, 8 // COMMAND ---------- // HOMEWORK: uncomment the last line in this cell and modify the '<Fill-In-Here>' in the code below to collect and display the square (x*x) of each element of the RDD // the answer should be Array[Int] = Array(1, 4, 9, 16) Press <Cntrl+Enter> to evaluate the cell after modifying '???' //sc.parallelize(Seq(1, 2, 3, 4)).map( x => <Fill-In-Here> ).collect() // COMMAND ---------- // MAGIC %md // MAGIC // MAGIC ##### 3. 
More examples of `filter` // MAGIC Let's declare another ``val`` RDD named ``rddFiltered`` by transforming our first RDD named ``rdd`` via the ``filter`` transformation ``x%2==0`` (of being even). // MAGIC // MAGIC This filter transformation based on the closure ``x => x%2==0`` will return ``true`` if the element, modulo two, equals zero. The closure is automatically passed on to the workers for evaluation (when an action is called later). // MAGIC So this will take our RDD of (1,2,3,4) and return an RDD of (2, 4). // COMMAND ---------- val rddFiltered = rdd.filter( x => x%2==0 ) // <Ctrl+Enter> to declare rddFiltered from transforming rdd // COMMAND ---------- rddFiltered.collect() // <Ctrl+Enter> to collect (action) elements of rddFiltered; should be (2, 4) // COMMAND ---------- // MAGIC %md // MAGIC ##### 4. More examples of `reduce` // COMMAND ---------- val rdd = sc.parallelize(Array(1,2,3,4,5)) // COMMAND ---------- rdd.reduce( (x,y)=>x+y ) // <Shift+Enter> to do reduce (action) to sum and return Int = 15 // COMMAND ---------- rdd.reduce( _ + _ ) // <Shift+Enter> to do the same sum as above and return Int = 15 (underscore syntax) // COMMAND ---------- rdd.reduce( (x,y)=>x*y ) // <Shift+Enter> to do reduce (action) to multiply and return Int = 120 // COMMAND ---------- val rdd0_1000000 = sc.parallelize(Seq.range(0, 1000000)) // <Shift+Enter> to create an RDD of a million integers: 0,1,2,...,10^6 // COMMAND ---------- rdd0_1000000.reduce( (x,y)=>x+y ) // <Ctrl+Enter> to do reduce (action) to sum and return Int = 1783293664 // COMMAND ---------- // the following correctly returns Int = 0, although for the wrong reason // we have overflowed Int's numeric limits!!! (but got lucky with 0*x=0 for any Int x) // <Shift+Enter> to do reduce (action) to multiply and return Int = 0 rdd0_1000000.reduce( (x,y)=>x*y ) // COMMAND ---------- // <Ctrl+Enter> to do reduce (action) to multiply 1*2*...*9*10 and return the correct answer Int = 3628800 sc.parallelize(Seq.range(1, 11)).reduce( (x,y)=>x*y ) // COMMAND ---------- // MAGIC %md // MAGIC **CAUTION: Know the limits of your numeric types!** // COMMAND ---------- // MAGIC %md // MAGIC The minimum and maximum values of the `Int` and `Long` types are as follows: // COMMAND ---------- (Int.MinValue , Int.MaxValue) // COMMAND ---------- (Long.MinValue, Long.MaxValue) // COMMAND ---------- // <Ctrl+Enter> to do reduce (action) to multiply 1*2*...*20 and return the wrong answer Int = -2102132736 // we have overflowed Int and wrapped around to negative Ints!!! (rigorous distributed numerics, anyone?) sc.parallelize(Seq.range(1, 21)).reduce( (x,y)=>x*y ) // COMMAND ---------- // <Ctrl+Enter> we can accomplish the multiplication using the Long integer type // by adding 'L' to integer values, Scala infers the type Long sc.parallelize(Seq.range(1L, 21L)).reduce( (x,y)=>x*y ) // COMMAND ---------- // MAGIC %md // MAGIC As the following products over Long integers indicate, they are limited too! // COMMAND ---------- // <Shift+Enter> for the wrong answer Long = -8718968878589280256 (due to Long's numeric limits) sc.parallelize(Seq.range(1L, 61L)).reduce( (x,y)=>x*y ) // COMMAND ---------- // <Ctrl+Enter> for the wrong answer Long = 0 (due to Long's numeric limits) sc.parallelize(Seq.range(1L, 100L)).reduce( (x,y)=>x*y ) // COMMAND ---------- // MAGIC %md // MAGIC *** // MAGIC // MAGIC ##### 5. 
Let us do a bunch of transformations to our RDD and perform an action // MAGIC // MAGIC * start from a Scala ``Seq``, // MAGIC * ``sc.parallelize`` the list to create an RDD, // MAGIC * ``filter`` that RDD, creating a new filtered RDD, // MAGIC * do a ``map`` transformation that maps that RDD to a new mapped RDD, // MAGIC * and finally, perform a ``reduce`` action to sum the elements in the RDD. // MAGIC // MAGIC This last ``reduce`` action causes the ``parallelize``, the ``filter``, and the ``map`` transformations to actually be executed, and returns a result back to the driver machine. // COMMAND ---------- sc.parallelize(Seq(1, 2, 3, 4)) // <Ctrl+Enter> the whole chain returns 12 .filter(x => x%2==0) // (2, 4) is the filtered RDD .map(x => x*2) // (4, 8) is the mapped RDD .reduce(_+_) // 4+8=12 is the final result from reduce // COMMAND ---------- // MAGIC %md // MAGIC ##### 6. Transform the RDD by ``distinct`` to make another RDD // MAGIC // MAGIC Let's declare another RDD named ``rdd2`` that has some repeated elements, so we can apply the ``distinct`` transformation to it. // MAGIC That would give us a new RDD that only contains the distinct elements of the input RDD. // COMMAND ---------- val rdd2 = sc.parallelize(Seq(4, 1, 3, 2, 2, 2, 3, 4)) // <Ctrl+Enter> to declare rdd2 // COMMAND ---------- // MAGIC %md // MAGIC Let's apply the ``distinct`` transformation to ``rdd2`` and have it return a new RDD named ``rdd2Distinct`` that contains the distinct elements of the source RDD ``rdd2``. // COMMAND ---------- val rdd2Distinct = rdd2.distinct() // <Ctrl+Enter> transformation: distinct gives distinct elements of rdd2 // COMMAND ---------- rdd2Distinct.collect() // <Ctrl+Enter> to collect (action) as Array(4, 2, 1, 3) // COMMAND ---------- // MAGIC %md // MAGIC ##### 7. More ``flatMap`` // COMMAND ---------- val rdd = sc.parallelize(Array(1,2,3)) // <Shift+Enter> to create an RDD of three Int elements 1,2,3 // COMMAND ---------- // MAGIC %md // MAGIC Let us pass the ``rdd`` above to a map with a closure that will take in each element ``x`` and return ``Array(x, x+5)``. // MAGIC So each element of the mapped RDD named ``rddOfArrays`` is an `Array[Int]`, an array of integers. // COMMAND ---------- // <Shift+Enter> to make an RDD of Arrays, i.e., RDD[Array[Int]] val rddOfArrays = rdd.map( x => Array(x, x+5) ) // COMMAND ---------- rddOfArrays.collect() // <Ctrl+Enter> to see it is RDD[Array[Int]] = (Array(1, 6), Array(2, 7), Array(3, 8)) // COMMAND ---------- // MAGIC %md // MAGIC Now let's observe what happens when we use ``flatMap`` to transform the same ``rdd`` and create another RDD called ``rddfM``. // MAGIC // MAGIC Interestingly, ``flatMap`` *flattens* our ``rdd`` by taking each ``Array`` (or sequence in general) and turning it into individual elements. // MAGIC // MAGIC Thus, we end up with the RDD ``rddfM`` consisting of the elements (1, 6, 2, 7, 3, 8), as shown by the output of ``rddfM.collect`` below. // COMMAND ---------- val rddfM = rdd.flatMap(x => Array(x, x+5)) // <Shift+Enter> to flatMap the rdd using the closure (x => Array(x, x+5)) // COMMAND ---------- rddfM.collect // <Ctrl+Enter> to collect rddfM = (1, 6, 2, 7, 3, 8) // COMMAND ----------
raazesh-sainudiin/scalable-data-science
db/2/2/005_RDDsTransformationsActionsHOMEWORK.scala
Scala
unlicense
10,257
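The overflow caveats in the reduce cells of the notebook above can be sidestepped by lifting to arbitrary precision before reducing. A hedged sketch, not part of the original homework, assuming the same SparkContext sc as the notebook:

// Hedged sketch: the 60! product that wraps around on Long stays exact on BigInt.
sc.parallelize(Seq.range(1L, 61L))
  .map(BigInt(_))   // lift each element to arbitrary precision
  .reduce(_ * _)    // exact product 1*2*...*60, no wrap-around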
package com.arcusys.learn.models.view

/**
 * Created by Iliya Tryapitsin on 19.03.14.
 */
@deprecated
object CurriculumViewModel {
}
ViLPy/Valamis
learn-portlet/src/main/scala/com/arcusys/learn/models/view/CurriculumViewModel.scala
Scala
lgpl-3.0
135
package edu.gemini.spModel.io.impl.migration.to2015B import java.io.{StringReader, StringWriter} import edu.gemini.pot.sp.{ISPObservation, ISPProgram, ISPTemplateFolder, ISPTemplateGroup, SPComponentType} import edu.gemini.shared.util.immutable.{None => JNone} import edu.gemini.spModel.io.impl.migration.MigrationTest import edu.gemini.spModel.io.impl.{PioSpXmlParser, PioSpXmlWriter} import edu.gemini.spModel.obscomp.SPNote import edu.gemini.spModel.target.obsComp.TargetObsComp import edu.gemini.spModel.template.{TemplateGroup, TemplateParameters} import org.junit.Assert._ import org.junit.Test import scala.collection.JavaConverters._ // a rudimentary test to make sure it doesn't blow up class TargetConversionTest extends MigrationTest { // Simple conversion from 2015A model. @Test def testTemplateConversion(): Unit = withTestProgram("GS-2015B-T-1.xml", { (_,p) => validateProgram(p) }) // Read 2015A model, write 2015B model, read back 2015B model, validate. @Test def roundTrip(): Unit = withTestProgram("GS-2015B-T-1.xml", { (odb, p0) => val sw = new StringWriter() new PioSpXmlWriter(sw).printDocument(p0) val parser = new PioSpXmlParser(odb.getFactory) parser.parseDocument(new StringReader(sw.toString)) match { case p1: ISPProgram => validateProgram(p1) case _ => fail("expecting a science program") } }) private def validateProgram(p: ISPProgram): Unit = { validateTemplateFolder(p.getTemplateFolder) validateB1950(p.getObservations.asScala.find(_.getDataObject.getTitle == "Rigel").get) } private def validateTemplateFolder(tf: ISPTemplateFolder): Unit = { def validateGroup(g: ISPTemplateGroup): Unit = { g.getDataObject match { case tg: TemplateGroup => assertEquals("blueprint-0", tg.getBlueprintId) } val tpList = g.getTemplateParameters.asScala.toList.map(_.getDataObject.asInstanceOf[TemplateParameters]) assertEquals(2, tpList.size) // A sidereal and a non-sidereal target tpList.map(_.getTarget) match { case List(tSidereal, tNonSidereal) => assertTrue(tSidereal.isSidereal) assertEquals("Some Sidereal", tSidereal.getName) assertTrue(tNonSidereal.isNonSidereal) assertEquals("S123456", tNonSidereal.getName) case _ => fail("expecting Sidereal, NonSidereal") } g.getObsComponents.asScala.toList match { case List(note) => val txt = note.getDataObject.asInstanceOf[SPNote].getNote assertTrue(txt.contains("S123456 failed")) case _ => fail("expecting a conversion note") } } tf.getTemplateGroups.asScala.toList match { case List(g1, g2) => validateGroup(g2) case _ => fail("expecting two template groups") } } def validateB1950(obs: ISPObservation): Unit = { // unsafe extravaganza! val targetComp = obs.getObsComponents.asScala.find(_.getType == SPComponentType.TELESCOPE_TARGETENV).get val toc = targetComp.getDataObject.asInstanceOf[TargetObsComp] val rigel = toc.getBase.getSiderealTarget.get val when = JNone.instance[java.lang.Long] val ra = rigel.coordinates.ra.toDegrees val dec = rigel.coordinates.dec.toDegrees val dra = rigel.properMotion.map(_.deltaRA.velocity.masPerYear).get val ddec = rigel.properMotion.map(_.deltaDec.velocity.masPerYear).get val epoch = rigel.properMotion.map(_.epoch.year).get // assertEquals("05:14:32.269", (new HMSFormat).format(ra)) // assertEquals("-08:12:05.86", (new DMSFormat).format(dec)) assertEquals("1.30", f"$dra%.2f") assertEquals("0.50", f"$ddec%.2f") assertEquals(2000.0000, epoch, 0.0000001) // Check for the note. 
val noteComp = obs.getObsComponents.asScala.find(_.getType == SPComponentType.INFO_NOTE).get val text = noteComp.getDataObject.asInstanceOf[SPNote].getNote assertTrue(text.contains("Rigel")) } }
spakzad/ocs
bundle/edu.gemini.spModel.io/src/test/scala/edu/gemini/spModel/io/impl/migration/to2015B/TargetConversionTest.scala
Scala
bsd-3-clause
4,036
/* * Copyright 2017-2018 Azad Bolour * Licensed under GNU Affero General Public License v3.0 - * https://github.com/azadbolour/boardgame/blob/master/LICENSE.md */ package controllers import javax.inject._ import controllers.GameApiJsonSupport._ import controllers.GameDtoConverters._ import com.bolour.boardgame.scala.common.message._ import com.bolour.boardgame.scala.server.domain.GameExceptions._ import com.bolour.boardgame.scala.server.service.GameService import com.bolour.boardgame.scala.common.domain.{Piece, PlayPiece} import org.slf4j.LoggerFactory import play.api.mvc._ import scala.util.{Failure, Success} import play.api.libs.json._ /* * TODO. Not recommended to use InjectedController. Change to use ControllerComponents parameter. * But don't know how to inject that in tests. * TODO. Where does the components parameter come from? */ @Singleton // class GameController @Inject() (service: GameService) extends InjectedController { class GameController @Inject() (cc: ControllerComponents, service: GameService) extends AbstractController(cc) { val logger = LoggerFactory.getLogger(this.getClass) /** Shorthand for validation errors type defined in JsError. */ type ValidationErrors = Seq[(JsPath, Seq[JsonValidationError])] val serverType = "Scala" val apiVersion = "1.0" val handShakeResponse = HandShakeResponse(serverType, apiVersion) def handShake = Action { Ok(Json.toJson(handShakeResponse)) } /** * Action for adding a player taking json input. */ def addPlayer = Action(parse.json) { implicit request => logger.info(s"addPlayer json request: ${request.body}") val maybeValidPlayerDto = validate[PlayerDto](request) processRequest(maybeValidPlayerDto, addPlayerValidated) } /** * Action for adding a player once json input is validated. * Kept public for testing. */ def addPlayerValidated(dto: PlayerDto) = { logger.info(s"addPlayer playerDto: ${dto}") val player = fromPlayerDto(dto) val triedUnit = service.addPlayer(player) triedUnit match { case Failure(ex) => logger.error("addPlayer failure", ex) unprocessable(ex) case Success(_) => logger.info("addPlayer success") Ok(Json.toJson(())) } } /** * Action for starting a game taking json input. */ def startGame = Action(parse.json) { implicit request => logger.info(s"startGame json request: ${request.body}") val maybeValidStartGameRequest = validate[StartGameRequest](request) processRequest(maybeValidStartGameRequest, startGameValidated) } /** * Action for starting a game once json input is validated. * Kept public for testing. */ def startGameValidated(startGameRequest: StartGameRequest) = { logger.info(s"startGame startGameRequest: ${startGameRequest}") startGameRequest match { case StartGameRequest(gameParams, initPieces, pointValues) => { val triedStart = service.startGame(gameParams, initPieces, pointValues) triedStart match { case Failure(ex) => logger.error("startGame failure", ex) unprocessable(ex) case Success(gameState) => { val gameDto = mkStartGameResponse(gameParams, gameState) logger.info(s"startGame success gameDto: ${gameDto}") Ok(Json.toJson(gameDto)) } } } } } // TODO. Change all logging to debug. /** * Action for committing a play taking json input. 
*/ def commitPlay(gameId: String) = Action(parse.json) { implicit request => logger.info(s"commitPlay: json request: ${request.body}") val maybeValidPlayPieces = validate[List[PlayPiece]](request) processGameRequest(gameId, maybeValidPlayPieces, commitPlayValidated) } def commitPlayValidated(gameId: String)(playPieces: List[PlayPiece]) = { logger.info(s"commitPlay play pieces: ${playPieces}") val triedCommit = service.commitPlay(gameId, playPieces) triedCommit match { case Failure(ex) => logger.error("commitPlay failure", ex) unprocessable(ex) case Success((miniState, replacementPieces, deadPoints)) => logger.info(s"commitPlay success - replacements: ${replacementPieces}, mini state: ${miniState}") val response = CommitPlayResponse(miniState, replacementPieces, deadPoints) Ok(Json.toJson(response)) } } // def machinePlay(gameId: String) = Action(parse.json) { implicit request => def machinePlay(gameId: String) = Action { implicit request => logger.info(s"machinePlay") val triedMachinePlay = service.machinePlay(gameId) triedMachinePlay match { case Failure(ex) => logger.info("machinePlay failure", ex) unprocessable(ex) case Success((miniState, playedPieces, deadPoints)) => logger.info(s"machinePlay success - playedPieces: ${playedPieces}, mini state: ${miniState}") val response = MachinePlayResponse(miniState, playedPieces, deadPoints) Ok(Json.toJson(response)) } } def swapPiece(gameId: String) = Action(parse.json) { implicit request => logger.info(s"swapPiece: json request: ${request.body}") val maybeValidPiece = validate[Piece](request) processGameRequest(gameId, maybeValidPiece, swapPieceValidated) } def swapPieceValidated(gameId: String)(piece: Piece) = { logger.info(s"swapPiece piece: ${piece}") val triedSwap = service.swapPiece(gameId, piece) triedSwap match { case Failure(ex) => logger.error("swapPiece failure", ex) unprocessable(ex) case Success((miniState, newPiece)) => logger.info(s"swapPiece success - new piece: ${newPiece}, mini state: ${miniState}") val response = SwapPieceResponse(miniState, newPiece) Ok(Json.toJson(response)) } } // def endGame(gameId: String) = Action(parse.json) { implicit request => def closeGame(gameId: String) = Action { implicit request => logger.info(s"closeGame") val triedSummary = service.endGame(gameId) triedSummary match { case Failure(ex) => logger.error("closeGame failure", ex) unprocessable(ex) case Success(summary) => logger.info("closeGame success") Ok(Json.toJson(summary)) } } private def processRequest[DTO](maybeValid: JsResult[DTO], validProcessor: DTO => Result) = maybeValid.fold(badRequest, validProcessor) private def processGameRequest[DTO](gameId: String, maybeValid: JsResult[DTO], validProcessor: String => DTO => Result) = maybeValid.fold(badRequest, validProcessor(gameId)) private def validate[Body](request: Request[JsValue])(implicit reads: Reads[Body]) = request.body.validate[Body] private def badRequest = (errors: ValidationErrors) => BadRequest(JsError.toJson(errors)) private def unprocessable(th: Throwable) = { def jsonError[DTO](dto: DTO)(implicit writes: Writes[DTO]) = UnprocessableEntity(Json.toJson(dto)) val ex: GameException = th match { case gameEx: GameException => gameEx case _ => InternalGameException("internal error", th) } ex match { case ex: MissingPieceException => jsonError(toMissingPieceErrorDto(ex)) case ex: MissingGameException => jsonError(toMissingGameErrorDto(ex)) case ex: MissingPlayerException => jsonError(toMissingPlayerErrorDto(ex)) case ex: SystemOverloadedException => 
jsonError(toSystemOverloadedErrorDto(ex)) case ex: InvalidWordException => jsonError(toInvalidWordErrorDto(ex)) case ex: InvalidCrosswordsException => jsonError(toInvalidCrosswordsErrorDto(ex)) case ex: UnsupportedLanguageException => jsonError(toUnsupportedLanguageErrorDto(ex)) case ex: MissingDictionaryException => jsonError(toMissingDictionaryErrorDto(ex)) case ex: MalformedPlayException => jsonError(toMalformedPlayErrorDto(ex)) case ex: InternalGameException => jsonError(toInternalErrorDto(ex)) } } }
azadbolour/boardgame
scala-server/app/controllers/GameController.scala
Scala
agpl-3.0
7,980
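The processRequest/validate helpers in the controller above follow the usual Play JSON validate-then-fold pattern; here is a hedged, standalone sketch of that pattern with a hypothetical Ping DTO (none of these names come from the boardgame project):

// Hypothetical sketch of the validate-then-fold flow used by the controller above.
import play.api.libs.json._

object JsonValidationSketch {
  case class Ping(message: String)
  implicit val pingReads: Reads[Ping] = Json.reads[Ping]

  // Left carries the JSON error payload (a BadRequest body in the controller),
  // Right carries the validated DTO handed to the corresponding *Validated method.
  def decode(body: JsValue): Either[JsValue, Ping] =
    body.validate[Ping].fold(
      errors => Left(JsError.toJson(errors)),
      ping => Right(ping)
    )
}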
/* Copyright (c) 2015, Raymond Dodge All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name "<PRODUCT NAME>" nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ package com.rayrobdod.script; package parser import com.codecommit.antixml.{Attributes => XmlAttrs} /** * Determines, based on an Xml Element's attributes, a function * that determines whether, based on a program's state, whether a * script element is used or not. */ trait AttrsToUseFun[State] { def apply(attrs:XmlAttrs):Function1[State,Boolean] }
rayrobdod/script
src/main/scala/com/rayrobdod/script/parser/AttrsToUseFun.scala
Scala
bsd-3-clause
1,859
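As the trait above carries only a doc comment, a minimal, hedged implementation sketch may help show its shape (the class is hypothetical and not part of the library): a function from XML attributes to a predicate over the program state.

// Hypothetical sketch: the simplest AttrsToUseFun, ignoring the attributes entirely so
// every script element is usable in any state. A real implementation would inspect
// `attrs` (e.g. for a required-flag attribute) before building the predicate.
import com.codecommit.antixml.{Attributes => XmlAttrs}
import com.rayrobdod.script.parser.AttrsToUseFun

class AlwaysUse[State] extends AttrsToUseFun[State] {
  def apply(attrs: XmlAttrs): State => Boolean = _ => true
}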
/***********************************************************************
 * Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Apache License, Version 2.0
 * which accompanies this distribution and is available at
 * http://www.opensource.org/licenses/apache2.0.php.
 ***********************************************************************/

package org.locationtech.geomesa.fs.tools.status

import com.beust.jcommander.Parameters
import org.locationtech.geomesa.fs.data.FileSystemDataStore
import org.locationtech.geomesa.fs.tools.FsDataStoreCommand
import org.locationtech.geomesa.fs.tools.FsDataStoreCommand.FsParams
import org.locationtech.geomesa.fs.tools.status.FsGetSftConfigCommand.FsGetSftConfigParameters
import org.locationtech.geomesa.tools.status.{GetSftConfigCommand, GetSftConfigParams}

class FsGetSftConfigCommand extends GetSftConfigCommand[FileSystemDataStore] with FsDataStoreCommand {
  override val params = new FsGetSftConfigParameters
}

object FsGetSftConfigCommand {
  @Parameters(commandDescription = "Get the SimpleFeatureType definition of a schema")
  class FsGetSftConfigParameters extends FsParams with GetSftConfigParams
}
aheyne/geomesa
geomesa-fs/geomesa-fs-tools/src/main/scala/org/locationtech/geomesa/fs/tools/status/FsGetSftConfigCommand.scala
Scala
apache-2.0
1,279
package org.jetbrains.plugins.scala.lang.autoImport.generated.worksheet

import org.jetbrains.plugins.scala.lang.autoImport.{ImportConversionFixTest, WorksheetFiles}

class ImportConversionFixWorksheetTest extends ImportConversionFixTest with WorksheetFiles
JetBrains/intellij-scala
scala/worksheet/test/org/jetbrains/plugins/scala/lang/autoImport/generated/worksheet/ImportConversionFixWorksheetTest.scala
Scala
apache-2.0
258
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.catalyst.expressions import org.apache.spark.SparkFunSuite import org.apache.spark.sql.Row import org.apache.spark.sql.catalyst.util.DateTimeUtils import org.apache.spark.sql.types._ import org.apache.spark.unsafe.types.CalendarInterval class LiteralExpressionSuite extends SparkFunSuite with ExpressionEvalHelper { test("null") { checkEvaluation(Literal.create(null, BooleanType), null) checkEvaluation(Literal.create(null, ByteType), null) checkEvaluation(Literal.create(null, ShortType), null) checkEvaluation(Literal.create(null, IntegerType), null) checkEvaluation(Literal.create(null, LongType), null) checkEvaluation(Literal.create(null, FloatType), null) checkEvaluation(Literal.create(null, DoubleType), null) checkEvaluation(Literal.create(null, StringType), null) checkEvaluation(Literal.create(null, BinaryType), null) checkEvaluation(Literal.create(null, DecimalType.USER_DEFAULT), null) checkEvaluation(Literal.create(null, DateType), null) checkEvaluation(Literal.create(null, TimestampType), null) checkEvaluation(Literal.create(null, CalendarIntervalType), null) checkEvaluation(Literal.create(null, ArrayType(ByteType, true)), null) checkEvaluation(Literal.create(null, MapType(StringType, IntegerType)), null) checkEvaluation(Literal.create(null, StructType(Seq.empty)), null) } test("default") { checkEvaluation(Literal.default(BooleanType), false) checkEvaluation(Literal.default(ByteType), 0.toByte) checkEvaluation(Literal.default(ShortType), 0.toShort) checkEvaluation(Literal.default(IntegerType), 0) checkEvaluation(Literal.default(LongType), 0L) checkEvaluation(Literal.default(FloatType), 0.0f) checkEvaluation(Literal.default(DoubleType), 0.0) checkEvaluation(Literal.default(StringType), "") checkEvaluation(Literal.default(BinaryType), "".getBytes) checkEvaluation(Literal.default(DecimalType.USER_DEFAULT), Decimal(0)) checkEvaluation(Literal.default(DecimalType.SYSTEM_DEFAULT), Decimal(0)) checkEvaluation(Literal.default(DateType), DateTimeUtils.toJavaDate(0)) checkEvaluation(Literal.default(TimestampType), DateTimeUtils.toJavaTimestamp(0L)) checkEvaluation(Literal.default(CalendarIntervalType), new CalendarInterval(0, 0L)) checkEvaluation(Literal.default(ArrayType(StringType)), Array()) checkEvaluation(Literal.default(MapType(IntegerType, StringType)), Map()) checkEvaluation(Literal.default(StructType(StructField("a", StringType) :: Nil)), Row("")) } test("boolean literals") { checkEvaluation(Literal(true), true) checkEvaluation(Literal(false), false) } test("int literals") { List(0, 1, Int.MinValue, Int.MaxValue).foreach { d => checkEvaluation(Literal(d), d) checkEvaluation(Literal(d.toLong), d.toLong) checkEvaluation(Literal(d.toShort), d.toShort) checkEvaluation(Literal(d.toByte), d.toByte) 
} checkEvaluation(Literal(Long.MinValue), Long.MinValue) checkEvaluation(Literal(Long.MaxValue), Long.MaxValue) } test("double literals") { List(0.0, -0.0, Double.NegativeInfinity, Double.PositiveInfinity).foreach { d => checkEvaluation(Literal(d), d) checkEvaluation(Literal(d.toFloat), d.toFloat) } checkEvaluation(Literal(Double.MinValue), Double.MinValue) checkEvaluation(Literal(Double.MaxValue), Double.MaxValue) checkEvaluation(Literal(Float.MinValue), Float.MinValue) checkEvaluation(Literal(Float.MaxValue), Float.MaxValue) } test("string literals") { checkEvaluation(Literal(""), "") checkEvaluation(Literal("test"), "test") checkEvaluation(Literal("\0"), "\0") } test("sum two literals") { checkEvaluation(Add(Literal(1), Literal(1)), 2) } test("binary literals") { checkEvaluation(Literal.create(new Array[Byte](0), BinaryType), new Array[Byte](0)) checkEvaluation(Literal.create(new Array[Byte](2), BinaryType), new Array[Byte](2)) } test("decimal") { List(-0.0001, 0.0, 0.001, 1.2, 1.1111, 5).foreach { d => checkEvaluation(Literal(Decimal(d)), Decimal(d)) checkEvaluation(Literal(Decimal(d.toInt)), Decimal(d.toInt)) checkEvaluation(Literal(Decimal(d.toLong)), Decimal(d.toLong)) checkEvaluation(Literal(Decimal((d * 1000L).toLong, 10, 3)), Decimal((d * 1000L).toLong, 10, 3)) checkEvaluation(Literal(BigDecimal(d.toString)), Decimal(d)) checkEvaluation(Literal(new java.math.BigDecimal(d.toString)), Decimal(d)) } } // TODO(davies): add tests for ArrayType, MapType and StructType }
pronix/spark
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/LiteralExpressionSuite.scala
Scala
apache-2.0
5,419
/* * Copyright 2016 Guy Van den Broeck and Wannes Meert (UCLA and KU Leuven) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package edu.ucla.cs.starai.forclift.learning import edu.ucla.cs.starai.forclift._ import edu.ucla.cs.starai.forclift.languages.mln._ import edu.ucla.cs.starai.forclift.inference._ import org.junit.runner.RunWith import org.scalatest.Matchers import java.io._ import scala.io._ import org.scalatest.FunSpec import org.scalatest.junit.JUnitRunner import edu.ucla.cs.starai.forclift.util.Resource @RunWith(classOf[JUnitRunner]) class TestWeightLearningWebKB2Monotone extends FunSpec with Matchers { describe("WebKB2 bug Jan") { val parser = new MLNParser parser.setLearnModus(true) val mln1String = """ project(task,person) courseTA(course,person) student(person) courseProf(course,person) faculty(person) -6.422947152786691 project(x,y) -5.695767167916951 courseTA(x,y) 1.4831259809182646 faculty(x) -11.139728791062351 courseProf(x,y) 3.7367248437726714 student(x) 1.838195270720632 ((!courseTA(x,y) v courseProf(z,y)) v student(y)) -0.054950476803692076 ((!project(x,y) v !student(y)) v courseProf(z,y)) 0.5008872927556092 ((!faculty(x) v !student(x)) v courseProf(y,x)) -0.041883127014925955 ((!project(x,y) v faculty(y)) v project(z,y)) -3.1872644041834737 (!project(x,y) v student(y)) -7.395636151029688 ((!courseProf(x,y) v courseTA(x,y)) v student(y)) 1.597474249223895 ((!courseProf(x,y) v faculty(y)) v student(y)) """ val mln2String = """ project(task,person) courseTA(course,person) student(person) courseProf(course,person) faculty(person) -6.382050640574264 project(x,y) -5.704564912287659 courseTA(x,y) 1.2076557998442516 faculty(x) -11.24823353231588 courseProf(x,y) 3.4313874803679316 student(x) 0.332429990911885 ((!courseTA(x,y) v courseProf(z,y)) v student(y)) -0.05797672237840561 ((!project(x,y) v !student(y)) v courseProf(z,y)) -0.032766778435851684 ((!project(x,y) v faculty(y)) v project(z,y)) 1.6376765954375099 ((!faculty(x) v !student(x)) v courseProf(y,x)) -3.1611671063568094 (!project(x,y) v student(y)) -7.256931187083652 ((!courseProf(x,y) v courseTA(x,y)) v student(y)) 1.3960108536694396 ((!courseProf(x,y) v faculty(y)) v student(y)) -0.08649267695334978 (!courseProf(x,y) v !courseProf(z,y)) """ // Database file for training val db1String = Resource.fromFile("/webkb2/fold1.db").mkString val db2String = Resource.fromFile("/webkb2/fold2.db").mkString val db3String = Resource.fromFile("/webkb2/fold3.db").mkString val db4String = Resource.fromFile("/webkb2/fold4.db").mkString var mln1 = MLN() var mln2 = MLN() var db1 = MLN() var db2 = MLN() var db3 = MLN() var db4 = MLN() mln1 = parser.parseMLN(mln2String) // mln2 = parser.parseMLN(mln2String) db1 = parser.parseDB(db1String) db2 = parser.parseDB(db2String) db3 = parser.parseDB(db3String) db4 = parser.parseDB(db4String) // this test runs for hours (sometimes) -- too slow ignore("Small MLN is learnable") { val learner = new LiftedLearning(mln1, Seq(db1, db3, db4), testdbMLNs=Seq(db2), verbose = true, normalizeLH = false) val 
(learnedMLN,ll) = learner.learnParameters() println(learnedMLN) for(dbLh <- learner.testDatabaseLikelihoods){ println("Test DBs") println(s"db: $dbLh") println(s"Z = ${dbLh.z.cachedWmc.logToDouble}") println(s"loglikelihood = ${dbLh.likelihood.logToDouble}") } } } }
UCLA-StarAI/Forclift
src/test/scala/edu/ucla/cs/starai/forclift/learning/TestWeightLearningWebKB2Monotone.scala
Scala
apache-2.0
4,037
package paperdoll.core.effect import Predef.identity import shapeless.{ Coproduct, CNil, :+:, Inl } import scalaz.{ Monad, Leibniz, Forall, Unapply } import scalaz.syntax.monad._ import paperdoll.queue.Queue import paperdoll.core.layer.{ Layer, Layers, Subset } import scalaz.Functor import paperdoll.core.nondeterminism.NDet_ import scalaz.MonadPlus import paperdoll.core.nondeterminism.Nondeterminism import Arrs.compose import paperdoll.queue.DestructuredHead sealed trait Arr_[R <: Coproduct, L <: Layers[R]] { final type O[A, B] = A ⇒ Effects[R, L, B] } object Arr_ { final type One[L <: Layer] = Arr_[L :+: CNil, Layers.One[L]] final type Two[L1 <: Layer, L2 <: Layer] = Arr_[L1 :+: L2 :+: CNil, Layers.Two[L1, L2]] } object Arrs { final type One[L <: Layer, A, B] = Queue[Arr_.One[L]#O, A, B] /** * Collapse an Arrs (a queue of Arr) to a single Arr. */ def compose[R <: Coproduct, L <: Layers[R], A, B](arrs: Arrs[R, L, A, B]): Arr[R, L, A, B] = { value: A ⇒ arrs.destructureHead match { case nil: DestructuredHead.Nil[Queue, Arr_[R, L]#O, A, B] => Leibniz.symm[Nothing, Any, B, A](nil.witness)(value).point[Effects_[R, L]#O] case cons: DestructuredHead.Cons[Queue, Arr_[R, L]#O, A, B, w] => val ctail = compose(cons.tail) cons.head(value).fold( ctail, new Forall[({ type K[X] = (L#O[X], Arrs[R, L, X, w]) ⇒ Effects[R, L, B] })#K] { override def apply[X] = (eff, cont) ⇒ Impure[R, L, X, B](eff, cont :+ ctail) }) } } } /** * Intermediate step as a helper for type inference of Effects#extend */ final class ExtendingEffects[R <: Coproduct, L <: Layers[R], S <: Coproduct, A](eff: Effects[R, L, A]) { def apply[L0 <: Layers[R]]()(implicit su: Subset[S, R] { type LT = L0 }, le: Leibniz[Nothing, Layers[R], L0, L]): Effects[S, su.LS, A] = eff.inject(le.subst[({ type K[LL] = Subset[S, R] { type LT = LL type LS = su.LS } })#K](su)) } /** * A lazy value of type A via a (possibly empty) queue of effects from the list given by R/L * (something like an effectful continuation) * Evaluating this by providing implementations of each effect will eventually yield a value of type A */ sealed trait Effects[R <: Coproduct, L <: Layers[R], A] { /** * This is a "shallow" catamorphism. In practice the main use cases for this are recursive, * folding all the way down, but I found it very difficult to express the required type * for that case. * While calling directly is supported, the most common use case for this is covered by GenericBind. */ def fold[B](pure: A ⇒ B, impure: Forall[({ type K[X] = (L#O[X], Arrs[R, L, X, A]) ⇒ B })#K]): B private[effect] def inject[S <: Coproduct](implicit su: Subset[S, R] { type LT = L }): Effects[S, su.LS, A] /** * Extend this effectful value to one in a larger stack of effects S. * Can also be used to reorder the effect stack (by having S just be a reordering of R) */ final def extend[S <: Coproduct] = new ExtendingEffects[R, L, S, A](this) /** * Run this effectful value to produce an A. Only available once all effects have been handled * (i.e. R will be CNil) and therefore this Effects must actually be a Pure. 
*/ final def run(implicit l: Leibniz[Nothing, Layers[R], L, Layers[R] { type O[X] = CNil }]): A = fold(identity, new Forall[({ type K[X] = (L#O[X], Arrs[R, L, X, A]) ⇒ A })#K] { override def apply[X] = (eff, cont) ⇒ l.subst[({ type J[K <: Layers[R]] = K#O[X] })#J](eff).impossible }) } /** * An actual A - this is the "nil" case of Effects * Note that this means that a value of type Effects is not necessarily lazy - * Pure(a).map(f) or Pure(a).flatMap(f) will evaluate f(a) immediately (returning Pure(f(a)) * or f(a) respectively). * It's only the impure effects that form "suspension points". */ final case class Pure[R <: Coproduct, L <: Layers[R], A](a: A) extends Effects[R, L, A] { override def fold[B](pure: A ⇒ B, impure: Forall[({ type K[X] = (L#O[X], Arrs[R, L, X, A]) ⇒ B })#K]) = pure(a) private[effect] override def inject[S <: Coproduct](implicit su: Subset[S, R] { type LT = L }): Effects[S, su.LS, A] = Pure(a) } /** * The "cons" case: an effectful value and a continuation that will eventually lead to an A. * Note that the intermediate type X is hidden from the outside - one is expected * to only access it via the fold method. * While instantiating directly is supported, most use cases should use the simpler Effects#send API */ final case class Impure[R <: Coproduct, L <: Layers[R], X, A]( eff: L#O[X], cont: Arrs[R, L, X, A]) extends Effects[R, L, A] { override def fold[B](pure: A ⇒ B, impure: Forall[({ type K[Y] = (L#O[Y], Arrs[R, L, Y, A]) ⇒ B })#K]) = impure.apply[X](eff, cont) private[effect] override def inject[S <: Coproduct](implicit su: Subset[S, R] { type LT = L }): Effects[S, su.LS, A] = Impure[S, su.LS, X, A](su.inject(eff), Queue.One[Arr_[S, su.LS]#O, X, A](compose(cont) andThen { _.inject[S] })) } sealed trait Effects_[R <: Coproduct, L <: Layers[R]] { final type O[A] = Effects[R, L, A] } private[effect] class EffectsMonad[R <: Coproduct, L <: Layers[R]] extends Monad[(Effects_[R, L])#O] { override def point[A](a: ⇒ A) = Pure[R, L, A](a) override def bind[A, B](fa: Effects[R, L, A])(f: A ⇒ Effects[R, L, B]) = fa.fold[Effects[R, L, B]](f, new Forall[({ type K[X] = (L#O[X], Arrs[R, L, X, A]) ⇒ Effects[R, L, B] })#K] { override def apply[X] = (eff, cont) ⇒ Impure[R, L, X, B](eff, cont :+ f) }) } /** * Lower priority implicit instances for Effects */ trait Effects0 { /** * All Effects form monads (or to be precise: for any fixed stack, the set of possible Effects * for that stack forms a monad), but Effects that include nondeterminism in their effect stack * also form MonadPlus which should be higher priority */ implicit def monadEffects[R <: Coproduct, L <: Layers[R]]: Monad[(Effects_[R, L])#O] = new EffectsMonad[R, L] } object Effects extends Effects0 { /** * If the effect stack R includes Nondeterminism_ then the set of possible Effects * for that stack forms a MonadPlus */ implicit def monadPlus[R <: Coproduct, L <: Layers[R], LT0 <: Layers[NDet_ :+: CNil]]( implicit su: Subset[R, NDet_ :+: CNil] { type LS = L type LT = LT0 }, le: Leibniz[Nothing, Layers[NDet_ :+: CNil], LT0, Layers.One[NDet_]]): MonadPlus[Effects_[R, L]#O] = new EffectsMonad[R, L] with MonadPlus[Effects_[R, L]#O] { override def plus[A](a: Effects[R, L, A], b: ⇒ Effects[R, L, A]) = bind(Nondeterminism.Plus.extend[R].apply[LT0]())({ x ⇒ if (x) a else b }) override def empty[A] = Nondeterminism.Zero[A].extend[R].apply[LT0]() } /** * One[L, A] is the type of an Effects with layer stack just L, * and value type A, i.e. 
Effects[L :+: CNil, ..., A] */ type One[L <: Layer, A] = Effects[L :+: CNil, Layers.One[L], A] sealed trait One_[L <: Layer] { final type O[A] = One[L, A] } type Two[L1 <: Layer, L2 <: Layer, A] = Effects[L1 :+: L2 :+: CNil, Layers.Two[L1, L2], A] implicit def unapplyEffects[TC[_[_]], R <: Coproduct, L <: Layers[R], A0]( implicit instance: TC[Effects_[R, L]#O]) = new Unapply[TC, Effects[R, L, A0]] { override type A = A0 override type M[X] = Effects[R, L, X] override val TC = instance override val leibniz = Leibniz.refl[Effects[R, L, A0]] } /** * Lift an effectful value L#F[V] to an Effects[L :+: CNil, ..., V]. * Usually followed by a .extend to further lift this into a complete effect stack */ def send[L <: Layer, V](value: L#F[V]): Effects.One[L, V] = Impure[L :+: CNil, Layers.One[L], V, V](Inl(value), Queue.Empty[Arr_[L :+: CNil, Layers.One[L]]#O, V]) /** * Send that infers the types L and V. However since the inference relies on implicit * Functor instances, this will only work if L#F forms a (ScalaZ) Functor * (and the relevant implicit instance is in scope) */ def sendU[FV](value: FV)(implicit u: Unapply[Functor, FV]): Effects.One[Layer.Aux[u.M], u.A] = send[Layer.Aux[u.M], u.A](u.leibniz(value)) /** * Send a nested pair of effects F[G[A]], inferring the types based on implicit functor instances. */ def sendTU[FGA, GA](value: FGA)(implicit u1: Unapply[Functor, FGA] { type A = GA }, u2: Unapply[Functor, GA]): Effects.Two[Layer.Aux[u1.M], Layer.Aux[u2.M], u2.A] = { // Inlining this causes compilation to fail, I don't understand why def sendGA(ga: GA) = sendU(ga).extend[Layer.Aux[u1.M] :+: Layer.Aux[u2.M] :+: CNil]() sendU(value).extend[Layer.Aux[u1.M] :+: Layer.Aux[u2.M] :+: CNil]().flatMap(sendGA(_)) } /** * Usually effects can be interleaved, but some effects cannot be expressed * in an interleaveable way (similar to monads which do not have monad transformers). * In that case we may only have one such effect in the stack, and must handle * it last - but we can handle any monadic effect this way. */ def unsafeRun[L <: Layer, A](effects: One[L, A])(implicit monad: Monad[L#F]): L#F[A] = effects.fold( monad.point(_), new Forall[({ type K[X] = (L#F[X] :+: CNil, Arrs.One[L, X, A]) ⇒ L#F[A] })#K] { override def apply[X] = { (eff, cont) ⇒ eff.eliminate(_.flatMap { x ⇒ unsafeRun(compose(cont)(x)) }, _.impossible) } }) }
m50d/paperdoll
core/src/main/scala/paperdoll/core/effect/Effects.scala
Scala
apache-2.0
9,805
package com.splicemachine.spark2.splicemachine object ThisVersionSpecificItems { val schema = SparkVersionSpecificItems.schemaWithoutMetadata val jdbcBadDriverNameException = SparkVersionSpecificItems.connectionNotCreated }
splicemachine/spliceengine
splice_spark2/src/test/spark2.4/com/splicemachine/spark2/splicemachine/ThisVersionSpecificItems.scala
Scala
agpl-3.0
229
/* * Wire * Copyright (C) 2016 Wire Swiss GmbH * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package com.waz.utils import java.io.File import android.database.sqlite.SQLiteDatabase import android.database.sqlite.SQLiteDatabase._ import com.waz.utils.wrappers.DB import org.scalatest.Matchers trait DbLoader { self: Matchers => def loadDb(path: String): DB = { val input = new File(getClass.getResource(path).getFile) input should exist val file = File.createTempFile("temp", ".db") file.deleteOnExit() IoUtils.copy(input, file) SQLiteDatabase.openDatabase(file.getAbsolutePath, null, OPEN_READWRITE) } }
wireapp/wire-android-sync-engine
zmessaging/src/test/scala/com/waz/utils/DbLoader.scala
Scala
gpl-3.0
1,242
package lv.ddgatve.scp import java.io.BufferedReader import java.io.IOException import java.io.InputStreamReader import com.jcraft.jsch.UserInfo class CliUserInfo extends UserInfo { // this method is never called override def getPassphrase(): String = { return null; } // this method should return the password, which it reads from the console override def getPassword(): String = { var result = "qwerty"; val br = new BufferedReader(new InputStreamReader( System.in)); print("Enter Password:"); try { result = br.readLine(); } catch { case nfe: IOException => System.err.println("Invalid Format!"); } return result; } // this method is never called override def promptPassphrase(arg0: String): Boolean = { return false; } // this method means that we'll authenticate - return true; override def promptPassword(arg0: String): Boolean = { return true; } // this method means that we'll accept the certificate - return true; override def promptYesNo(arg0: String): Boolean = { return true; } // this method is never called override def showMessage(arg0: String): Unit = { println("Displaying message: " + arg0); } }
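// A minimal usage sketch for the CliUserInfo above: JSch calls promptPassword and then
// getPassword on the registered UserInfo when the server requests password authentication,
// so the password is read from stdin at connect time. The host, user, port, and object name
// below are illustrative placeholders, not values taken from this project.
object CliUserInfoExample {
  import com.jcraft.jsch.JSch

  def connect(): Unit = {
    val jsch = new JSch()
    val session = jsch.getSession("user", "example.com", 22)
    session.setUserInfo(new CliUserInfo) // promptYesNo accepts the host key, getPassword reads stdin
    session.connect()
    try {
      // ... open an exec or sftp channel here ...
    } finally {
      session.disconnect()
    }
  }
}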
kapsitis/demografija-lv
src/main/scala/lv/ddgatve/scp/CliUserInfo.scala
Scala
apache-2.0
1,232
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution import org.apache.spark.rdd.RDD import org.apache.spark.sql.{execution, AnalysisException, Strategy} import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.encoders.RowEncoder import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression import org.apache.spark.sql.catalyst.planning._ import org.apache.spark.sql.catalyst.plans._ import org.apache.spark.sql.catalyst.plans.logical._ import org.apache.spark.sql.catalyst.plans.physical._ import org.apache.spark.sql.catalyst.streaming.InternalOutputModes import org.apache.spark.sql.execution.columnar.{InMemoryRelation, InMemoryTableScanExec} import org.apache.spark.sql.execution.command._ import org.apache.spark.sql.execution.exchange.ShuffleExchangeExec import org.apache.spark.sql.execution.joins.{BuildLeft, BuildRight, BuildSide} import org.apache.spark.sql.execution.python._ import org.apache.spark.sql.execution.streaming._ import org.apache.spark.sql.execution.streaming.sources.MemoryPlanV2 import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.streaming.{OutputMode, StreamingQuery} import org.apache.spark.sql.types.StructType /** * Converts a logical plan into zero or more SparkPlans. This API is exposed for experimenting * with the query planner and is not designed to be stable across spark releases. Developers * writing libraries should instead consider using the stable APIs provided in * [[org.apache.spark.sql.sources]] */ abstract class SparkStrategy extends GenericStrategy[SparkPlan] { override protected def planLater(plan: LogicalPlan): SparkPlan = PlanLater(plan) } case class PlanLater(plan: LogicalPlan) extends LeafExecNode { override def output: Seq[Attribute] = plan.output protected override def doExecute(): RDD[InternalRow] = { throw new UnsupportedOperationException() } } abstract class SparkStrategies extends QueryPlanner[SparkPlan] { self: SparkPlanner => /** * Plans special cases of limit operators. 
*/ object SpecialLimits extends Strategy { override def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match { case ReturnAnswer(rootPlan) => rootPlan match { case Limit(IntegerLiteral(limit), Sort(order, true, child)) if limit < conf.topKSortFallbackThreshold => TakeOrderedAndProjectExec(limit, order, child.output, planLater(child)) :: Nil case Limit(IntegerLiteral(limit), Project(projectList, Sort(order, true, child))) if limit < conf.topKSortFallbackThreshold => TakeOrderedAndProjectExec(limit, order, projectList, planLater(child)) :: Nil case Limit(IntegerLiteral(limit), child) => CollectLimitExec(limit, planLater(child)) :: Nil case other => planLater(other) :: Nil } case Limit(IntegerLiteral(limit), Sort(order, true, child)) if limit < conf.topKSortFallbackThreshold => TakeOrderedAndProjectExec(limit, order, child.output, planLater(child)) :: Nil case Limit(IntegerLiteral(limit), Project(projectList, Sort(order, true, child))) if limit < conf.topKSortFallbackThreshold => TakeOrderedAndProjectExec(limit, order, projectList, planLater(child)) :: Nil case _ => Nil } } /** * Select the proper physical plan for join based on joining keys and size of logical plan. * * At first, uses the [[ExtractEquiJoinKeys]] pattern to find joins where at least some of the * predicates can be evaluated by matching join keys. If found, join implementations are chosen * with the following precedence: * * - Broadcast hash join (BHJ): * BHJ is not supported for full outer join. For right outer join, we only can broadcast the * left side. For left outer, left semi, left anti and the internal join type ExistenceJoin, * we only can broadcast the right side. For inner like join, we can broadcast both sides. * Normally, BHJ can perform faster than the other join algorithms when the broadcast side is * small. However, broadcasting tables is a network-intensive operation. It could cause OOM * or perform worse than the other join algorithms, especially when the build/broadcast side * is big. * * For the supported cases, users can specify the broadcast hint (e.g. the user applied the * [[org.apache.spark.sql.functions.broadcast()]] function to a DataFrame) and session-based * [[SQLConf.AUTO_BROADCASTJOIN_THRESHOLD]] threshold to adjust whether BHJ is used and * which join side is broadcast. * * 1) Broadcast the join side with the broadcast hint, even if the size is larger than * [[SQLConf.AUTO_BROADCASTJOIN_THRESHOLD]]. If both sides have the hint (only when the type * is inner like join), the side with a smaller estimated physical size will be broadcast. * 2) Respect the [[SQLConf.AUTO_BROADCASTJOIN_THRESHOLD]] threshold and broadcast the side * whose estimated physical size is smaller than the threshold. If both sides are below the * threshold, broadcast the smaller side. If neither is smaller, BHJ is not used. * * - Shuffle hash join: if the average size of a single partition is small enough to build a hash * table. * * - Sort merge: if the matching join keys are sortable. * * If there is no joining keys, Join implementations are chosen with the following precedence: * - BroadcastNestedLoopJoin (BNLJ): * BNLJ supports all the join types but the impl is OPTIMIZED for the following scenarios: * For right outer join, the left side is broadcast. For left outer, left semi, left anti * and the internal join type ExistenceJoin, the right side is broadcast. For inner like * joins, either side is broadcast. 
* * Like BHJ, users still can specify the broadcast hint and session-based * [[SQLConf.AUTO_BROADCASTJOIN_THRESHOLD]] threshold to impact which side is broadcast. * * 1) Broadcast the join side with the broadcast hint, even if the size is larger than * [[SQLConf.AUTO_BROADCASTJOIN_THRESHOLD]]. If both sides have the hint (i.e., just for * inner-like join), the side with a smaller estimated physical size will be broadcast. * 2) Respect the [[SQLConf.AUTO_BROADCASTJOIN_THRESHOLD]] threshold and broadcast the side * whose estimated physical size is smaller than the threshold. If both sides are below the * threshold, broadcast the smaller side. If neither is smaller, BNLJ is not used. * * - CartesianProduct: for inner like join, CartesianProduct is the fallback option. * * - BroadcastNestedLoopJoin (BNLJ): * For the other join types, BNLJ is the fallback option. Here, we just pick the broadcast * side with the broadcast hint. If neither side has a hint, we broadcast the side with * the smaller estimated physical size. */ object JoinSelection extends Strategy with PredicateHelper { /** * Matches a plan whose output should be small enough to be used in broadcast join. */ private def canBroadcast(plan: LogicalPlan): Boolean = { plan.stats.sizeInBytes >= 0 && plan.stats.sizeInBytes <= conf.autoBroadcastJoinThreshold } /** * Matches a plan whose single partition should be small enough to build a hash table. * * Note: this assume that the number of partition is fixed, requires additional work if it's * dynamic. */ private def canBuildLocalHashMap(plan: LogicalPlan): Boolean = { plan.stats.sizeInBytes < conf.autoBroadcastJoinThreshold * conf.numShufflePartitions } /** * Returns whether plan a is much smaller (3X) than plan b. * * The cost to build hash map is higher than sorting, we should only build hash map on a table * that is much smaller than other one. Since we does not have the statistic for number of rows, * use the size of bytes here as estimation. 
*/ private def muchSmaller(a: LogicalPlan, b: LogicalPlan): Boolean = { a.stats.sizeInBytes * 3 <= b.stats.sizeInBytes } private def canBuildRight(joinType: JoinType): Boolean = joinType match { case _: InnerLike | LeftOuter | LeftSemi | LeftAnti | _: ExistenceJoin => true case _ => false } private def canBuildLeft(joinType: JoinType): Boolean = joinType match { case _: InnerLike | RightOuter => true case _ => false } private def broadcastSide( canBuildLeft: Boolean, canBuildRight: Boolean, left: LogicalPlan, right: LogicalPlan): BuildSide = { def smallerSide = if (right.stats.sizeInBytes <= left.stats.sizeInBytes) BuildRight else BuildLeft if (canBuildRight && canBuildLeft) { // Broadcast smaller side base on its estimated physical size // if both sides have broadcast hint smallerSide } else if (canBuildRight) { BuildRight } else if (canBuildLeft) { BuildLeft } else { // for the last default broadcast nested loop join smallerSide } } private def canBroadcastByHints(joinType: JoinType, left: LogicalPlan, right: LogicalPlan) : Boolean = { val buildLeft = canBuildLeft(joinType) && left.stats.hints.broadcast val buildRight = canBuildRight(joinType) && right.stats.hints.broadcast buildLeft || buildRight } private def broadcastSideByHints(joinType: JoinType, left: LogicalPlan, right: LogicalPlan) : BuildSide = { val buildLeft = canBuildLeft(joinType) && left.stats.hints.broadcast val buildRight = canBuildRight(joinType) && right.stats.hints.broadcast broadcastSide(buildLeft, buildRight, left, right) } private def canBroadcastBySizes(joinType: JoinType, left: LogicalPlan, right: LogicalPlan) : Boolean = { val buildLeft = canBuildLeft(joinType) && canBroadcast(left) val buildRight = canBuildRight(joinType) && canBroadcast(right) buildLeft || buildRight } private def broadcastSideBySizes(joinType: JoinType, left: LogicalPlan, right: LogicalPlan) : BuildSide = { val buildLeft = canBuildLeft(joinType) && canBroadcast(left) val buildRight = canBuildRight(joinType) && canBroadcast(right) broadcastSide(buildLeft, buildRight, left, right) } def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match { // --- BroadcastHashJoin -------------------------------------------------------------------- // broadcast hints were specified case ExtractEquiJoinKeys(joinType, leftKeys, rightKeys, condition, left, right) if canBroadcastByHints(joinType, left, right) => val buildSide = broadcastSideByHints(joinType, left, right) Seq(joins.BroadcastHashJoinExec( leftKeys, rightKeys, joinType, buildSide, condition, planLater(left), planLater(right))) // broadcast hints were not specified, so need to infer it from size and configuration. 
case ExtractEquiJoinKeys(joinType, leftKeys, rightKeys, condition, left, right) if canBroadcastBySizes(joinType, left, right) => val buildSide = broadcastSideBySizes(joinType, left, right) Seq(joins.BroadcastHashJoinExec( leftKeys, rightKeys, joinType, buildSide, condition, planLater(left), planLater(right))) // --- ShuffledHashJoin --------------------------------------------------------------------- case ExtractEquiJoinKeys(joinType, leftKeys, rightKeys, condition, left, right) if !conf.preferSortMergeJoin && canBuildRight(joinType) && canBuildLocalHashMap(right) && muchSmaller(right, left) || !RowOrdering.isOrderable(leftKeys) => Seq(joins.ShuffledHashJoinExec( leftKeys, rightKeys, joinType, BuildRight, condition, planLater(left), planLater(right))) case ExtractEquiJoinKeys(joinType, leftKeys, rightKeys, condition, left, right) if !conf.preferSortMergeJoin && canBuildLeft(joinType) && canBuildLocalHashMap(left) && muchSmaller(left, right) || !RowOrdering.isOrderable(leftKeys) => Seq(joins.ShuffledHashJoinExec( leftKeys, rightKeys, joinType, BuildLeft, condition, planLater(left), planLater(right))) // --- SortMergeJoin ------------------------------------------------------------ case ExtractEquiJoinKeys(joinType, leftKeys, rightKeys, condition, left, right) if RowOrdering.isOrderable(leftKeys) => joins.SortMergeJoinExec( leftKeys, rightKeys, joinType, condition, planLater(left), planLater(right)) :: Nil // --- Without joining keys ------------------------------------------------------------ // Pick BroadcastNestedLoopJoin if one side could be broadcast case j @ logical.Join(left, right, joinType, condition) if canBroadcastByHints(joinType, left, right) => val buildSide = broadcastSideByHints(joinType, left, right) joins.BroadcastNestedLoopJoinExec( planLater(left), planLater(right), buildSide, joinType, condition) :: Nil case j @ logical.Join(left, right, joinType, condition) if canBroadcastBySizes(joinType, left, right) => val buildSide = broadcastSideBySizes(joinType, left, right) joins.BroadcastNestedLoopJoinExec( planLater(left), planLater(right), buildSide, joinType, condition) :: Nil // Pick CartesianProduct for InnerJoin case logical.Join(left, right, _: InnerLike, condition) => joins.CartesianProductExec(planLater(left), planLater(right), condition) :: Nil case logical.Join(left, right, joinType, condition) => val buildSide = broadcastSide( left.stats.hints.broadcast, right.stats.hints.broadcast, left, right) // This join could be very slow or OOM joins.BroadcastNestedLoopJoinExec( planLater(left), planLater(right), buildSide, joinType, condition) :: Nil // --- Cases where this strategy does not apply --------------------------------------------- case _ => Nil } } /** * Used to plan streaming aggregation queries that are computed incrementally as part of a * [[StreamingQuery]]. 
Currently this rule is injected into the planner * on-demand, only when planning in a [[org.apache.spark.sql.execution.streaming.StreamExecution]] */ object StatefulAggregationStrategy extends Strategy { override def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match { case _ if !plan.isStreaming => Nil case EventTimeWatermark(columnName, delay, child) => EventTimeWatermarkExec(columnName, delay, planLater(child)) :: Nil case PhysicalAggregation( namedGroupingExpressions, aggregateExpressions, rewrittenResultExpressions, child) => if (aggregateExpressions.exists(PythonUDF.isGroupedAggPandasUDF)) { throw new AnalysisException( "Streaming aggregation doesn't support group aggregate pandas UDF") } val stateVersion = conf.getConf(SQLConf.STREAMING_AGGREGATION_STATE_FORMAT_VERSION) aggregate.AggUtils.planStreamingAggregation( namedGroupingExpressions, aggregateExpressions.map(expr => expr.asInstanceOf[AggregateExpression]), rewrittenResultExpressions, stateVersion, planLater(child)) case _ => Nil } } /** * Used to plan the streaming deduplicate operator. */ object StreamingDeduplicationStrategy extends Strategy { override def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match { case Deduplicate(keys, child) if child.isStreaming => StreamingDeduplicateExec(keys, planLater(child)) :: Nil case _ => Nil } } /** * Used to plan the streaming global limit operator for streams in append mode. * We need to check for either a direct Limit or a Limit wrapped in a ReturnAnswer operator, * following the example of the SpecialLimits Strategy above. * Streams with limit in Append mode use the stateful StreamingGlobalLimitExec. * Streams with limit in Complete mode use the stateless CollectLimitExec operator. * Limit is unsupported for streams in Update mode. */ case class StreamingGlobalLimitStrategy(outputMode: OutputMode) extends Strategy { override def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match { case ReturnAnswer(rootPlan) => rootPlan match { case Limit(IntegerLiteral(limit), child) if plan.isStreaming && outputMode == InternalOutputModes.Append => StreamingGlobalLimitExec(limit, LocalLimitExec(limit, planLater(child))) :: Nil case _ => Nil } case Limit(IntegerLiteral(limit), child) if plan.isStreaming && outputMode == InternalOutputModes.Append => StreamingGlobalLimitExec(limit, LocalLimitExec(limit, planLater(child))) :: Nil case _ => Nil } } object StreamingJoinStrategy extends Strategy { override def apply(plan: LogicalPlan): Seq[SparkPlan] = { plan match { case ExtractEquiJoinKeys(joinType, leftKeys, rightKeys, condition, left, right) if left.isStreaming && right.isStreaming => new StreamingSymmetricHashJoinExec( leftKeys, rightKeys, joinType, condition, planLater(left), planLater(right)) :: Nil case Join(left, right, _, _) if left.isStreaming && right.isStreaming => throw new AnalysisException( "Stream-stream join without equality predicate is not supported", plan = Some(plan)) case _ => Nil } } } /** * Used to plan the aggregate operator for expressions based on the AggregateFunction2 interface. 
*/ object Aggregation extends Strategy { def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match { case PhysicalAggregation(groupingExpressions, aggExpressions, resultExpressions, child) if aggExpressions.forall(expr => expr.isInstanceOf[AggregateExpression]) => val aggregateExpressions = aggExpressions.map(expr => expr.asInstanceOf[AggregateExpression]) val (functionsWithDistinct, functionsWithoutDistinct) = aggregateExpressions.partition(_.isDistinct) if (functionsWithDistinct.map(_.aggregateFunction.children.toSet).distinct.length > 1) { // This is a sanity check. We should not reach here when we have multiple distinct // column sets. Our `RewriteDistinctAggregates` should take care this case. sys.error("You hit a query analyzer bug. Please report your query to " + "Spark user mailing list.") } val aggregateOperator = if (functionsWithDistinct.isEmpty) { aggregate.AggUtils.planAggregateWithoutDistinct( groupingExpressions, aggregateExpressions, resultExpressions, planLater(child)) } else { aggregate.AggUtils.planAggregateWithOneDistinct( groupingExpressions, functionsWithDistinct, functionsWithoutDistinct, resultExpressions, planLater(child)) } aggregateOperator case PhysicalAggregation(groupingExpressions, aggExpressions, resultExpressions, child) if aggExpressions.forall(expr => expr.isInstanceOf[PythonUDF]) => val udfExpressions = aggExpressions.map(expr => expr.asInstanceOf[PythonUDF]) Seq(execution.python.AggregateInPandasExec( groupingExpressions, udfExpressions, resultExpressions, planLater(child))) case PhysicalAggregation(_, _, _, _) => // If cannot match the two cases above, then it's an error throw new AnalysisException( "Cannot use a mixture of aggregate function and group aggregate pandas UDF") case _ => Nil } } object Window extends Strategy { def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match { case PhysicalWindow( WindowFunctionType.SQL, windowExprs, partitionSpec, orderSpec, child) => execution.window.WindowExec( windowExprs, partitionSpec, orderSpec, planLater(child)) :: Nil case PhysicalWindow( WindowFunctionType.Python, windowExprs, partitionSpec, orderSpec, child) => execution.python.WindowInPandasExec( windowExprs, partitionSpec, orderSpec, planLater(child)) :: Nil case _ => Nil } } protected lazy val singleRowRdd = sparkContext.parallelize(Seq(InternalRow()), 1) object InMemoryScans extends Strategy { def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match { case PhysicalOperation(projectList, filters, mem: InMemoryRelation) => pruneFilterProject( projectList, filters, identity[Seq[Expression]], // All filters still need to be evaluated. InMemoryTableScanExec(_, filters, mem)) :: Nil case _ => Nil } } /** * This strategy is just for explaining `Dataset/DataFrame` created by `spark.readStream`. * It won't affect the execution, because `StreamingRelation` will be replaced with * `StreamingExecutionRelation` in `StreamingQueryManager` and `StreamingExecutionRelation` will * be replaced with the real relation using the `Source` in `StreamExecution`. */ object StreamingRelationStrategy extends Strategy { def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match { case s: StreamingRelation => StreamingRelationExec(s.sourceName, s.output) :: Nil case s: StreamingExecutionRelation => StreamingRelationExec(s.toString, s.output) :: Nil case s: StreamingRelationV2 => StreamingRelationExec(s.sourceName, s.output) :: Nil case _ => Nil } } /** * Strategy to convert [[FlatMapGroupsWithState]] logical operator to physical operator * in streaming plans. 
Conversion for batch plans is handled by [[BasicOperators]]. */ object FlatMapGroupsWithStateStrategy extends Strategy { override def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match { case FlatMapGroupsWithState( func, keyDeser, valueDeser, groupAttr, dataAttr, outputAttr, stateEnc, outputMode, _, timeout, child) => val stateVersion = conf.getConf(SQLConf.FLATMAPGROUPSWITHSTATE_STATE_FORMAT_VERSION) val execPlan = FlatMapGroupsWithStateExec( func, keyDeser, valueDeser, groupAttr, dataAttr, outputAttr, None, stateEnc, stateVersion, outputMode, timeout, batchTimestampMs = None, eventTimeWatermark = None, planLater(child)) execPlan :: Nil case _ => Nil } } /** * Strategy to convert EvalPython logical operator to physical operator. */ object PythonEvals extends Strategy { override def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match { case ArrowEvalPython(udfs, output, child) => ArrowEvalPythonExec(udfs, output, planLater(child)) :: Nil case BatchEvalPython(udfs, output, child) => BatchEvalPythonExec(udfs, output, planLater(child)) :: Nil case _ => Nil } } object BasicOperators extends Strategy { def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match { case d: DataWritingCommand => DataWritingCommandExec(d, planLater(d.query)) :: Nil case r: RunnableCommand => ExecutedCommandExec(r) :: Nil case MemoryPlan(sink, output) => val encoder = RowEncoder(sink.schema) LocalTableScanExec(output, sink.allData.map(r => encoder.toRow(r).copy())) :: Nil case MemoryPlanV2(sink, output) => val encoder = RowEncoder(StructType.fromAttributes(output)) LocalTableScanExec(output, sink.allData.map(r => encoder.toRow(r).copy())) :: Nil case logical.Distinct(child) => throw new IllegalStateException( "logical distinct operator should have been replaced by aggregate in the optimizer") case logical.Intersect(left, right, false) => throw new IllegalStateException( "logical intersect operator should have been replaced by semi-join in the optimizer") case logical.Intersect(left, right, true) => throw new IllegalStateException( "logical intersect operator should have been replaced by union, aggregate" + " and generate operators in the optimizer") case logical.Except(left, right, false) => throw new IllegalStateException( "logical except operator should have been replaced by anti-join in the optimizer") case logical.Except(left, right, true) => throw new IllegalStateException( "logical except (all) operator should have been replaced by union, aggregate" + " and generate operators in the optimizer") case logical.DeserializeToObject(deserializer, objAttr, child) => execution.DeserializeToObjectExec(deserializer, objAttr, planLater(child)) :: Nil case logical.SerializeFromObject(serializer, child) => execution.SerializeFromObjectExec(serializer, planLater(child)) :: Nil case logical.MapPartitions(f, objAttr, child) => execution.MapPartitionsExec(f, objAttr, planLater(child)) :: Nil case logical.MapPartitionsInR(f, p, b, is, os, objAttr, child) => execution.MapPartitionsExec( execution.r.MapPartitionsRWrapper(f, p, b, is, os), objAttr, planLater(child)) :: Nil case logical.FlatMapGroupsInR(f, p, b, is, os, key, value, grouping, data, objAttr, child) => execution.FlatMapGroupsInRExec(f, p, b, is, os, key, value, grouping, data, objAttr, planLater(child)) :: Nil case logical.FlatMapGroupsInPandas(grouping, func, output, child) => execution.python.FlatMapGroupsInPandasExec(grouping, func, output, planLater(child)) :: Nil case logical.MapElements(f, _, _, objAttr, child) => execution.MapElementsExec(f, objAttr, 
planLater(child)) :: Nil case logical.AppendColumns(f, _, _, in, out, child) => execution.AppendColumnsExec(f, in, out, planLater(child)) :: Nil case logical.AppendColumnsWithObject(f, childSer, newSer, child) => execution.AppendColumnsWithObjectExec(f, childSer, newSer, planLater(child)) :: Nil case logical.MapGroups(f, key, value, grouping, data, objAttr, child) => execution.MapGroupsExec(f, key, value, grouping, data, objAttr, planLater(child)) :: Nil case logical.FlatMapGroupsWithState( f, key, value, grouping, data, output, _, _, _, timeout, child) => execution.MapGroupsExec( f, key, value, grouping, data, output, timeout, planLater(child)) :: Nil case logical.CoGroup(f, key, lObj, rObj, lGroup, rGroup, lAttr, rAttr, oAttr, left, right) => execution.CoGroupExec( f, key, lObj, rObj, lGroup, rGroup, lAttr, rAttr, oAttr, planLater(left), planLater(right)) :: Nil case logical.Repartition(numPartitions, shuffle, child) => if (shuffle) { ShuffleExchangeExec(RoundRobinPartitioning(numPartitions), planLater(child)) :: Nil } else { execution.CoalesceExec(numPartitions, planLater(child)) :: Nil } case logical.Sort(sortExprs, global, child) => execution.SortExec(sortExprs, global, planLater(child)) :: Nil case logical.Project(projectList, child) => execution.ProjectExec(projectList, planLater(child)) :: Nil case logical.Filter(condition, child) => execution.FilterExec(condition, planLater(child)) :: Nil case f: logical.TypedFilter => execution.FilterExec(f.typedCondition(f.deserializer), planLater(f.child)) :: Nil case e @ logical.Expand(_, _, child) => execution.ExpandExec(e.projections, e.output, planLater(child)) :: Nil case logical.Sample(lb, ub, withReplacement, seed, child) => execution.SampleExec(lb, ub, withReplacement, seed, planLater(child)) :: Nil case logical.LocalRelation(output, data, _) => LocalTableScanExec(output, data) :: Nil case logical.LocalLimit(IntegerLiteral(limit), child) => execution.LocalLimitExec(limit, planLater(child)) :: Nil case logical.GlobalLimit(IntegerLiteral(limit), child) => execution.GlobalLimitExec(limit, planLater(child)) :: Nil case logical.Union(unionChildren) => execution.UnionExec(unionChildren.map(planLater)) :: Nil case g @ logical.Generate(generator, _, outer, _, _, child) => execution.GenerateExec( generator, g.requiredChildOutput, outer, g.qualifiedGeneratorOutput, planLater(child)) :: Nil case _: logical.OneRowRelation => execution.RDDScanExec(Nil, singleRowRdd, "OneRowRelation") :: Nil case r: logical.Range => execution.RangeExec(r) :: Nil case r: logical.RepartitionByExpression => exchange.ShuffleExchangeExec(r.partitioning, planLater(r.child)) :: Nil case ExternalRDD(outputObjAttr, rdd) => ExternalRDDScanExec(outputObjAttr, rdd) :: Nil case r: LogicalRDD => RDDScanExec(r.output, r.rdd, "ExistingRDD", r.outputPartitioning, r.outputOrdering) :: Nil case h: ResolvedHint => planLater(h.child) :: Nil case _ => Nil } } }
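// A hedged sketch of how a strategy built on the SparkStrategy base class above is usually
// plugged into a session through the experimental extraStrategies hook. MyNoopStrategy and
// CustomStrategyExample are illustrative names only; returning Nil simply defers planning of
// every node to the remaining built-in strategies.
object CustomStrategyExample {
  import org.apache.spark.sql.SparkSession
  import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan

  object MyNoopStrategy extends SparkStrategy {
    // Match nothing: every plan falls through to the other strategies.
    override def apply(plan: LogicalPlan): Seq[SparkPlan] = Nil
  }

  def register(spark: SparkSession): Unit = {
    spark.experimental.extraStrategies = Seq(MyNoopStrategy)
  }
}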
michalsenkyr/spark
sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala
Scala
apache-2.0
30,257
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.tools import joptsimple._ import kafka.utils.{Utils, Logging} import java.util.concurrent.CountDownLatch import kafka.consumer._ import kafka.serializer.StringDecoder /** * Program to read using the rich consumer and dump the results to standard out */ object ConsumerShell { def main(args: Array[String]): Unit = { val parser = new OptionParser val topicOpt = parser.accepts("topic", "REQUIRED: The topic to consume from.") .withRequiredArg .describedAs("topic") .ofType(classOf[String]) val consumerPropsOpt = parser.accepts("props", "REQUIRED: Properties file with the consumer properties.") .withRequiredArg .describedAs("properties") .ofType(classOf[String]) val partitionsOpt = parser.accepts("partitions", "Number of partitions to consume from.") .withRequiredArg .describedAs("count") .ofType(classOf[java.lang.Integer]) .defaultsTo(1) val options = parser.parse(args : _*) for(arg <- List(topicOpt, consumerPropsOpt)) { if(!options.has(arg)) { System.err.println("Missing required argument \"" + arg + "\"") parser.printHelpOn(System.err) System.exit(1) } } val partitions = options.valueOf(partitionsOpt).intValue val propsFile = options.valueOf(consumerPropsOpt) val topic = options.valueOf(topicOpt) println("Starting consumer...") val consumerConfig = new ConsumerConfig(Utils.loadProps(propsFile)) val consumerConnector: ConsumerConnector = Consumer.create(consumerConfig) val topicMessageStreams = consumerConnector.createMessageStreams(Predef.Map(topic -> partitions), new StringDecoder) var threadList = List[ZKConsumerThread]() for ((topic, streamList) <- topicMessageStreams) for (stream <- streamList) threadList ::= new ZKConsumerThread(stream) for (thread <- threadList) thread.start // attach shutdown handler to catch control-c Runtime.getRuntime().addShutdownHook(new Thread() { override def run() = { consumerConnector.shutdown threadList.foreach(_.shutdown) println("consumer threads shutted down") } }) } } class ZKConsumerThread(stream: KafkaMessageStream[String]) extends Thread with Logging { val shutdownLatch = new CountDownLatch(1) override def run() { println("Starting consumer thread..") var count: Int = 0 try { for (message <- stream) { println("consumed: " + message) count += 1 } }catch { case e:ConsumerTimeoutException => // this is ok case oe: Exception => error("error in ZKConsumerThread", oe) } shutdownLatch.countDown println("Received " + count + " messages") println("thread shutdown !" ) } def shutdown() { shutdownLatch.await } }
tnachen/kafka
core/src/main/scala/kafka/tools/ConsumerShell.scala
Scala
apache-2.0
3,886
package com.originate.scalypher.test.where import com.originate.scalypher.where.HasNoRelationships import com.originate.scalypher.path.{AnyNode, CypherNode} import com.originate.scalypher.types.IdentifiableMap import org.scalatest._ class HasNoRelationshipsSpec extends WordSpec with Matchers { "return a query to find AnyNodes with no relationships" in { val startNode = AnyNode() val identifiableMap: IdentifiableMap = Map(startNode -> "a1") val condition = startNode.hasNoRelationships() condition.toQuery(identifiableMap) shouldBe "NOT (a1)-[]-()" } "return a query to find CypherNodes with no relationships" in { val startNode = CypherNode("name") val identifiableMap: IdentifiableMap = Map(startNode -> "a1") val condition = startNode.hasNoRelationships() condition.toQuery(identifiableMap) shouldBe "NOT (a1)-[]-()" } "return a query to find a node with no relationships that have a certain label" in { val startNode = AnyNode() val identifiableMap: IdentifiableMap = Map(startNode -> "a1") val condition = startNode.hasNoRelationships() withLabel "label" condition.toQuery(identifiableMap) shouldBe "NOT (a1)-[:label]-()" } "return a query to find a node with no relationships that have multiple specified labels" in { val startNode = AnyNode() val identifiableMap: IdentifiableMap = Map(startNode -> "a1") val condition = startNode.hasNoRelationships(Seq("label1", "label2", "label3")) condition.toQuery(identifiableMap) shouldBe "NOT (a1)-[:label1|label2|label3]-()" } }
Originate/scalypher
src/test/scala/where/HasNoRelationshipsSpec.scala
Scala
mit
1,569
package scalasthlm.alpakka.playground import java.io.File import java.nio.file.FileSystem import org.apache.ftpserver.{ConnectionConfigFactory, FtpServer, FtpServerFactory} import org.apache.ftpserver.listener.ListenerFactory import org.apache.ftpserver.usermanager.PropertiesUserManagerFactory import scalasthlm.alpakka.playground.filesystem.JimfsFactory class FtpServerEmbedded { val DEFAULT_LISTENER = "default" def start(fs: FileSystem, port: Int): FtpServer = { val factory = new ListenerFactory() factory.setServerAddress("localhost") factory.setPort(port) val usersFile = new File(getClass.getClassLoader.getResource("ftpusers.properties").getFile) val pumf = new PropertiesUserManagerFactory pumf.setFile(usersFile) val userMgr = pumf.createUserManager val serverFactory = new FtpServerFactory() serverFactory.setUserManager(userMgr) serverFactory.setFileSystem(new JimfsFactory(fs)) serverFactory.setConnectionConfig(new ConnectionConfigFactory().createConnectionConfig) serverFactory.addListener(DEFAULT_LISTENER, factory.createListener) val ftpServer = serverFactory.createServer() ftpServer.start() ftpServer } } object FtpServerEmbedded extends FtpServerEmbedded
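// A minimal usage sketch for FtpServerEmbedded above, assuming an in-memory Jimfs file system
// and that an "ftpusers.properties" file is available on the classpath as the factory requires.
// The file-system name and port below are arbitrary example values.
object FtpServerEmbeddedExample {
  import java.nio.file.FileSystem
  import com.google.common.jimfs.{Configuration, Jimfs}

  def main(args: Array[String]): Unit = {
    val fs: FileSystem = Jimfs.newFileSystem("ftp-example", Configuration.unix())
    val server = FtpServerEmbedded.start(fs, port = 2121)
    try {
      // ... connect an FTP client to localhost:2121 and exercise the in-memory file system ...
    } finally {
      server.stop() // FtpServer.stop shuts down the listener and releases the port
    }
  }
}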
ScalaSthlm/alpakka-integration-patterns
playground/src/main/scala/scalasthlm/alpakka/playground/FtpServerEmbedded.scala
Scala
apache-2.0
1,252
package net.devkat.collection /** * http://stackoverflow.com/questions/9850786/is-there-such-a-thing-as-bidirectional-maps-in-scala */ object BiMap { private[BiMap] trait MethodDistinctor implicit final object MethodDistinctor extends MethodDistinctor } case class BiMap[X, Y](map: Map[X, Y]) { def this(tuples: (X,Y)*) = this(tuples.toMap) private val reverseMap = map map (_.swap) require(map.size == reverseMap.size, "no 1 to 1 relation") def apply(x: X): Y = map(x) def apply(y: Y)(implicit d: BiMap.MethodDistinctor): X = reverseMap(y) val domain = map.keys val codomain = reverseMap.keys }
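// A short usage sketch of the BiMap above: the implicit MethodDistinctor lets the two apply
// overloads coexist, so lookups work in both directions as long as the mapping is one-to-one
// (the require in the constructor rejects duplicate values). BiMapExample is an illustrative name.
object BiMapExample {
  def main(args: Array[String]): Unit = {
    val numbers = new BiMap("one" -> 1, "two" -> 2, "three" -> 3)

    println(numbers("two")) // forward lookup by key: 2
    println(numbers(3))     // reverse lookup by value: "three"

    println(numbers.domain)   // keys of the forward map
    println(numbers.codomain) // keys of the reverse map
  }
}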
devkat/scala-ocm
core/src/main/scala/net/devkat/collection/BiMap.scala
Scala
apache-2.0
617
/* * Element.scala * Elements of Figaro models. * * Created By: Avi Pfeffer ([email protected]) * Creation Date: Jan 1, 2009 * * Copyright 2013 Avrom J. Pfeffer and Charles River Analytics, Inc. * See http://www.cra.com or email [email protected] for information. * * See http://www.github.com/p2t2/figaro for a copy of the software license. */ package com.cra.figaro.language import com.cra.figaro.library.compound._ import scala.collection.mutable.Set import scala.language.implicitConversions /** * An Element is the core component of a probabilistic model. Elements can be understood as * defining a probabilistic process. Elements are parameterized by the type of Value the process * produces. * * Each Element is a mix of a random component and a deterministic component. The random component * has type Randomness. The generateRandomness method generates the Randomness according to a * probability distribution. The generateValue method is a deterministic function that generates the * output Value of the Element from the Randomness. Thus, Elements can be understood as defining a * generative process in which first the Randomness is generated and then the output Value is * generated given the Randomness. * * Elements also have a current outcome, represented by the value field. Naturally, the * generateValue function can refer to the current value of related Elements. However, generateValue * is not allowed to call generateValue on another Element. We use the notation generateValue(r | w) * to denote the value it produces given randomness r when the current value of related Elements is * w. * * Elements can have hard conditions and soft constraints. A condition is a predicate on Values that * must be satisfied by the output of the Element. Values that violate the condition have * probability zero. A constraint is a function that maps Values to Doubles. The probability of a * Value is multiplied by the constraint, and then normalized. * Conditions and constraints can be contingent on other elements taking on particular values. * Ordinarily, these contingencies will not be specified by the user, but automatically by other Figaro code. * In particular, specifying named evidence on a reference can result in contingencies. * * Thus, an Element represents a conditional probability distribution over Values given the current * values w of related Elements. The probability of an outcome v is defined by: * * P(v | w) is proportional to (\sum_{r: generateValue(r | w) = v} P(generateRandomness() = r) * * constraint(v)) if condition(v); * 0 otherwise * * An element has a name and belongs to an element collection that is used to find the element the name. * * Elements can be cacheable or non-cacheable, which determines what type of Chain will be created for them. * If you create a new Element class that you want to be cached, you should declare it to implement the Cacheable or IfArgsCacheable traits. * * @param name The name of the element * @param collection The element collection to which this element belongs */ abstract class Element[T](val name: Name[T], val collection: ElementCollection) { /** * The type of values over which the element is defined. */ type Value = T /** * The type of conditions on the element. A condition is a function from a value to a Boolean. */ type Condition = T => Boolean /** * The type of soft constraints on the element. A constraint is a function from a value to a Double. */ type Constraint = T => Double /** * The type of randomness content of the element.
*/ type Randomness /** * The universe in which the element is defined. */ val universe = collection.universe override val hashCode = com.cra.figaro.util.getNextHashCode //We want this to only be called once. /** * The cacheability of the element. Chains create caches of their parent values, and it is useful to know when these values can be effectively cached and reused. * In general, continuous distributions are not cacheable. */ def isCachable(): Boolean = false /** * Generate the randomness content. */ def generateRandomness(): Randomness /** * Generate the next randomness given the current randomness. * Returns three values: The next randomness, the Metropolis-Hastings proposal probability * ratio, which is: * * P(new -> old) / P(old -> new) * * and the model probability ratio, which is: * * P(new) / P(old) * * The default implementation is to use generateRandomness and returns ones for the * proposal and model probability ratios. * */ def nextRandomness(rand: Randomness): (Randomness, Double, Double) = (generateRandomness(), 1.0, 1.0) /** * The current randomness content of the element. */ var randomness: Randomness = _ /** * Generate the value of the element deterministically given its randomness and the values of * its arguments. */ def generateValue(rand: Randomness): Value /** * The current value of the element. */ var value: Value = _ /** * First generate the randomness, then generate the value given the randomness. Store the results * in randomness and value. */ final def generate(): Unit = { if (!setFlag) { // Make sure we do not generate this element if we have already set its value args.foreach(arg => if (arg.value == null) arg.generate()) // make sure arguments have a valid value randomness = generateRandomness() value = generateValue(randomness) } } /* Complete context of this element */ private[language] var myContext: List[Element[_]] = List() /** The elements on which the existence of this element depends. */ def context = if (!active) { throw new NoSuchElementException } else myContext /* Stores the elements that were created in this element's context. Note this is not used * for chains, since they maintain their own context control. */ private val myDirectContextContents: Set[Element[_]] = Set() /** * Returns the set of elements directly created in the context of this element. */ def directContextContents: Set[Element[_]] = if (!active) { throw new NoSuchElementException } else myDirectContextContents private[figaro] def addContextContents(e: Element[_]): Unit = myDirectContextContents += e private[figaro] def removeContextContents(e: Element[_]): Unit = myDirectContextContents -= e /** * Returns true if this element is temporary, that is, was created in the context of another element. */ def isTemporary = !myContext.isEmpty /** * Clears all the temporary elements associated with this element (all elements created in it's context). */ def clearContext() = universe.deactivate(directContextContents) /* * Under the new design, conditions and constraints can be contingent on other elements taking on particular values. This correctly handles reference uncertainty where we * know that the element with a given name, whatever it is, satisfies a given condition, but we don't know what that element is. We apply the condition to every possible * element that could be referred to, but make it contingent on the reference actually referring to the variable. 
* * Since an element may be referred to in multiple ways, each of which can have its own contingency, we allow the conditions to contain multiple (Contingency, Condition) * pairs. */ private[figaro]type Contingency = Element.Contingency private[figaro]type ElemVal[T] = Element.ElemVal[T] /** * Returns the elements that this element is contingent on. These are elements that are required to have a certain value for a condition or constraint * to be relevant to this element. The contingency is required because conditions and constraints can be applied to references that are * uncertain. Every possible element that could be pointed to by a reference must be given the condition or constraint, but the condition * or constraint only applies if the elements earlier in the reference have the required value. * * Figaro takes care of handling all this under the * hood. However, some algorithms may need to know which elements an element is contingent on. For example, sampling algorithms may need to sample * those other elements first. This method is supplied to support this use case. */ def elementsIAmContingentOn: Set[Element[_]] = { val conditionElements = for { (condition, contingency) <- myConditions Element.ElemVal(element, value) <- contingency } yield element val constraintElements = for { (constraint, contingency) <- myConstraints Element.ElemVal(element, value) <- contingency } yield element Set((conditionElements ::: constraintElements): _*) } /* * Since a contingency is a type of use between elements, we need to add them to the uses and usedBy lists. * In the current implementation, they never get removed. It would be difficult to ensure that is always done correctly. * Not removing these elements from the relevant lists cannot affect correctness of algorithms, but it may impact their efficiency. * One can argue that removal of evidence is not a common use case and does not need to be optimized. */ private def ensureContingency[T](elem: Element[T]) { universe.registerUses(this, elem) } private var myConditions: List[(Condition, Contingency)] = List() /** All the conditions defined on this element.*/ def allConditions = myConditions /* * observation represents a specific observed value of this element, if the element * has an observation. It is essentially redundant, given that the observation will be * captured in a condition. However, for some algorithms, such as importance sampling, * it is useful to know that a condition is actually an observation of a specific value. * This is a common case, and to optimize it, we store the observation. * * If an element has any other condition besides this observation, we cannot use the * observation. However, it can have a constraint. */ private[figaro] var observation: Option[T] = None /* * Testing whether a condition is satisfied can use any type of value. The condition can only be satisfied if the value has the right type and the condition returns true. */ private def checkedCondition(condition: Condition, value: Any): Boolean = try { condition(value.asInstanceOf[Value]) } catch { case _: ClassCastException => false } /* * Determines whether a contingent condition is satisfied for a given value of this element. It is *not* satisfied only if all the contingent elements have their * appropriate values and the condition itself is not satisfied for the given value. 
*/ private def satisfiesContingentCondition(condition: Condition, contingency: Contingency, value: Any): Boolean = { val contingencySatisfied = contingency.forall((e: ElemVal[_]) => e.elem.value == e.value) !contingencySatisfied || checkedCondition(condition, value) } /** * Tests whether all the element's contingent conditions are satisfied for the given value. */ def condition(value: Any) = { myConditions.forall((cc: (Condition, Contingency)) => satisfiesContingentCondition(cc._1, cc._2, value)) } /** * Determines whether the condition on the element is satisfied by the current value. */ def conditionSatisfied = condition(value) /** Add the given condition to the existing conditions of the element. By default, the contingency is empty. */ def addCondition(condition: Condition, contingency: Contingency = List()): Unit = { universe.makeConditioned(this) contingency.foreach(ev => ensureContingency(ev.elem)) observation = None myConditions ::= (condition, contingency) } /** * Remove all conditions associated with the given contingency. By default, the contingency is empty. */ def removeConditions(contingency: Contingency = List()): Unit = { myConditions = myConditions.filterNot(_._2 == contingency) observation = None if (myConditions.isEmpty) universe.makeUnconditioned(this) } /** * Set the condition associated with the contingency. Removes previous conditions associated with the contingency. By default, the contingency is empty. */ def setCondition(newCondition: Condition, contingency: Contingency = List()): Unit = { removeConditions(contingency) addCondition(newCondition, contingency) } private var myConstraints: List[(Constraint, Contingency)] = List() /** * The current soft constraints on the element. */ def allConstraints = myConstraints // Avoid issuing a warning every time this method is called, e.g. for every sample. private var constraintWarningIssued = false /* * Computes the result of the element's constraint on a given value. * A value of any type can be passed, but if the value is of an inappropriate type, the constraint result is negative infinity. * This method also issues a warning if the constraint is greater than log(1) = 0. */ private def checkedConstraint(constraint: Constraint, value: Any): Double = try { val result = constraint(value.asInstanceOf[Value]) if (result > 0 && !constraintWarningIssued) { //println("Warning: constraint value " + result + " is greater than 1. Algorithms that use an upper bound of 1 will be incorrect.") constraintWarningIssued = true } result } catch { case _: ClassCastException => Double.NegativeInfinity } /* * Determines the result of a contingent constraint for a given value of this element. If any of the contingent elements does not have its appropriate value, the result is 1, * otherwise it is the result of the constraint itself applied to the given value. */ private def contingentConstraintResult(constraint: Constraint, contingency: Contingency, value: Any): Double = { val contingencySatisfied = contingency.forall((e: ElemVal[_]) => e.elem.value == e.value) if (contingencySatisfied) checkedConstraint(constraint, value); else 0.0 } /** * Gets the result of all the element's contingent constraints for the given value. */ def constraint(value: Any) = { val results = for { (constr, conting) <- myConstraints } yield contingentConstraintResult(constr, conting, value) (results :\\ 0.0)(_ + _) } /** * Determines the value of the constraint on the element applied to the current value. 
*/ def constraintValue = constraint(value) /** * Compute the constraints on the new value divided by the constraints on the old value. */ def score(oldValue: Value, newValue: Value): Double = constraint(newValue) - constraint(oldValue) /** * Add a contingent constraint to the element. By default, the contingency is empty. */ def addConstraint(constraint: Constraint, contingency: Contingency = List()): Unit = { universe.makeConstrained(this) contingency.foreach(ev => ensureContingency(ev.elem)) myConstraints ::= (ProbConstraintType(constraint), contingency) } /** * Add a log contingent constraint to the element. By default, the contingency is empty. */ def addLogConstraint(constraint: Constraint, contingency: Contingency = List()): Unit = { universe.makeConstrained(this) contingency.foreach(ev => ensureContingency(ev.elem)) myConstraints ::= (LogConstraintType(constraint), contingency) } /** * Remove all constraints associated with the given contingency. By default, the contingency is empty. */ def removeConstraints(contingency: Contingency = List()): Unit = { myConstraints = myConstraints.filterNot(_._2 == contingency) if (myConstraints.isEmpty) universe.makeUnconstrained(this) } protected def removeConstraint(constraint: Constraint, contingency: Contingency = List()): Unit = { myConstraints = myConstraints.filterNot((c: (Constraint,Contingency)) => c._2 == contingency && c._1 == constraint) if (myConstraints.isEmpty) universe.makeUnconstrained(this) } /** * Set the constraint associated with the contingency. Removes previous constraints associated with the contingency. By default, the contingency is empty. */ def setConstraint(newConstraint: Constraint, contingency: Contingency = List()): Unit = { removeConstraints(contingency) addConstraint(newConstraint, contingency) } /** * Set the log constraint associated with the contingency. Removes previous constraints associated with the contingency. By default, the contingency is empty. */ def setLogConstraint(newConstraint: Constraint, contingency: Contingency = List()): Unit = { removeConstraints(contingency) addLogConstraint(newConstraint, contingency) } /** * Condition the element by observing a particular value. Propagates the effect to dependent elements and ensures that no other value for the element can be generated. */ def observe(observation: Value): Unit = { removeConditions() set(observation) universe.makeConditioned(this) this.observation = Some(observation) myConditions ::= ((v: Value) => v == observation, List()) } /** * Removes conditions on the element and allows different values of the element to be generated. */ def unobserve(): Unit = { unset() removeConditions() } private var setFlag: Boolean = false /** * Allows different values of the element to be generated. */ def unset(): Unit = { setFlag = false generate() } /** * Set the value of this element and propagate the effects to elements that depend on it * without changing their randomness. Also disallows the value of the element to change until unobserve or unset is called. */ def set(newValue: Value): Unit = { value = newValue for { layer <- universe.layers(universe.usedBy(this)) elem <- layer } { elem.generate() } setFlag = true } /** * Set the randomness of this element. * * Will generate its value using the new randomness and propagate the effects to elements that * depend on it without changing their randomness. 
*/ def setRandomness(newRandomness: Randomness): Unit = { randomness = newRandomness set(generateValue(randomness)) } private var myPragmas: List[Pragma[Value]] = List() /** * The pragmas attached to the element. */ def pragmas: List[Pragma[Value]] = myPragmas /** * Add a pragma to the element. */ def addPragma(pragma: Pragma[Value]): Unit = myPragmas ::= pragma /** * Remove a pragma from the element. */ def removePragma(pragma: Pragma[Value]): Unit = myPragmas = myPragmas filterNot (_ == pragma) /** * The arguments on which the element depends. */ def args: List[Element[_]] /** * Flag indicating whether the element is currently active in its universe. */ var active: Boolean = _ universe.activate(this) /** * Activate the element in its universe. */ def activate(): Unit = universe.activate(this) /** * Deactivate the element in its universe. */ def deactivate(): Unit = universe.deactivate(this) /* Element self-generation on initialization was the cause of bugs. On infinite models, it can cause an infinite recursion, which could correctly be handled by * lazy factored inference. We have eliminated the need for self-generation on initialization. Algorithms that require elements to be generated should begin * by calling Universe.generateAll */ //generate() collection.add(this) /** * The element that tests equality of this element with another element. */ def ===(that: Element[Value])(implicit universe: Universe) = new Eq("", this, that, universe) /** * The element that tests whether the value of this element is equal to a particular value. */ def ===(that: Value)(implicit universe: Universe) = new Apply1("", this, (v: Value) => v == that, universe) /** * The element that tests inequality of this element with another element. */ def !==(that: Element[Value])(implicit universe: Universe) = new Neq("", this, that, universe) /** * A string that is the element's name, if it has a non-empty one, otherwise the result of the element's toString. */ def toNameString = if (name.string != "") name.string; else toString def map[U](fn: Value => U)(implicit name: Name[U], collection: ElementCollection): Element[U] = Apply(this, fn)(name, collection) def flatMap[U](fn: Value => Element[U])(implicit name: Name[U], collection: ElementCollection): Element[U] = Chain(this, fn)(name, collection) } object Element { /** * Implicit conversion of an element over Booleans to a BooleanElement, allowing Boolean operators * to be applied to it. */ implicit def toBooleanElement(elem: Element[Boolean]): BooleanElement = new BooleanElement(elem) /** * Implicit conversion of an element over Doubles to a DoubleElement, allowing Double operators * to be applied to it. */ implicit def toDoubleElement(elem: Element[Double]): DoubleElement = new DoubleElement(elem) /** * Implicit conversion of an element over Ints to an IntElement, allowing Int operators * to be applied to it. */ implicit def toIntElement(elem: Element[Int]): IntElement = new IntElement(elem) /** * Implicit conversion of an element over pairs to a DuoElement, allowing component extractors * to be applied to it. */ implicit def toDuoElement[T1, T2](elem: Element[(T1, T2)]): DuoElement[T1, T2] = new DuoElement(elem) /** * Implicit conversion of an element over triples to a TrioElement, allowing component extractors * to be applied to it. 
*/ implicit def toTrioElement[T1, T2, T3](elem: Element[(T1, T2, T3)]): TrioElement[T1, T2, T3] = new TrioElement(elem) /** * Implicit conversion of an element over quadruples to a QuartetElement, allowing component extractors * to be applied to it. */ implicit def toQuartetElement[T1, T2, T3, T4](elem: Element[(T1, T2, T3, T4)]): QuartetElement[T1, T2, T3, T4] = new QuartetElement(elem) /** * Implicit conversion of an element over quintuples to a QuintetElement, allowing component extractors * to be applied to it. */ implicit def toQuintetElement[T1, T2, T3, T4, T5](elem: Element[(T1, T2, T3, T4, T5)]): QuintetElement[T1, T2, T3, T4, T5] = new QuintetElement(elem) /** A convenience class that pairs an element and a possible value, ensuring they have compatible types. */ case class ElemVal[T](elem: Element[T], value: T) /** The type of contingencies that can hold on elements. */ type Contingency = List[ElemVal[_]] /** * Returns the given elements and all elements on which they are contingent, closed recursively. * Only elements with condition. */ def closeUnderContingencies(elements: scala.collection.Set[Element[_]]): scala.collection.Set[Element[_]] = { def findContingent(elements: scala.collection.Set[Element[_]]): scala.collection.Set[Element[_]] = { // Find all elements not in the input set that the input set is contingent on for { element <- elements contingent <- element.elementsIAmContingentOn if !elements.contains(contingent) } yield contingent } var result = elements var adds = findContingent(result) while (!adds.isEmpty) { result ++= adds adds = findContingent(result) } result } } /** * Elements whose values can be cached and reused. */ trait Cacheable[V] extends Element[V] { override def isCachable = true } /** * Elements whose values can be cached and reused as long as the arguments are cacheable. */ trait IfArgsCacheable[V] extends Element[V] { override def isCachable = args forall (_.isCachable) }
jyuhuan/figaro
Figaro/src/main/scala/com/cra/figaro/language/Element.scala
Scala
bsd-3-clause
24,130
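A quick sketch of how the evidence API defined on Element above (observe, unobserve, addConstraint, condition, map) is typically exercised. It assumes the standard Figaro Flip atomic element and the implicit default Universe; exact imports and implicit defaults can differ between Figaro releases, so treat it as illustrative rather than canonical.

import com.cra.figaro.language._

object ElementEvidenceSketch extends App {
  val coin = Flip(0.6)                                   // Element[Boolean] in the default Universe

  // Hard evidence: fixes the value and installs the matching condition.
  coin.observe(true)
  println(coin.condition(true))                          // true: the observation is satisfied
  coin.unobserve()                                       // the value may vary again

  // Soft evidence: addConstraint takes a probability-scale weight
  // (it is wrapped in ProbConstraintType internally, as shown above).
  coin.addConstraint((b: Boolean) => if (b) 0.9 else 0.1)

  // Derived elements through the map/flatMap defined on Element.
  val label = coin.map(b => if (b) "heads" else "tails")
  println(label.toNameString)
}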
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.util

/**
 * An interface to represent clocks, so that they can be mocked out in unit tests.
 *
 * Methods:
 * <br>def getTimeMillis(): Long
 * <br>def waitTillTime(targetTime: Long): Long
 */
private[spark] trait Clock {
  /**
   * @return the current time in milliseconds, as reported by System.currentTimeMillis()
   */
  def getTimeMillis(): Long

  def waitTillTime(targetTime: Long): Long
}

/**
 * A clock backed by the actual time from the OS as reported by the `System` API.
 */
private[spark] class SystemClock extends Clock {

  val minPollTime = 25L

  /**
   * @return the same time (milliseconds since the epoch)
   *         as is reported by `System.currentTimeMillis()`
   */
  def getTimeMillis(): Long = System.currentTimeMillis()

  /**
   * @param targetTime block until the current time is at least this value
   * @return current system time when wait has completed
   */
  def waitTillTime(targetTime: Long): Long = {
    var currentTime = System.currentTimeMillis()

    var waitTime = targetTime - currentTime
    if (waitTime <= 0) {
      return currentTime
    }

    val pollTime = math.max(waitTime / 10.0, minPollTime).toLong

    while (true) {
      currentTime = System.currentTimeMillis()
      waitTime = targetTime - currentTime
      if (waitTime <= 0) {
        return currentTime
      }
      val sleepTime = math.min(waitTime, pollTime)
      Thread.sleep(sleepTime)
    }
    -1
  }
}
Dax1n/spark-core
core/src/main/scala/org/apache/spark/util/Clock.scala
Scala
apache-2.0
2,308
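For unit tests, the Clock trait above is usually swapped for a controllable implementation. Spark has its own test clock, so the standalone sketch below (TestManualClock is a name chosen here, not Spark's) only shows the minimal shape such a mock needs, using nothing beyond the two methods declared in the trait; it sits in the same package because Clock is private[spark].

package org.apache.spark.util

class TestManualClock(private var now: Long = 0L) extends Clock {

  override def getTimeMillis(): Long = synchronized { now }

  /** Advance the clock manually and wake up any thread blocked in waitTillTime. */
  def advance(millis: Long): Unit = synchronized {
    now += millis
    notifyAll()
  }

  override def waitTillTime(targetTime: Long): Long = synchronized {
    while (now < targetTime) {
      wait(100)   // re-check after being notified or after a short timeout
    }
    now
  }
}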
package models.admin import scalaz._ import Scalaz._ import scalaz.effect.IO import scalaz.EitherT._ import scalaz.Validation import scalaz.Validation.FlatMap._ import scalaz.NonEmptyList._ import cache._ import db._ import models.Constants._ import io.megam.auth.funnel.FunnelErrors._ import com.datastax.driver.core.{ ResultSet, Row } import com.websudos.phantom.dsl._ import scala.concurrent.{ Future => ScalaFuture } import com.websudos.phantom.connectors.{ ContactPoint, KeySpaceDef } import scala.concurrent.Await import scala.concurrent.duration._ import utils.DateHelper import io.megam.util.Time import org.joda.time.{DateTime, DateTimeZone} import org.joda.time.format.{DateTimeFormat,ISODateTimeFormat} import io.megam.common.uid.UID import net.liftweb.json._ import controllers.stack.ImplicitJsonFormats import net.liftweb.json.scalaz.JsonScalaz._ import java.nio.charset.Charset /** * @author rajthilak */ case class LicensesInput(data: String) { val half_json = "\\"data\\":\\"" + data + "\\"" val json = "{" + half_json + "}" } case class LicensesResult(id: String, data: String, created_at: DateTime) {} sealed class LicensesSack extends CassandraTable[LicensesSack, LicensesResult] with ImplicitJsonFormats { object id extends StringColumn(this) with PrimaryKey[String] object data extends StringColumn(this) object created_at extends DateTimeColumn(this) def fromRow(row: Row): LicensesResult = { LicensesResult( id(row), data(row), created_at(row)) } } abstract class ConcreteLicenses extends LicensesSack with RootConnector { override lazy val tableName = "licenses" override implicit def space: KeySpace = scyllaConnection.space override implicit def session: Session = scyllaConnection.session def insertNewRecord(l: LicensesResult): ValidationNel[Throwable, ResultSet] = { val res = insert.value(_.id, l.id) .value(_.data, l.data) .value(_.created_at, l.created_at) .future() Await.result(res, 5.seconds).successNel } def getRecord(id: String): ValidationNel[Throwable, Option[LicensesResult]] = { val res = select.allowFiltering().where(_.id eqs id).one() Await.result(res, 5.seconds).successNel } } object Licenses extends ConcreteLicenses { val FIRST_ID = "1" private def mkLicensesSack(input: String): ValidationNel[Throwable, Option[LicensesResult]] = { val ripNel: ValidationNel[Throwable, LicensesInput] = (Validation.fromTryCatchThrowable[models.admin.LicensesInput, Throwable] { parse(input).extract[LicensesInput] } leftMap { t: Throwable ⇒ new MalformedBodyError(input, t.getMessage) }).toValidationNel //capture failure for { rip ← ripNel } yield { val res = LicensesResult(FIRST_ID, rip.data, DateHelper.now()) res.some } } def create(input: String): ValidationNel[Throwable, Option[LicensesResult]] = { for { wa ← (mkLicensesSack(input) leftMap { err: NonEmptyList[Throwable] ⇒ err }) set ← (insertNewRecord(wa.get) leftMap { t: NonEmptyList[Throwable] ⇒ t }) } yield { wa } } def findById(id: String): ValidationNel[Throwable, Option[LicensesResult]] = { val lid = id.some.getOrElse(FIRST_ID) (getRecord(lid) leftMap { t: NonEmptyList[Throwable] ⇒ new ServiceUnavailableError(id, (t.list.map(m ⇒ m.getMessage)).mkString("\\n")) }).toValidationNel.flatMap { xso: Option[LicensesResult] ⇒ xso match { case Some(xs) ⇒ { Validation.success[Throwable, Option[LicensesResult]](xs.some).toValidationNel } case None ⇒ Validation.failure[Throwable, Option[LicensesResult]](new ResourceItemNotFound(id, "")).toValidationNel } } } implicit val sedimentLicenses = new Sedimenter[ValidationNel[Throwable, Option[LicensesResult]]] 
{ def sediment(maybeASediment: ValidationNel[Throwable, Option[LicensesResult]]): Boolean = { maybeASediment.isSuccess } } }
indykish/vertice_gateway
app/models/admin/Licenses.scala
Scala
mit
3,983
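A hedged usage sketch for the Licenses model above: create a record from a JSON payload and read it back. It assumes a reachable Cassandra/Scylla session behind RootConnector, as configured in the real gateway; the payload key matches the LicensesInput case class, and both calls return a scalaz ValidationNel.

import scalaz.{ Success, Failure }

object LicensesUsageSketch {
  def demo(): Unit = {
    val payload = """{"data":"trial-license-key"}"""

    models.admin.Licenses.create(payload) match {
      case Success(Some(res)) => println(s"stored license ${res.id} at ${res.created_at}")
      case Success(None)      => println("nothing was stored")
      case Failure(errs)      => println("create failed: " + errs.list.map(_.getMessage).mkString("; "))
    }

    models.admin.Licenses.findById("1") match {
      case Success(Some(res)) => println(s"found license data: ${res.data}")
      case _                  => println("no license with id 1")
    }
  }
}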
package squants

object Platform {

  /**
   * Helper function to achieve uniform Double formatting over the JVM, JS, and native platforms.
   * A plain Double.toString formats 1.0 as "1.0" on the JVM but as "1" on JS.
   * @param d Double number to be formatted
   * @return the formatted string, always carrying a decimal place for whole numbers
   */
  private[squants] def crossFormat(d: Double): String = {
    if (d.toLong == d) {
      "%.1f".format(d)
    } else {
      d.toString
    }
  }
}
typelevel/squants
jvm/src/main/scala/squants/Platform.scala
Scala
apache-2.0
427
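A tiny demonstration of the crossFormat contract documented above: whole-number doubles always come out with one decimal place, anything else falls back to toString. It lives in the squants package only because the helper is private[squants], and the expected output assumes a locale that uses '.' as the decimal separator, which "%.1f".format relies on.

package squants

object CrossFormatDemo extends App {
  println(Platform.crossFormat(1.0))   // "1.0" on JVM, JS and native alike
  println(Platform.crossFormat(42.0))  // "42.0"
  println(Platform.crossFormat(1.5))   // "1.5" (non-integral values use toString)
}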
package com.tribbloids.spookystuff.integration.join import com.tribbloids.spookystuff.actions.Wget import com.tribbloids.spookystuff.extractors.Col /** * Created by peng on 25/10/15. */ class InnerWgetJoinIT extends InnerVisitJoinIT { override lazy val driverFactories = Seq( null ) override def getPage(uri: Col[String]) = Wget(uri) }
tribbloid/spookystuff
integration/src/test/scala/com/tribbloids/spookystuff/integration/join/InnerWgetJoinIT.scala
Scala
apache-2.0
354
object Test { def main(args: Array[String]): Unit = { val j = new J_2() println(j.bar1()) println(j.bar2()) println(j.bar3()) } }
som-snytt/dotty
tests/run/t4317/S_3.scala
Scala
apache-2.0
150
/* * Copyright (c) 2014-2021 by The Monix Project Developers. * See the project homepage at: https://monix.io * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package monix.reactive.compression import minitest.SimpleTestSuite import minitest.laws.Checkers import monix.execution.Scheduler trait CompressionIntegrationSuite extends SimpleTestSuite with Checkers { implicit val scheduler: Scheduler = Scheduler.computation(parallelism = 4, name = "compression-tests", daemonic = true) def assertArrayEquals[T](a1: Array[T], a2: Array[T]): Unit = { assertEquals(a1.toList, a2.toList) } }
monifu/monifu
monix-reactive/jvm/src/test/scala/monix/reactive/compression/CompressionIntegrationSuite.scala
Scala
apache-2.0
1,119
package scodec
package codecs

import scalaz.\/
import scalaz.syntax.std.either._
import scalaz.syntax.std.option._

import java.nio.{ ByteBuffer, ByteOrder }

import scodec.bits.{ BitVector, ByteOrdering, ByteVector }

private[codecs] final class ShortCodec(bits: Int, signed: Boolean, ordering: ByteOrdering) extends Codec[Short] {

  require(bits > 0 && bits <= (if (signed) 16 else 15), "bits must be in range [1, 16] for signed and [1, 15] for unsigned")

  val MaxValue = ((1 << (if (signed) (bits - 1) else bits)) - 1).toShort
  val MinValue = (if (signed) -(1 << (bits - 1)) else 0).toShort

  private def description = s"$bits-bit ${if (signed) "signed" else "unsigned"} short"

  override def encode(s: Short) = {
    if (s > MaxValue) {
      \/.left(Err(s"$s is greater than maximum value $MaxValue for $description"))
    } else if (s < MinValue) {
      \/.left(Err(s"$s is less than minimum value $MinValue for $description"))
    } else {
      \/.right(BitVector.fromShort(s, bits, ordering))
    }
  }

  override def decode(buffer: BitVector) =
    buffer.acquire(bits) match {
      case Left(e) => \/.left(Err.insufficientBits(bits, buffer.size))
      case Right(b) => \/.right((buffer.drop(bits), b.toShort(signed, ordering)))
    }

  override def toString = description
}
danielwegener/scodec
src/main/scala/scodec/codecs/ShortCodec.scala
Scala
bsd-3-clause
1,297
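The range-checked ShortCodec above is private[codecs] and is normally reached through scodec's public combinators. The sketch below assumes the usual combinator names (short16 for the full 16-bit signed codec, ushort(n) for an unsigned n-bit one); if those names differ in a given scodec release, substitute the matching ones.

import scodec.codecs._

object ShortCodecSketch extends App {
  // Full-range 16-bit signed short, big-endian by default.
  val encoded = short16.encode(300.toShort)
  println(encoded)                                  // \/-(BitVector of 16 bits)

  // Decoding returns the remaining bits together with the decoded value.
  encoded.foreach(bits => println(short16.decode(bits)))

  // A narrower unsigned codec (assumed name) rejects out-of-range values with a descriptive Err.
  println(ushort(4).encode(42.toShort))             // -\/(Err("42 is greater than maximum value 15 ..."))
}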
package com.idealicious

import java.io.{FileWriter, IOException}

// http://examples.javacodegeeks.com/core-java/writeread-csv-files-in-java-example/
class CSVWriter(val data: Seq[Journal],
                val header: Seq[String],
                val delimiter: String = ",",
                val newLine: String = "\n") {

  def write(path: String) = {
    var fw: FileWriter = null

    try {
      fw = new FileWriter(path)

      // Write header
      fw.append(header.mkString(delimiter) + newLine)

      // Write each row
      data.foreach((e) => fw.append(e.title + delimiter + e.score.get + delimiter + e.abbrev + newLine))
    } catch {
      case e: IOException => {
        println("Error writing to file.")
        e.printStackTrace()
      }
    } finally {
      try {
        // Guard against the FileWriter constructor having failed before assignment.
        if (fw != null) {
          fw.flush()
          fw.close()
        }
      } catch {
        case e: IOException => {
          println("Error while flushing or closing the file.")
          e.printStackTrace()
        }
      }
    }
  }
}
aksiksi/impact
src/main/scala/com/idealicious/CSVWriter.scala
Scala
mit
937
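A usage sketch for the CSVWriter above. Journal is defined elsewhere in this project; the constructor call below assumes it carries roughly (title: String, score: Option[Double], abbrev: String), which is what the write method reads, so adjust the field list to the real case class.

package com.idealicious

object CsvWriterSketch extends App {
  // Assumed Journal shape: Journal(title, score: Option[Double], abbrev)
  val rows: Seq[Journal] = Seq(
    Journal("Nature", Some(41.5), "NAT"),
    Journal("Science", Some(37.2), "SCI")
  )

  // One header line, then one "title,score,abbrev" line per Journal.
  new CSVWriter(rows, header = Seq("title", "score", "abbrev")).write("journals.csv")
}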
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.activemq.apollo.broker.network import org.apache.activemq.apollo.util._ import org.fusesource.hawtdispatch._ import dto._ import CollectionsSupport._ import java.util.concurrent.TimeUnit._ import collection.mutable.{LinkedHashMap, HashSet, ListBuffer, HashMap} import org.apache.activemq.apollo.dto.CustomServiceDTO import org.apache.activemq.apollo.broker.{AcceptingConnector, VirtualHost, Broker, CustomServiceFactory} import java.net.InetSocketAddress /** * <p> * </p> * * @author <a href="http://hiramchirino.com">Hiram Chirino</a> */ object NetworkManagerFactory extends CustomServiceFactory with Log { def create(broker: Broker, dto: CustomServiceDTO): Service = dto match { case dto:NetworkManagerDTO => val rc = new NetworkManager(broker) rc.config = dto rc case _ => null } } object NetworkManager extends Log { def has_variables(x:String) = x.contains("{{"):Boolean def has_variables(dto:ClusterMemberDTO):Boolean = { import collection.JavaConversions._ has_variables(dto.id) || dto.services.foldLeft(false){ case (x,y) => x || has_variables(y.address) } } def resolve_variables(dto:ClusterMemberDTO, broker:Broker, host:VirtualHost):ClusterMemberDTO = { import collection.JavaConversions._ def resolve(x:String) = if( !x.contains("{{") ) { x } else { var rc = x; if( host!=null ) { rc = rc.replaceAllLiterally("{{host}}", host.id) } if( broker.web_server!=null && broker.web_server.uris()!=null && !broker.web_server.uris().isEmpty) { rc = rc.replaceAllLiterally("{{web_admin.url}}", broker.web_server.uris()(0).toString.stripSuffix("/")) } for( (id, connector) <- broker.connectors ) { connector match { case connector:AcceptingConnector => connector.socket_address match { case address:InetSocketAddress => rc = rc.replaceAllLiterally("{{connector."+id+".port}}", ""+address.getPort) case _ => } case _ => } } rc } val rc = new ClusterMemberDTO rc.id = resolve(dto.id) for( service <- dto.services) { val s = new ClusterServiceDTO s.kind = service.kind s.address = resolve(service.address) rc.services.add(s) } rc } } class NetworkManager(broker: Broker) extends BaseService with MembershipListener with BrokerLoadListener { import NetworkManager._ val dispatch_queue = createQueue("bridge manager") var config = new NetworkManagerDTO var membership_monitor:MembershipMonitor = _ var members = collection.Set[ClusterMemberDTO]() var members_by_id = HashMap[String, ClusterMemberDTO]() var load_monitor: BrokerLoadMonitor = _ var metrics_map = HashMap[String, BrokerMetrics]() val bridges = HashMap[BridgeInfo, BridgeDeployer]() def network_user = Option(config.user).getOrElse("network") def network_password = config.password def monitoring_interval = OptionSupport(config.monitoring_interval).getOrElse(5) protected def 
_start(on_completed: Task) = { import collection.JavaConversions._ // TODO: also support dynamic membership discovery.. var monitors = List[MembershipMonitor]() var static_set = config.members.toSet if( !has_variables(config.self) ) { static_set += config.self } monitors ::= StaticMembershipMonitor(static_set) for( monitor_dto <- config.membership_monitors ) { var monitor = MembershipMonitorFactory.create(broker, monitor_dto) if(monitor!=null) { monitors ::= monitor } else { warn("Could not create the membership monitor for: "+monitor_dto) } } membership_monitor = MulitMonitor(monitors) membership_monitor.listener = this membership_monitor.start(NOOP) load_monitor = new RestLoadMonitor(this) load_monitor.listener = this load_monitor.start(NOOP) schedule_reoccurring(1, SECONDS) { load_analysis } on_completed.run() } protected def _stop(on_completed: Task) = { membership_monitor.stop(NOOP) on_completed.run() } def on_membership_change(value: collection.Set[ClusterMemberDTO]) = dispatch_queue { val (added, _, removed) = diff(members, value) for( m <- removed ) { info("Broker host left the network: %s", m.id) load_monitor.remove(m) } for( m <- added ) { info("Broker host joined the network: %s", m.id) load_monitor.add(m) } members = value members_by_id = HashMap(members.toSeq.map(x=> (x.id->x)) : _*) } def on_load_change(dto: LoadStatusDTO) = dispatch_queue { metrics_map.getOrElseUpdate(dto.id, new BrokerMetrics()).update(dto, network_user) } def load_analysis = { dispatch_queue.assertExecuting() // Lets remove load entries for members that were removed from the cluster. val keys = metrics_map.keySet val current = members.map(_.id).toSet metrics_map = metrics_map -- (keys -- current) class DemandStatus { val needs_consumers = ListBuffer[(String,DestinationMetrics)]() val has_consumers = ListBuffer[(String,DestinationMetrics)]() } val queue_demand_map = HashMap[String, DemandStatus]() for( (broker, broker_load) <- metrics_map) { for( (dest_name, dest) <- broker_load.queue_load ) { val status = queue_demand_map.getOrElseUpdate(dest_name, new DemandStatus) var needsmoreconsumers = needs_more_consumers(dest) if( can_bridge_from(broker) && needsmoreconsumers ) { // The broker needs more consumers to drain the queue.. status.needs_consumers += (broker->dest) } else { // The broker can drain the queue of other brokers.. if( can_bridge_to(broker) && dest.consumer_count > 0 ) { status.has_consumers += (broker->dest) } } } } val desired_bridges = HashSet[BridgeInfo]() for( (id, demand) <- queue_demand_map ) { for( (from, from_metrics)<- demand.needs_consumers; (to, to_metrics) <-demand.has_consumers ) { // we could get fancy and compare the to_metrics and from_metrics to avoid // setting up bridges that won't make a big difference.. desired_bridges += BridgeInfo(from, to, "queue", id) } } val (bridges_added, _, bridges_removed) = diff(bridges.keySet, desired_bridges) // Stop and remove the bridges that are no longer needed.. for( info <- bridges_removed ) { bridges.remove(info).get.undeploy } // Create and start the new bridges.. 
for( info <- bridges_added ) { val controller = BridgeDeployer(info) bridges.put(info, controller) controller.deploy } } def is_local_broker_id(id:String):Boolean = { if( has_variables(config.self.id) ) { for( host <- broker.virtual_hosts.values ) { if( config.self.id.replaceAllLiterally("{{host}}", host.id) == id ) return true } false } else { config.self.id == id } } def can_bridge_from(broker:String):Boolean = is_local_broker_id(broker) def can_bridge_to(broker:String):Boolean = { if ( is_local_broker_id(broker) ) { OptionSupport(config.duplex).getOrElse(false) } else { true } } def needs_more_consumers(dest:DestinationMetrics):Boolean = { // nothing to drain.. so no need for consumers. if( dest.message_size == 0 && dest.enqueue_size_rate.mean == 0) { return false } val drain_rate = dest.dequeue_size_rate - dest.enqueue_size_rate.mean if( drain_rate < 0 ) { // Not draining... return true } // Might need a consumer due to being drained too slowly.. val drain_eta_in_seconds = dest.message_size / drain_rate return drain_eta_in_seconds > 60 } val bridging_strategies = LinkedHashMap[String, BridgingStrategy]() bridging_strategies.put("stomp", new StompBridgingStrategy(this)) case class BridgeDeployer(info:BridgeInfo) { def to = members_by_id.get(info.to) def from = members_by_id.get(info.from) var bridging_strategy:BridgingStrategy = _ var bridging_strategy_info : BridgeInfo = _ def deploy:Unit = { // Lets find a service kind that we can use to bridge... import collection.JavaConversions._ for( to <- to ; from <-from ) { // bridging_strategies are kept in preferred order for( (service_kind, strategy) <- bridging_strategies ) { // Lets look to see if we can use the strategy with services exposed by the broker.. for( to_service <- to.services; from_service <- from.services ) { if( bridging_strategy==null && to_service.kind==service_kind && to_service.kind==from_service.kind ) { bridging_strategy = strategy bridging_strategy_info = BridgeInfo(from_service.address, to_service.address, info.kind, info.dest) bridging_strategy.deploy( bridging_strategy_info ) } } } } } def undeploy = { if( bridging_strategy!=null ) { bridging_strategy.undeploy(bridging_strategy_info) bridging_strategy = null bridging_strategy_info = null } } } }
chirino/activemq-apollo
apollo-network/src/main/scala/org/apache/activemq/apollo/broker/network/NetworkManager.scala
Scala
apache-2.0
10,095
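The needs_more_consumers test above drives the whole load analysis, so here is the same arithmetic restated with plain numbers (a standalone paraphrase, not the class itself): a queue asks the network for consumers only when it is not draining at all, or when draining the current backlog would take more than 60 seconds at the net rate.

object DrainEtaSketch extends App {
  def needsMoreConsumers(messageSize: Long, enqueueRate: Double, dequeueRate: Double): Boolean = {
    if (messageSize == 0 && enqueueRate == 0) false            // nothing to drain
    else {
      val drainRate = dequeueRate - enqueueRate                // net drain per second
      if (drainRate < 0) true                                  // backlog is growing
      else messageSize / drainRate > 60                        // drain ETA in seconds
    }
  }

  println(needsMoreConsumers(messageSize = 0,     enqueueRate = 0,   dequeueRate = 0))    // false
  println(needsMoreConsumers(messageSize = 10000, enqueueRate = 500, dequeueRate = 400))  // true (growing)
  println(needsMoreConsumers(messageSize = 10000, enqueueRate = 100, dequeueRate = 600))  // false (ETA = 20s)
}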
package wow.realm.protocol

import scala.language.implicitConversions

/**
 * Class used to indicate which opcode a payload is associated with
 *
 * @param opCode opcode
 * @tparam A payload type
 */
case class OpCodeProvider[A](opCode: OpCodes.Value)

object OpCodeProvider {
  /**
   * Implicit cast from opcode to OpCodeProvider
   *
   * @param opCode opCode
   * @tparam A payload type
   * @return opcode provider for payload type A
   */
  implicit def opCodeToProvider[A](opCode: OpCodes.Value): OpCodeProvider[A] = new OpCodeProvider[A](opCode)
}
SKNZ/SpinaciCore
wow/core/src/main/scala/wow/realm/protocol/OpCodeProvider.scala
Scala
mit
566
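How the implicit conversion above is meant to be used: a packet payload's companion can expose its opcode directly, and the conversion turns it into an OpCodeProvider wherever one is required. CharEnumPayload and OpCodes.SMSG_CHAR_ENUM are purely illustrative names, not identifiers taken from this codebase.

import wow.realm.protocol._

// Hypothetical payload type, for illustration only.
case class CharEnumPayload(characterCount: Int)

object CharEnumPayload {
  // opCodeToProvider turns the plain OpCodes.Value into an OpCodeProvider[CharEnumPayload]
  // wherever one is implicitly required (e.g. by packet serialization helpers).
  implicit val opCodeProvider: OpCodeProvider[CharEnumPayload] =
    OpCodes.SMSG_CHAR_ENUM // illustrative opcode name
}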
package org.scalarules.finance /** * Aggregates the implicits of the finance DSL. A simple import og org.scalarules.finance.nl._ will bring all implicits * into scope. */ package object nl extends FinanceDsl { }
scala-rules/finance-dsl
src/main/scala/org/scalarules/finance/nl/package.scala
Scala
mit
220
/*
 * scala-swing (https://www.scala-lang.org)
 *
 * Copyright EPFL, Lightbend, Inc., contributors
 *
 * Licensed under Apache License 2.0
 * (http://www.apache.org/licenses/LICENSE-2.0).
 *
 * See the NOTICE file distributed with this work for
 * additional information regarding copyright ownership.
 */

package scala.swing

import javax.swing.JRadioButton

/**
 * A two-state button that is usually used in a <code>ButtonGroup</code>
 * together with other <code>RadioButton</code>s, in order to indicate
 * that at most one of them can be selected.
 *
 * @see javax.swing.JRadioButton
 */
class RadioButton(text0: String) extends ToggleButton {
  override lazy val peer: JRadioButton = new JRadioButton(text0) with SuperMixin
  def this() = this("")
}
scala/scala-swing
src/main/scala/scala/swing/RadioButton.scala
Scala
apache-2.0
757
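A minimal scala-swing sketch of the usage the class comment above describes: two RadioButtons placed in a ButtonGroup so that at most one can be selected at a time.

import scala.swing._

object RadioButtonDemo extends SimpleSwingApplication {
  val yes = new RadioButton("Yes")
  val no  = new RadioButton("No")
  val group = new ButtonGroup(yes, no)   // mutual exclusion between the two buttons
  group.select(yes)

  def top: MainFrame = new MainFrame {
    title = "RadioButton demo"
    contents = new FlowPanel(yes, no)
  }
}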
package teststep
package modules

object Errors {

  val base =
    """The correct usage is:
      | teststep <path to source directory>
      | teststep <path to source file> <path to key-value map>
      | teststep <path to source directory> <path to key-value map>
    """.stripMargin

  val e1 = s"Missing arguments.\n$base"

  def e2(path: String): String = s"No directory found at: $path\n$base"

  def e3(path: String): String = s"The directory $path is empty."

  def e4(path: String): String = s"The directory $path does not contain any *.test files."

  def e5(path: String): String = s"The directory $path does not contain the map.csv file."

  def e6(path: String): String = s"No file or directory found at: $path\n$base"

  def e7(path: String): String = s"No file found at: $path\n$base"

  def e8(path: String): String = s"The file $path is not a test-case file."

  def e9(path: String): String = s"The file $path is not a map file."

  def e10(path: String): String = s"The test-case in file $path does not contain any steps."
}
ssfc/test_step
src/main/scala/teststep/modules/Errors.scala
Scala
apache-2.0
1,073
/* * Copyright 2015 Nicolas Rinaudo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kantan.csv package ops import kantan.codecs.laws.CodecValue import laws._ import laws.discipline.arbitrary._ import org.scalatest.funsuite.AnyFunSuite import org.scalatest.matchers.should.Matchers import org.scalatestplus.scalacheck.ScalaCheckPropertyChecks import scala.util.Try class CsvSourceOpsTests extends AnyFunSuite with ScalaCheckPropertyChecks with Matchers { type TestCase = (Int, Float, String, Boolean) def compare[F, A](csv: List[Either[F, A]], data: List[RowValue[A]]): Unit = { csv.length should be(data.length) csv.zip(data).foreach { case (Right(is), CodecValue.LegalValue(_, cs)) => is should be(cs) case (Left(_), CodecValue.IllegalValue(_)) => case (a, b) => fail(s"$a is not compatible with $b") } } test("CsvSource instances should have a working asCsvReader method") { forAll { data: List[RowValue[TestCase]] => compare( asCsv(data, rfc) .asCsvReader[TestCase](rfc) .toList, data ) } } test("CsvSource instances should have a working readCsv method") { forAll { data: List[RowValue[TestCase]] => compare( asCsv(data, rfc) .readCsv[List, TestCase](rfc), data ) } } def compareUnsafe[A](csv: => List[A], data: List[RowValue[A]]): Unit = { def cmp(csv: List[A], data: List[RowValue[A]]): Unit = (csv, data) match { case (Nil, Nil) => () case (h1 :: t1, CodecValue.LegalValue(_, h2) :: t2) if h1 == h2 => cmp(t1, t2) case (a, b) => fail(s"$a is not compatible with $b") } Try(csv) match { case scala.util.Success(is) => cmp(is, data) case _ => data.filter(_.isIllegal).nonEmpty should be(true) () } } test("CsvSource instances should have a working asUnsafeCsvReader method") { forAll { data: List[RowValue[TestCase]] => compareUnsafe( asCsv(data, rfc) .asUnsafeCsvReader[TestCase](rfc) .toList, data ) } } test("CsvSource instances should have a working unsafeReadCsv method") { forAll { data: List[RowValue[TestCase]] => compareUnsafe( asCsv(data, rfc) .unsafeReadCsv[List, TestCase](rfc), data ) } } }
nrinaudo/scala-csv
core/shared/src/test/scala/kantan/csv/ops/CsvSourceOpsTests.scala
Scala
mit
3,004
package plots import java.io.File import javafx.application.Application import javafx.scene._ import javafx.scene.chart._ import javafx.scene.control._ import javafx.scene.input._ import javafx.scene.layout._ import javafx.stage._ import scala.io.Source import javafx.collections.ObservableList import scala.collection.JavaConversions._ import scalaxy.fx._ import javafx.beans.property.ObjectProperty // sbt "~run first.csv second.csv" object Plotter extends App { Application.launch(classOf[Plotter], args: _*) } object Eval { import scala.reflect.runtime.currentMirror import scala.tools.reflect.ToolBox val toolbox = currentMirror.mkToolBox() type DoubleFunction = Double => Double def getDoubleFunction(param: String, expr: String): DoubleFunction = { val tree = toolbox.parse(s"($param: Double) => $expr") toolbox.compile(tree)().asInstanceOf[DoubleFunction] } } class Plotter extends Application { def buildSeries(name: String, data: Seq[XYChart.Data[Number, Number]]) = { val series = new XYChart.Series[Number, Number]() series.setName(name) series.getData.addAll(data)//f(file)) series } override def start(primaryStage: Stage) { import DSL._ import Eval._ def scene = primaryStage.getScene val xAxisField = new TextField("x") val yAxisField = new TextField("y") val xMinField = new TextField("0") val xMaxField = new TextField("10") val fField = new TextField("x * 2") val f = bind { // fField.text = "a" println("Evaluating: " + fField.text) try { Some(getDoubleFunction(xAxisField.text, fField.text)) } catch { case ex: Throwable => None } } val data = bind { println("Computing data") (for (x <- xMinField.text.toInt to xMaxField.text.toInt; ff <- f.getValue) yield { (x, ff(x)): XYChart.Data[Number, Number] }): Seq[XYChart.Data[Number, Number]] } primaryStage.set( title = "Plot", scene = new Scene( new StackPane() { getChildren.add( new BorderPane().set( top = vBox( hBox(new Label("Function:"), fField), hBox(new Label("X axis:"), xAxisField, xMinField, xMaxField), hBox(new Label("Y axis:"), yAxisField)), center = { val xAxis = new NumberAxis().set(label = bind { xAxisField.text }) val yAxis = new NumberAxis().set(label = bind { yAxisField.text }) print(xAxis.width) // To use an explicit range / tick unit instead, use: // val xAxis = new NumberAxis("Age", 0, 100, 4) //val chart = new ScatterChart[Number, Number](xAxis,yAxis) val chart = new LineChart[Number, Number](xAxis,yAxis) // chart.set(data = bind { //// val xMin = xMinField.text.toInt //// val xMax = xMaxField.text.toInt //// (for (x <- xMin to xMax; ff <- f.getValue) yield { // ((for (x <- xMinField.text.toInt to xMaxField.text.toInt; // ff <- f.getValue) yield { // (x, ff(x)): XYChart.Data[Number, Number] // }): Seq[XYChart.Data[Number, Number]]): ObservableList[XYChart.Data[Number, Number]] // }) data onChange { chart.getData.setAll( List( buildSeries(fField.text, data.getValue))) } // chart.onDragDone = (e: DragEvent) => println(e) // chart.setOnDragDone(e -> System.out.println(e)); // chart.getData.setAll(List( // buildSeries("f", Seq((0, 0), (1, 20), (10, 30)))//.map(IntXYData)) // )) chart }, bottom = new Button().set( text = "Reload stylesheet", onAction = { // com.sun.javafx.css.StyleManager.getInstance().reloadStylesheets(scene) } ) ) ) }, 500, 250 ) ) scene.getStylesheets.add("Chart.css"); primaryStage.show() } }
nativelibs4java/Scalaxy
Fx/Example/Grapher/src/main/scala/Plotter.scala
Scala
bsd-3-clause
4,386
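A quick check of the Eval helper defined above: it parses and compiles an expression string into a Double => Double at runtime via the reflection toolbox, which is exactly what the plotting UI does with its text fields.

object EvalSketch extends App {
  val f = plots.Eval.getDoubleFunction("x", "x * 2 + 1")
  println(f(3.0))                             // 7.0
  println((0 to 3).map(i => f(i.toDouble)))   // Vector(1.0, 3.0, 5.0, 7.0)
}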