Column      Type            Range / values
code        stringlengths   5 - 1M
repo_name   stringlengths   5 - 109
path        stringlengths   6 - 208
language    stringclasses   1 value
license     stringclasses   15 values
size        int64           5 - 1M
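The schema above follows the Hugging Face dataset-viewer convention (string length ranges, class counts, and an int64 file size). As a minimal, hedged sketch of how a dataset with this schema could be loaded and filtered, the snippet below assumes the data is published on the Hugging Face Hub; the dataset path "user/scala-github-code" is a hypothetical placeholder, not the real identifier of this corpus.

# Minimal sketch, assuming the dataset is hosted on the Hugging Face Hub.
# "user/scala-github-code" is a hypothetical placeholder name.
from datasets import load_dataset

ds = load_dataset("user/scala-github-code", split="train")

# Columns match the schema above: code, repo_name, path, language, license, size.
print(ds.column_names)

# Example: keep permissively licensed files smaller than 10 KB.
small_permissive = ds.filter(
    lambda row: row["license"] in {"mit", "apache-2.0", "bsd-3-clause"}
    and row["size"] < 10_000
)
print(len(small_permissive), "rows after filtering")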
package com.googlecode.warikan.domain.models

/**
 * Role.
 *
 * @author yukei
 */
@serializable
case class Role(name:String)
digitalsoul0124/warikan
src/main/scala/com/googlecode/warikan/domain/models/Role.scala
Scala
mit
126
/***********************************************************************
 * Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Apache License, Version 2.0
 * which accompanies this distribution and is available at
 * http://www.opensource.org/licenses/apache2.0.php.
 ***********************************************************************/

package org.locationtech.geomesa.utils.cache

import java.io.File
import java.nio.file.{Files, Path}

import org.junit.runner.RunWith
import org.locationtech.geomesa.utils.io.PathUtils
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner

@RunWith(classOf[JUnitRunner])
class FilePersistenceTest extends Specification {

  def withTestDir[T](fn: (Path) => T): T = {
    val path = Files.createTempDirectory("gmFilePersistenceTest")
    try { fn(path) } finally {
      PathUtils.deleteRecursively(path)
    }
  }

  "FilePersistence" should {
    "fail for non-directories" in {
      withTestDir { dir =>
        val file = new File(dir.toFile, "foo")
        file.createNewFile() must beTrue
        new FilePersistence(file, "foo") must throwAn[IllegalArgumentException]
      }
    }
    "set and get values" in {
      withTestDir { dir =>
        val one = new FilePersistence(dir.toFile, "foo")
        one.persist("foo", "bar")
        one.persist("bar", "baz")
        one.persistAll(Map("fizz" -> "buzz", "baz" -> "blue"))
        one.keys() mustEqual Set("foo", "bar", "fizz", "baz")
        one.entries() mustEqual Set("bar" -> "baz", "baz" -> "blue", "foo" -> "bar", "fizz" -> "buzz")
        one.read("foo") must beSome("bar")
        one.read("bar") must beSome("baz")
        one.read("blue") must beNone
        one.remove("bar") must beTrue
        one.remove("blue") must beFalse
        one.removeAll(Seq("fizz", "baz"))
        one.keys() mustEqual Set("foo")
        one.entries() mustEqual Set("foo" -> "bar")
        one.read("foo") must beSome("bar")
        one.read("bar") must beNone
      }
    }
    "return properties by prefix" in {
      withTestDir { dir =>
        val one = new FilePersistence(dir.toFile, "foo")
        one.persist("foo", "bar")
        one.persist("bar", "baz")
        one.persistAll(Map("fizz" -> "buzz", "baz" -> "blue"))
        one.entries("b") mustEqual Set("bar" -> "baz", "baz" -> "blue")
      }
    }
    "persist properties across instances" in {
      withTestDir { dir =>
        val one = new FilePersistence(dir.toFile, "foo")
        one.persist("foo", "bar")
        one.persist("bar", "baz")
        one.persistAll(Map("fizz" -> "buzz", "baz" -> "blue"))
        val two = new FilePersistence(dir.toFile, "foo")
        two.entries() mustEqual one.entries()
      }
    }
  }
}
aheyne/geomesa
geomesa-utils/src/test/scala/org/locationtech/geomesa/utils/cache/FilePersistenceTest.scala
Scala
apache-2.0
2,854
package com.cloudray.scalapress.plugin.ecommerce

import com.cloudray.scalapress.plugin.ecommerce.domain.{Address, Order}
import com.cloudray.scalapress.payments.Purchase

/** @author Stephen Samuel */
class OrderPurchase(val order: Order, val domain: String) extends Purchase {
  require(order != null)

  def successUrl: String = "http://" + domain + "/checkout/completed"
  def failureUrl: String = "http://" + domain + "/checkout/payment/failure"

  override def deliveryAddress: Option[Address] = Option(order.deliveryAddress)
  override def billingAddress: Option[Address] = Option(order.billingAddress)

  def accountName: String = order.account.name
  def accountEmail: String = order.account.email
  def total: Int = (order.total * 100).toInt
  def uniqueIdent: String = order.id.toString
  def callback = "Order"
  def paymentDescription: String = s"Order #${order.id} - $domain"
}
vidyacraghav/scalapress
src/main/scala/com/cloudray/scalapress/plugin/ecommerce/OrderPurchase.scala
Scala
apache-2.0
892
/*
 * Scala.js (https://www.scala-js.org/)
 *
 * Copyright EPFL.
 *
 * Licensed under Apache License 2.0
 * (https://www.apache.org/licenses/LICENSE-2.0).
 *
 * See the NOTICE file distributed with this work for
 * additional information regarding copyright ownership.
 */

package org.scalajs.testsuite.junit

import org.junit.Assert._
import org.junit.Test

abstract class JUnitAbstractClassTest {
  @Test def test1(): Unit = ()
}

class JUnitAbstractClassExtended1Test extends JUnitAbstractClassTest

class JUnitAbstractClassExtended2Test extends JUnitAbstractClassTest {
  @Test def test2(): Unit = ()
}

class JUnitAbstractClassTestCheck {
  @Test def testAbstractClass1(): Unit = {
    val boot = JUnitUtil.loadBootstrapper(
        "org.scalajs.testsuite.junit.JUnitAbstractClassExtended1Test")
    try {
      boot.invokeTest(boot.newInstance(), "test1")
    } catch {
      case e: Throwable => fail(s"Could not invoke a test: ${e.getMessage}")
    }
  }

  @Test def testAbstractClass2(): Unit = {
    val boot = JUnitUtil.loadBootstrapper(
        "org.scalajs.testsuite.junit.JUnitAbstractClassExtended2Test")
    try {
      boot.invokeTest(boot.newInstance(), "test1")
      boot.invokeTest(boot.newInstance(), "test2")
    } catch {
      case e: Throwable => fail(s"Could not invoke a test: ${e.getMessage}")
    }
  }
}
scala-js/scala-js
test-suite/js/src/test/scala/org/scalajs/testsuite/junit/JUnitAbstractClassTest.scala
Scala
apache-2.0
1,353
package controllers

import java.util.UUID
import javax.inject.Inject

import play.api.Logger
import play.api.i18n.MessagesApi
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import play.api.libs.json.Json
import play.api.mvc._
import service.{UserHistoryService, HKGPostGrabber}
import utils.{CookieHelper, LogUtils}

class HKGPostController @Inject() (
    val manager: ActorSystemController,
    val messagesApi: MessagesApi) extends Controller
  with UserHistoryService with CookieHelper with HKGPostGrabber {

  def getPostRest(messageId: Int, page: Int) = Action.async { request =>
    getPostFromDBOrFallBack(messageId, page).map {
      case Some(post) =>
        request.cookies.get(UUID_KEY).map(_.value) match {
          case Some(uuid) =>
            saveHistory(uuid, post.toHistoryItem)
            Ok(Json.toJson(post))
          case None =>
            val newUUID = UUID.randomUUID().toString
            saveHistory(newUUID, post.toHistoryItem)
            Ok(Json.toJson(post)).withCookies(genCookie(newUUID))
        }
      case None => NotFound
    } recover {
      case e =>
        Logger.warn(LogUtils.getStackTraceAsString(e))
        InternalServerError
    }
  }

  def getTopicsRest(page: Int, channel: String) = Action.async {
    getTopis(page, channel).map {
      case Some(topics) => Ok(Json.toJson(topics))
      case None => NotFound
    } recover {
      case e =>
        Logger.warn(LogUtils.getStackTraceAsString(e))
        InternalServerError
    }
  }
}
j4ckcyw/golden-cache
app/controllers/HKGPostController.scala
Scala
gpl-3.0
1,545
import scala.reflect.runtime.universe._, definitions._

object Test extends dotty.runtime.LegacyApp {
  // Tuples
  assert(TupleClass.seq.size == 22)
  assert(TupleClass(0) == NoSymbol)
  assert(TupleClass(23) == NoSymbol)
  assert((1 to 22).forall { i => TupleClass(i).name.toString == s"Tuple$i" })
  // Functions
  assert(FunctionClass.seq.size == 23)
  assert(FunctionClass(-1) == NoSymbol)
  assert(FunctionClass(23) == NoSymbol)
  assert((0 to 22).forall { i => FunctionClass(i).name.toString == s"Function$i" })
  // Products
  assert(ProductClass.seq.size == 23)
  assert(ProductClass(-1) == NoSymbol)
  assert(ProductClass(0) == UnitClass)
  assert(ProductClass(23) == NoSymbol)
  assert((1 to 22).forall { i => ProductClass(i).name.toString == s"Product$i" })
}
yusuke2255/dotty
tests/run/var-arity-class-symbol.scala
Scala
bsd-3-clause
771
package scodec
package codecs

import scalaz.\/
import scalaz.syntax.std.either._

import scodec.bits.BitVector

private[codecs] final class ConstantCodec(constant: BitVector, validate: Boolean = true) extends Codec[Unit] {

  override def encode(ignore: Unit) = \/.right(constant)

  override def decode(buffer: BitVector) =
    if (validate) {
      buffer.acquire(constant.size) match {
        case Left(e) => \/.left(Err.insufficientBits(constant.size, buffer.size))
        case Right(b) =>
          if (b == constant) \/.right((buffer.drop(constant.size), ()))
          else \/.left(Err(s"expected constant $constant but got $b"))
      }
    } else \/.right((buffer drop constant.size, ()))

  override def toString = s"constant($constant)"
}
danielwegener/scodec
src/main/scala/scodec/codecs/ConstantCodec.scala
Scala
bsd-3-clause
748
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.table.plan.util import java.sql.{Date, Time, Timestamp} import org.apache.calcite.plan.RelOptUtil import org.apache.calcite.rex._ import org.apache.calcite.sql.fun.SqlStdOperatorTable import org.apache.calcite.sql.{SqlFunction, SqlPostfixOperator} import org.apache.calcite.util.{DateString, TimeString, TimestampString} import org.apache.flink.api.common.typeinfo.{BasicTypeInfo, SqlTimeTypeInfo} import org.apache.flink.table.api.TableException import org.apache.flink.table.calcite.FlinkTypeFactory import org.apache.flink.table.catalog.FunctionCatalog import org.apache.flink.table.expressions.utils.ApiExpressionUtils.unresolvedCall import org.apache.flink.table.expressions._ import org.apache.flink.table.util.JavaScalaConversionUtil import org.apache.flink.util.Preconditions import org.slf4j.{Logger, LoggerFactory} import scala.collection.JavaConversions._ import scala.collection.JavaConverters._ import scala.collection.mutable import scala.util.Try object RexProgramExtractor { lazy val LOG: Logger = LoggerFactory.getLogger(getClass) /** * Extracts the indices of input fields which accessed by the RexProgram. * * @param rexProgram The RexProgram to analyze * @return The indices of accessed input fields */ def extractRefInputFields(rexProgram: RexProgram): Array[Int] = { val visitor = new InputRefVisitor // extract referenced input fields from projections rexProgram.getProjectList.foreach( exp => rexProgram.expandLocalRef(exp).accept(visitor)) // extract referenced input fields from condition val condition = rexProgram.getCondition if (condition != null) { rexProgram.expandLocalRef(condition).accept(visitor) } visitor.getFields } /** * Extract condition from RexProgram and convert it into independent CNF expressions. 
* * @param rexProgram The RexProgram to analyze * @return converted expressions as well as RexNodes which cannot be translated */ def extractConjunctiveConditions( rexProgram: RexProgram, rexBuilder: RexBuilder, catalog: FunctionCatalog): (Array[Expression], Array[RexNode]) = { rexProgram.getCondition match { case condition: RexLocalRef => val expanded = rexProgram.expandLocalRef(condition) // converts the expanded expression to conjunctive normal form, // like "(a AND b) OR c" will be converted to "(a OR c) AND (b OR c)" val cnf = RexUtil.toCnf(rexBuilder, expanded) // converts the cnf condition to a list of AND conditions val conjunctions = RelOptUtil.conjunctions(cnf) val convertedExpressions = new mutable.ArrayBuffer[Expression] val unconvertedRexNodes = new mutable.ArrayBuffer[RexNode] val inputNames = rexProgram.getInputRowType.getFieldNames.asScala.toArray val converter = new RexNodeToExpressionConverter(inputNames, catalog) conjunctions.asScala.foreach(rex => { rex.accept(converter) match { case Some(expression) => convertedExpressions += expression case None => unconvertedRexNodes += rex } }) (convertedExpressions.toArray, unconvertedRexNodes.toArray) case _ => (Array.empty, Array.empty) } } /** * Extracts the name of nested input fields accessed by the RexProgram and returns the * prefix of the accesses. * * @param rexProgram The RexProgram to analyze * @return The full names of accessed input fields. e.g. field.subfield */ def extractRefNestedInputFields( rexProgram: RexProgram, usedFields: Array[Int]): Array[Array[String]] = { val visitor = new RefFieldAccessorVisitor(usedFields) rexProgram.getProjectList.foreach(exp => rexProgram.expandLocalRef(exp).accept(visitor)) val condition = rexProgram.getCondition if (condition != null) { rexProgram.expandLocalRef(condition).accept(visitor) } visitor.getProjectedFields } } /** * An RexVisitor to extract all referenced input fields */ class InputRefVisitor extends RexVisitorImpl[Unit](true) { private val fields = mutable.LinkedHashSet[Int]() def getFields: Array[Int] = fields.toArray override def visitInputRef(inputRef: RexInputRef): Unit = fields += inputRef.getIndex override def visitCall(call: RexCall): Unit = call.operands.foreach(operand => operand.accept(this)) } /** * An RexVisitor to convert RexNode to Expression. 
* * @param inputNames The input names of the relation node * @param functionCatalog The function catalog */ class RexNodeToExpressionConverter( inputNames: Array[String], functionCatalog: FunctionCatalog) extends RexVisitor[Option[Expression]] { override def visitInputRef(inputRef: RexInputRef): Option[Expression] = { Preconditions.checkArgument(inputRef.getIndex < inputNames.length) Some(PlannerResolvedFieldReference( inputNames(inputRef.getIndex), FlinkTypeFactory.toTypeInfo(inputRef.getType) )) } override def visitTableInputRef(rexTableInputRef: RexTableInputRef): Option[Expression] = visitInputRef(rexTableInputRef) override def visitLocalRef(localRef: RexLocalRef): Option[Expression] = { throw new TableException("Bug: RexLocalRef should have been expanded") } override def visitLiteral(literal: RexLiteral): Option[Expression] = { val literalType = FlinkTypeFactory.toTypeInfo(literal.getType) val literalValue = literalType match { case [email protected] => val rexValue = literal.getValueAs(classOf[DateString]) Date.valueOf(rexValue.toString) case [email protected] => val rexValue = literal.getValueAs(classOf[TimeString]) Time.valueOf(rexValue.toString(0)) case [email protected] => val rexValue = literal.getValueAs(classOf[TimestampString]) Timestamp.valueOf(rexValue.toString(3)) case [email protected]_TYPE_INFO => // convert from BigDecimal to Byte literal.getValueAs(classOf[java.lang.Byte]) case [email protected]_TYPE_INFO => // convert from BigDecimal to Short literal.getValueAs(classOf[java.lang.Short]) case [email protected]_TYPE_INFO => // convert from BigDecimal to Integer literal.getValueAs(classOf[java.lang.Integer]) case [email protected]_TYPE_INFO => // convert from BigDecimal to Long literal.getValueAs(classOf[java.lang.Long]) case [email protected]_TYPE_INFO => // convert from BigDecimal to Float literal.getValueAs(classOf[java.lang.Float]) case [email protected]_TYPE_INFO => // convert from BigDecimal to Double literal.getValueAs(classOf[java.lang.Double]) case [email protected]_TYPE_INFO => // convert from NlsString to String literal.getValueAs(classOf[java.lang.String]) case [email protected]_TYPE_INFO => // convert to Boolean literal.getValueAs(classOf[java.lang.Boolean]) case [email protected]_DEC_TYPE_INFO => // convert to BigDecimal literal.getValueAs(classOf[java.math.BigDecimal]) case _ => // Literal type is not supported. RexProgramExtractor.LOG.debug( "Literal {} of SQL type {} is not supported and cannot be converted. 
" + "Please reach out to the community if you think this type should be supported.", Array(literal, literal.getType): _*) return None } Some(Literal(literalValue, literalType)) } override def visitCall(call: RexCall): Option[Expression] = { val operands = call.getOperands.map( operand => operand.accept(this).orNull ) // return null if we cannot translate all the operands of the call if (operands.contains(null)) { None } else { // TODO we cast to planner expression as a temporary solution to keep the old interfaces call.getOperator match { case SqlStdOperatorTable.OR => Option(operands.reduceLeft { (l, r) => Or(l.asInstanceOf[PlannerExpression], r.asInstanceOf[PlannerExpression]) }) case SqlStdOperatorTable.AND => Option(operands.reduceLeft { (l, r) => And(l.asInstanceOf[PlannerExpression], r.asInstanceOf[PlannerExpression]) }) case function: SqlFunction => lookupFunction(replace(function.getName), operands) case postfix: SqlPostfixOperator => lookupFunction(replace(postfix.getName), operands) case operator@_ => lookupFunction(replace(s"${operator.getKind}"), operands) } } } override def visitFieldAccess(fieldAccess: RexFieldAccess): Option[Expression] = None override def visitCorrelVariable(correlVariable: RexCorrelVariable): Option[Expression] = None override def visitRangeRef(rangeRef: RexRangeRef): Option[Expression] = None override def visitSubQuery(subQuery: RexSubQuery): Option[Expression] = None override def visitDynamicParam(dynamicParam: RexDynamicParam): Option[Expression] = None override def visitOver(over: RexOver): Option[Expression] = None override def visitPatternFieldRef(fieldRef: RexPatternFieldRef): Option[Expression] = None private def lookupFunction(name: String, operands: Seq[Expression]): Option[Expression] = { // TODO we assume only planner expression as a temporary solution to keep the old interfaces val expressionBridge = new ExpressionBridge[PlannerExpression]( functionCatalog, PlannerExpressionConverter.INSTANCE) JavaScalaConversionUtil.toScala(functionCatalog.lookupFunction(name)) .flatMap(result => Try(expressionBridge.bridge( unresolvedCall(result.getFunctionDefinition, operands: _*))).toOption ) } private def replace(str: String): String = { str.replaceAll("\\\\s|_", "") } } /** * A RexVisitor to extract used nested input fields */ class RefFieldAccessorVisitor(usedFields: Array[Int]) extends RexVisitorImpl[Unit](true) { private val projectedFields: Array[Array[String]] = Array.fill(usedFields.length)(Array.empty) private val order: Map[Int, Int] = usedFields.zipWithIndex.toMap /** Returns the prefix of the nested field accesses */ def getProjectedFields: Array[Array[String]] = { projectedFields.map { nestedFields => // sort nested field accesses val sorted = nestedFields.sorted // get prefix field accesses val prefixAccesses = sorted.foldLeft(Nil: List[String]) { (prefixAccesses, nestedAccess) => prefixAccesses match { // first access => add access case Nil => List[String](nestedAccess) // top-level access already found => return top-level access case head :: Nil if head.equals("*") => prefixAccesses // access is top-level access => return top-level access case _ :: _ if nestedAccess.equals("*") => List("*") // previous access is not prefix of this access => add access case head :: _ if !nestedAccess.startsWith(head) => nestedAccess :: prefixAccesses // previous access is a prefix of this access => do not add access case _ => prefixAccesses } } prefixAccesses.toArray } } override def visitFieldAccess(fieldAccess: RexFieldAccess): Unit = { def 
internalVisit(fieldAccess: RexFieldAccess): (Int, String) = { fieldAccess.getReferenceExpr match { case ref: RexInputRef => (ref.getIndex, fieldAccess.getField.getName) case fac: RexFieldAccess => val (i, n) = internalVisit(fac) (i, s"$n.${fieldAccess.getField.getName}") } } val (index, fullName) = internalVisit(fieldAccess) val outputIndex = order.getOrElse(index, -1) val fields: Array[String] = projectedFields(outputIndex) projectedFields(outputIndex) = fields :+ fullName } override def visitInputRef(inputRef: RexInputRef): Unit = { val outputIndex = order.getOrElse(inputRef.getIndex, -1) val fields: Array[String] = projectedFields(outputIndex) projectedFields(outputIndex) = fields :+ "*" } override def visitCall(call: RexCall): Unit = call.operands.foreach(operand => operand.accept(this)) }
fhueske/flink
flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/plan/util/RexProgramExtractor.scala
Scala
apache-2.0
13,252
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License */ package org.apache.toree.kernel.interpreter.scala import java.io.{BufferedReader, ByteArrayOutputStream, InputStreamReader, PrintStream} import java.net.{URL, URLClassLoader} import java.nio.charset.Charset import java.util.concurrent.ExecutionException import org.apache.spark.SparkContext import org.apache.spark.repl.{SparkCommandLine, SparkIMain, SparkJLineCompletion} import org.apache.spark.sql.SQLContext import org.apache.toree.global.StreamState import org.apache.toree.interpreter._ import org.apache.toree.interpreter.imports.printers.{WrapperConsole, WrapperSystem} import org.apache.toree.kernel.api.{KernelLike, KernelOptions} import org.apache.toree.utils.{MultiOutputStream, TaskManager} import org.slf4j.LoggerFactory import scala.annotation.tailrec import scala.concurrent.{Await, Future} import scala.language.reflectiveCalls import scala.tools.nsc.backend.JavaPlatform import scala.tools.nsc.interpreter.{IR, InputStream, JPrintWriter, OutputStream} import scala.tools.nsc.io.AbstractFile import scala.tools.nsc.util.{ClassPath, MergedClassPath} import scala.tools.nsc.{Global, Settings, io} import scala.util.{Try => UtilTry} /** * Provides Scala version-specific features needed for the interpreter. */ trait ScalaInterpreterSpecific { this: ScalaInterpreter => private val ExecutionExceptionName = "lastException" var sparkIMain: SparkIMain = _ protected var jLineCompleter: SparkJLineCompletion = _ protected def newSparkIMain( settings: Settings, out: JPrintWriter ): SparkIMain = { val s = new SparkIMain(settings, out) s.initializeSynchronous() s } /** * Adds jars to the runtime and compile time classpaths. Does not work with * directories or expanding star in a path. 
* @param jars The list of jar locations */ override def addJars(jars: URL*): Unit = { // Enable Scala class support reinitializeSymbols() jars.foreach(_runtimeClassloader.addJar) updateCompilerClassPath(jars : _*) // Refresh all of our variables refreshDefinitions() } // TODO: Need to figure out a better way to compare the representation of // an annotation (contained in AnnotationInfo) with various annotations // like scala.transient protected def convertAnnotationsToModifiers( annotationInfos: List[Global#AnnotationInfo] ) = annotationInfos map { case a if a.toString == "transient" => "@transient" case a => logger.debug(s"Ignoring unknown annotation: $a") "" } filterNot { _.isEmpty } protected def convertScopeToModifiers(scopeSymbol: Global#Symbol) = { (if (scopeSymbol.isImplicit) "implicit" else "") :: Nil } protected def buildModifierList(termNameString: String) = { import scala.language.existentials val termSymbol = sparkIMain.symbolOfTerm(termNameString) convertAnnotationsToModifiers( if (termSymbol.hasAccessorFlag) termSymbol.accessed.annotations else termSymbol.annotations ) ++ convertScopeToModifiers(termSymbol) } protected def refreshDefinitions(): Unit = { sparkIMain.definedTerms.foreach(termName => { val termNameString = termName.toString val termTypeString = sparkIMain.typeOfTerm(termNameString).toLongString sparkIMain.valueOfTerm(termNameString) match { case Some(termValue) => val modifiers = buildModifierList(termNameString) logger.debug(s"Rebinding of $termNameString as " + s"${modifiers.mkString(" ")} $termTypeString") UtilTry(sparkIMain.beSilentDuring { sparkIMain.bind( termNameString, termTypeString, termValue, modifiers ) }) case None => logger.debug(s"Ignoring rebinding of $termNameString") } }) } protected def reinitializeSymbols(): Unit = { val global = sparkIMain.global import global._ new Run // Initializes something needed for Scala classes } protected def updateCompilerClassPath( jars: URL*): Unit = { require(!sparkIMain.global.forMSIL) // Only support JavaPlatform val platform = sparkIMain.global.platform.asInstanceOf[JavaPlatform] val newClassPath = mergeJarsIntoClassPath(platform, jars:_*) logger.debug(s"newClassPath: ${newClassPath}") // TODO: Investigate better way to set this... one thought is to provide // a classpath in the currentClassPath (which is merged) that can be // replaced using updateClasspath, but would that work more than once? val fieldSetter = platform.getClass.getMethods .find(_.getName.endsWith("currentClassPath_$eq")).get fieldSetter.invoke(platform, Some(newClassPath)) // Reload all jars specified into our compiler sparkIMain.global.invalidateClassPathEntries(jars.map(_.getPath): _*) } protected def mergeJarsIntoClassPath(platform: JavaPlatform, jars: URL*): MergedClassPath[AbstractFile] = { // Collect our new jars and add them to the existing set of classpaths val allClassPaths = ( platform.classPath .asInstanceOf[MergedClassPath[AbstractFile]].entries ++ jars.map(url => platform.classPath.context.newClassPath( io.AbstractFile.getFile(url.getPath)) ) ).distinct // Combine all of our classpaths (old and new) into one merged classpath new MergedClassPath( allClassPaths, platform.classPath.context ) } /** * Binds a variable in the interpreter to a value. 
* @param variableName The name to expose the value in the interpreter * @param typeName The type of the variable, must be the fully qualified class name * @param value The value of the variable binding * @param modifiers Any annotation, scoping modifiers, etc on the variable */ override def bind( variableName: String, typeName: String, value: Any, modifiers: List[String] ): Unit = { require(sparkIMain != null) sparkIMain.bind(variableName, typeName, value, modifiers) } /** * Executes body and will not print anything to the console during the execution * @param body The function to execute * @tparam T The return type of body * @return The return value of body */ override def doQuietly[T](body: => T): T = { require(sparkIMain != null) sparkIMain.beQuietDuring[T](body) } /** * Stops the interpreter, removing any previous internal state. * @return A reference to the interpreter */ override def stop(): Interpreter = { logger.info("Shutting down interpreter") // Shut down the task manager (kills current execution if (taskManager != null) taskManager.stop() taskManager = null // Erase our completer jLineCompleter = null // Close the entire interpreter (loses all state) if (sparkIMain != null) sparkIMain.close() sparkIMain = null this } /** * @return Returns a string to reference the URI of where the interpreted class files are created */ override def classServerURI: String = { require(sparkIMain != null) sparkIMain.classServerUri } /** * Returns the name of the variable created from the last execution. * @return Some String name if a variable was created, otherwise None */ override def lastExecutionVariableName: Option[String] = { require(sparkIMain != null) // TODO: Get this API method changed back to public in Apache Spark val lastRequestMethod = classOf[SparkIMain].getDeclaredMethod("lastRequest") lastRequestMethod.setAccessible(true) val request = lastRequestMethod.invoke(sparkIMain).asInstanceOf[SparkIMain#Request] val mostRecentVariableName = sparkIMain.mostRecentVar request.definedNames.map(_.toString).find(_ == mostRecentVariableName) } /** * Mask the Console and System objects with our wrapper implementations * and dump the Console methods into the public namespace (similar to * the Predef approach). * @param in The new input stream * @param out The new output stream * @param err The new error stream */ override def updatePrintStreams( in: InputStream, out: OutputStream, err: OutputStream ): Unit = { val inReader = new BufferedReader(new InputStreamReader(in)) val outPrinter = new PrintStream(out) val errPrinter = new PrintStream(err) sparkIMain.beQuietDuring { sparkIMain.bind( "Console", classOf[WrapperConsole].getName, new WrapperConsole(inReader, outPrinter, errPrinter), List("""@transient""") ) sparkIMain.bind( "System", classOf[WrapperSystem].getName, new WrapperSystem(in, out, err), List("""@transient""") ) sparkIMain.addImports("Console._") } } /** * Retrieves the contents of the variable with the provided name from the * interpreter. * @param variableName The name of the variable whose contents to read * @return An option containing the variable contents or None if the * variable does not exist */ override def read(variableName: String): Option[AnyRef] = { require(sparkIMain != null) val variable = sparkIMain.valueOfTerm(variableName) if (variable == null || variable.isEmpty) None else variable } /** * Starts the interpreter, initializing any internal state. 
* @return A reference to the interpreter */ override def start(): Interpreter = { require(sparkIMain == null && taskManager == null) taskManager = newTaskManager() logger.debug("Initializing task manager") taskManager.start() sparkIMain = newSparkIMain(settings, new JPrintWriter(multiOutputStream, true)) //logger.debug("Initializing interpreter") //sparkIMain.initializeSynchronous() logger.debug("Initializing completer") jLineCompleter = new SparkJLineCompletion(sparkIMain) sparkIMain.beQuietDuring { //logger.info("Rerouting Console and System related input and output") //updatePrintStreams(System.in, multiOutputStream, multiOutputStream) // ADD IMPORTS generates too many classes, client is responsible for adding import logger.debug("Adding org.apache.spark.SparkContext._ to imports") sparkIMain.addImports("org.apache.spark.SparkContext._") } this } /** * Attempts to perform code completion via the <TAB> command. * @param code The current cell to complete * @param pos The cursor position * @return The cursor position and list of possible completions */ override def completion(code: String, pos: Int): (Int, List[String]) = { require(jLineCompleter != null) logger.debug(s"Attempting code completion for ${code}") val regex = """[0-9a-zA-Z._]+$""".r val parsedCode = (regex findAllIn code).mkString("") logger.debug(s"Attempting code completion for ${parsedCode}") val result = jLineCompleter.completer().complete(parsedCode, pos) (result.cursor, result.candidates) } protected def newSettings(args: List[String]): Settings = new SparkCommandLine(args).settings protected def interpretAddTask(code: String, silent: Boolean): Future[IR.Result] = { taskManager.add { // Add a task using the given state of our streams StreamState.withStreams { if (silent) { sparkIMain.beSilentDuring { sparkIMain.interpret(code) } } else { sparkIMain.interpret(code) } } } } protected def interpretMapToResultAndExecuteInfo( future: Future[(Results.Result, String)] ): Future[(Results.Result, Either[ExecuteOutput, ExecuteFailure])] = { import scala.concurrent.ExecutionContext.Implicits.global future map { case (Results.Success, output) => (Results.Success, Left(output)) case (Results.Incomplete, output) => (Results.Incomplete, Left(output)) case (Results.Aborted, output) => (Results.Aborted, Right(null)) case (Results.Error, output) => val x = sparkIMain.valueOfTerm(ExecutionExceptionName) ( Results.Error, Right( interpretConstructExecuteError( sparkIMain.valueOfTerm(ExecutionExceptionName), output ) ) ) } } protected def interpretConstructExecuteError( value: Option[AnyRef], output: String ) = value match { // Runtime error case Some(e) if e != null => val ex = e.asInstanceOf[Throwable] // Clear runtime error message sparkIMain.directBind( ExecutionExceptionName, classOf[Throwable].getName, null ) ExecuteError( ex.getClass.getName, ex.getLocalizedMessage, ex.getStackTrace.map(_.toString).toList ) // Compile time error, need to check internal reporter case _ => if (sparkIMain.isReportingErrors) // TODO: This wrapper is not needed when just getting compile // error that we are not parsing... maybe have it be purely // output and have the error check this? ExecuteError( "Compile Error", output, List() ) else ExecuteError("Unknown", "Unable to retrieve error!", List()) } }
asorianostratio/incubator-toree
scala-interpreter/src/main/scala-2.10/org/apache/toree/kernel/interpreter/scala/ScalaInterpreterSpecific.scala
Scala
apache-2.0
14,056
/* * Copyright 2021 HM Revenue & Customs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package views.msb import models.businessmatching.{BusinessMatchingMsbServices, CurrencyExchange, ForeignExchange, TransmittingMoney} import org.scalatest.MustMatchers import play.api.i18n.Messages import utils.AmlsViewSpec import views.Fixture import views.html.msb.what_you_need class what_you_needSpec extends AmlsViewSpec with MustMatchers { trait ViewFixture extends Fixture { lazy val what_you_need = app.injector.instanceOf[what_you_need] implicit val requestWithToken = addTokenForView() def view = what_you_need() } "What you need View" must { "have the back link button" in new ViewFixture { doc.getElementsByAttributeValue("class", "link-back") must not be empty } "Have the correct title" in new ViewFixture { doc.title must startWith(Messages("title.wyn")) } "have the correct Headings" in new ViewFixture{ heading.html must be (Messages("title.wyn")) subHeading.html must include (Messages("summary.msb")) } "have an introduction to the list of information needed" in new ViewFixture{ html must include(Messages("msb.whatyouneed.requiredinfo.heading")) } "state that throughput info will be needed" in new ViewFixture { html must include(Messages("msb.whatyouneed.line_1")) } "state that branches or agents in other countries will be needed" in new ViewFixture { html must include(Messages("which countries you have branches or agents in, if you have any")) } "state system can identify linked transactions" in new ViewFixture { html must include(Messages("if your systems can identify linked transactions")) } "not display info that will not be needed" in new ViewFixture { html must not include Messages("your Intermediary Payment Service Provider’s name and Money Laundering Regulations number, if you use one") html must not include Messages("if you transfer money without using formal banking systems") html must not include Messages("the number of money transfers you expect to make in the next 12 months") html must not include Messages("which countries you expect to send the largest amounts of money to, if you send money to other countries") html must not include Messages("which countries you expect to send the most transactions to, if you send money to other countries") html must not include Messages("the number of currency exchange transactions you expect in the next 12 months") html must not include Messages("which currencies you expect to supply the most to your customers") html must not include Messages("who will supply your foreign currency, if you expect to deal in physical foreign currencies") html must not include Messages("the number foreign exchange transactions you expect in the next 12 months") } "Transmitting Money is a selected MSB subservice" when { trait TMViewFixture extends ViewFixture { override def view = what_you_need(BusinessMatchingMsbServices(Set(TransmittingMoney))) } "provide correct content for TM" in new TMViewFixture { html must include(Messages("your Intermediary Payment Service Provider’s name 
and Money Laundering Regulations number, if you use one")) html must include(Messages("if you transfer money without using formal banking systems")) html must include(Messages("the number of money transfers you expect to make in the next 12 months")) html must include(Messages("which countries you expect to send the largest amounts of money to, if you send money to other countries")) html must include(Messages("which countries you expect to send the most transactions to, if you send money to other countries")) } } "Currency Exchange is a selected MSB subservice" when { trait CXViewFixture extends ViewFixture { override def view = what_you_need(BusinessMatchingMsbServices(Set(CurrencyExchange))) } "provide correct content for currencyExchange" in new CXViewFixture { html must include(Messages("the number of currency exchange transactions you expect in the next 12 months")) html must include(Messages("which currencies you expect to supply the most to your customers")) html must include(Messages("who will supply your foreign currency, if you expect to deal in physical foreign currencies")) } } "Foreign Exchange is a selected MSB subservice" when { trait FXViewFixture extends ViewFixture { override def view = what_you_need(BusinessMatchingMsbServices(Set(ForeignExchange))) } "state foreign exchange transactions info will be needed" in new FXViewFixture { html must include(Messages("the number foreign exchange transactions you expect in the next 12 months")) } } } }
hmrc/amls-frontend
test/views/msb/what_you_needSpec.scala
Scala
apache-2.0
5,457
package lila.team

import akka.actor.ActorSelection
import akka.pattern.ask
import lila.hub.actorApi.message.LichessThread
import lila.hub.actorApi.router._

private[team] final class Notifier(
    sender: String,
    messenger: ActorSelection,
    router: ActorSelection) {

  import makeTimeout.large

  def acceptRequest(team: Team, request: Request) {
    teamUrl(team.id) foreach { url =>
      messenger ! LichessThread(
        from = sender,
        to = request.user,
        subject = """You have joined the team %s""".format(team.name),
        message = """Congratulation, your request to join the team was accepted! Here is the team page: %s""" format url
      )
    }
  }

  private def teamUrl(id: String) =
    router ? Abs(TeamShow(id)) mapTo manifest[String]
}
Happy0/lila
modules/team/src/main/Notifier.scala
Scala
mit
782
/* * Copyright 2006-2011 WorldWide Conferencing, LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package net.liftweb package util import scala.xml.{NodeSeq} import javax.mail._ import javax.mail.internet._ import javax.naming.{Context, InitialContext} import java.util.Properties import common._ import actor._ /** * Utilities for sending email. */ object Mailer extends MailerImpl /** * This trait implmenets the mail sending */ protected trait MailerImpl extends SimpleInjector { private val logger = Logger(classOf[MailerImpl]) sealed abstract class MailTypes sealed abstract class MailBodyType extends MailTypes case class PlusImageHolder(name: String, mimeType: String, bytes: Array[Byte]) /** * Represents a text/plain mail body. The given text will * be encoded as UTF-8 when sent. */ case class PlainMailBodyType(text: String) extends MailBodyType /** * Represents a text/plain mail body that is encoded with the * specified charset */ case class PlainPlusBodyType(text: String, charset: String) extends MailBodyType case class XHTMLMailBodyType(text: NodeSeq) extends MailBodyType case class XHTMLPlusImages(text: NodeSeq, items: PlusImageHolder*) extends MailBodyType sealed abstract class RoutingType extends MailTypes sealed abstract class AddressType(val adr: String) extends RoutingType case class From(address: String) extends AddressType(address) case class To(address: String) extends AddressType(address) case class CC(address: String) extends AddressType(address) case class Subject(subject: String) extends RoutingType case class BCC(address: String) extends AddressType(address) case class ReplyTo(address: String) extends AddressType(address) implicit def xmlToMailBodyType(html: NodeSeq): MailBodyType = XHTMLMailBodyType(html) case class MessageInfo(from: From, subject: Subject, info: List[MailTypes]) implicit def addressToAddress(in: AddressType): Address = new InternetAddress(in.adr) implicit def adListToAdArray(in: List[AddressType]): Array[Address] = in.map(a => new InternetAddress(a.adr)).toArray /** * Passwords cannot be accessed via System.getProperty. Instead, we * provide a means of explicitlysetting the authenticator. 
*/ //def authenticator = authenticatorFunc var authenticator: Box[Authenticator] = Empty /** * Use the mailer resource in your container by specifying the JNDI name */ var jndiName: Box[String] = Empty /** * Custom properties for the JNDI session */ var customProperties: Map[String, String] = Map() lazy val jndiSession: Box[Session] = for{ name <- jndiName contextObj <- Helpers.tryo(new InitialContext().lookup("java:comp/env")) context <- Box.asA[Context](contextObj) sessionObj <- Helpers.tryo(context.lookup(name)) session <- Box.asA[Session](sessionObj) } yield session lazy val properties: Properties = { val p = System.getProperties.clone.asInstanceOf[Properties] customProperties.foreach {case (name, value) => p.put(name, value)} // allow the properties file to set/override system properties Props.props.foreach { case (name, value) => p.setProperty(name, value) } p } /** * The host that should be used to send mail. */ def host = hostFunc() /** * To change the way the host is calculated, set this to the function that calcualtes the host name. * By default: System.getProperty("mail.smtp.host") */ var hostFunc: () => String = _host _ private def _host = properties.getProperty("mail.smtp.host") match { case null => "localhost" case s => s } def buildProps: Properties = { val p = properties.clone.asInstanceOf[Properties] p.getProperty("mail.smtp.host") match { case null => p.put("mail.smtp.host", host) case _ => } p } /** * Set the mail.charset property to something other than UTF-8 for non-UTF-8 * mail. */ lazy val charSet = properties.getProperty("mail.charset") match { case null => "UTF-8" case x => x } // def host_=(hostname: String) = System.setProperty("mail.smtp.host", hostname) protected class MsgSender extends SpecializedLiftActor[MessageInfo] { protected def messageHandler = { case MessageInfo(from, subject, info) => try { val session = authenticator match { case Full(a) => jndiSession openOr Session.getInstance(buildProps, a) case _ => jndiSession openOr Session.getInstance(buildProps) } val message = new MimeMessage(session) message.setFrom(from) message.setRecipients(Message.RecipientType.TO, info.flatMap {case x: To => Some[To](x) case _ => None}) message.setRecipients(Message.RecipientType.CC, info.flatMap {case x: CC => Some[CC](x) case _ => None}) message.setRecipients(Message.RecipientType.BCC, info.flatMap {case x: BCC => Some[BCC](x) case _ => None}) // message.setReplyTo(filter[MailTypes, ReplyTo](info, {case x @ ReplyTo(_) => Some(x); case _ => None})) message.setReplyTo(info.flatMap {case x: ReplyTo => Some[ReplyTo](x) case _ => None}) message.setSubject(subject.subject) val bodyTypes = info.flatMap {case x: MailBodyType => Some[MailBodyType](x); case _ => None} bodyTypes match { case PlainMailBodyType(txt) :: Nil => message.setText(txt) case _ => val multiPart = new MimeMultipart("alternative") bodyTypes.foreach { tab => val bp = new MimeBodyPart tab match { case PlainMailBodyType(txt) => bp.setText(txt, "UTF-8") case PlainPlusBodyType(txt, charset) => bp.setText(txt, charset) case XHTMLMailBodyType(html) => bp.setContent(html.toString, "text/html; charset=" + charSet) case XHTMLPlusImages(html, img@_*) => val html_mp = new MimeMultipart("related") val bp2 = new MimeBodyPart bp2.setContent(html.toString, "text/html; charset=" + charSet) html_mp.addBodyPart(bp2) img.foreach { i => val rel_bpi = new MimeBodyPart rel_bpi.setFileName(i.name) rel_bpi.setContentID(i.name) rel_bpi.setDisposition("inline") rel_bpi.setDataHandler(new javax.activation.DataHandler(new 
javax.activation.DataSource { def getContentType = i.mimeType def getInputStream = new java.io.ByteArrayInputStream(i.bytes) def getName = i.name def getOutputStream = throw new java.io.IOException("Unable to write to item") })) html_mp.addBodyPart(rel_bpi) } bp.setContent(html_mp) } multiPart.addBodyPart(bp) } message.setContent(multiPart); } MailerImpl.this.performTransportSend(message) } catch { case e: Exception => logger.error("Couldn't send mail", e) } } } protected def performTransportSend(msg: MimeMessage) = { import Props.RunModes._ (Props.mode match { case Development => devModeSend.vend case Test => testModeSend.vend case Staging => stagingModeSend.vend case Production => productionModeSend.vend case Pilot => pilotModeSend.vend case Profile => profileModeSend.vend }).apply(msg) } /** * How to send a message in dev mode. By default, use Transport.send(msg) */ lazy val devModeSend: Inject[MimeMessage => Unit] = new Inject[MimeMessage => Unit]((m: MimeMessage) => Transport.send(m)) {} /** * How to send a message in test mode. By default, log the message */ lazy val testModeSend: Inject[MimeMessage => Unit] = new Inject[MimeMessage => Unit]((m: MimeMessage) => logger.info("Sending Mime Message: "+m)) {} /** * How to send a message in staging mode. By default, use Transport.send(msg) */ lazy val stagingModeSend: Inject[MimeMessage => Unit] = new Inject[MimeMessage => Unit]((m: MimeMessage) => Transport.send(m)) {} /** * How to send a message in production mode. By default, use Transport.send(msg) */ lazy val productionModeSend: Inject[MimeMessage => Unit] = new Inject[MimeMessage => Unit]((m: MimeMessage) => Transport.send(m)) {} /** * How to send a message in pilot mode. By default, use Transport.send(msg) */ lazy val pilotModeSend: Inject[MimeMessage => Unit] = new Inject[MimeMessage => Unit]((m: MimeMessage) => Transport.send(m)) {} /** * How to send a message in profile mode. By default, use Transport.send(msg) */ lazy val profileModeSend: Inject[MimeMessage => Unit] = new Inject[MimeMessage => Unit]((m: MimeMessage) => Transport.send(m)) {} protected lazy val msgSender = new MsgSender /** * Asynchronously send an email. */ def sendMail(from: From, subject: Subject, rest: MailTypes*) { // forward it to an actor so there's no time on this thread spent sending the message msgSender ! MessageInfo(from, subject, rest.toList) } }
lift/lift
framework/lift-base/lift-util/src/main/scala/net/liftweb/util/Mailer.scala
Scala
apache-2.0
9,953
package dev.code_n_roll.gatling.jdbc.action

import dev.code_n_roll.gatling.jdbc.builder.column.ColumnHelper._
import io.gatling.commons.stats.{KO, OK}
import io.gatling.commons.util.DefaultClock
import io.gatling.core.Predef._
import io.gatling.core.action.Action
import io.gatling.core.session.Session
import io.gatling.core.stats.writer.ResponseMessage
import org.scalatest.Matchers._
import org.scalatest._
import scalikejdbc._

/**
  * Created by ronny on 12.05.17.
  */
class JdbcCreateTableActionSpec extends JdbcActionSpec {

  private val clock = new DefaultClock

  "JdbcCreateTableAction" should "use the request name in the log message" in {
    val requestName = "name"
    val latchAction = BlockingLatchAction()
    val action = JdbcCreateTableAction(requestName, "table", Seq(column(name("foo"), dataType("INTEGER"))), clock, statsEngine, latchAction)

    action.execute(session)
    waitForLatch(latchAction)

    statsEngine.dataWriterMsg should have length 1
    statsEngine.dataWriterMsg.head(session).toOption.get.asInstanceOf[ResponseMessage].name should equal(requestName)
  }

  it should "create the table with given name and columns" in {
    val action = JdbcCreateTableAction("request", "new_table", Seq(column(name("foo"), dataType("INTEGER"), constraint("PRIMARY KEY"))), clock, statsEngine, next)

    action.execute(session)

    val result = DB readOnly { implicit session =>
      sql"""SELECT * FROM information_schema.tables WHERE TABLE_NAME = 'NEW_TABLE' """.map(rs => rs.toMap()).single().apply()
    }
    result should not be empty
  }

  it should "log an OK message when successfully creating the table" in {
    val latchAction = BlockingLatchAction()
    val action = JdbcCreateTableAction("request", "ok_table", Seq(column(name("foo"), dataType("INTEGER"), constraint("PRIMARY KEY"))), clock, statsEngine, latchAction)

    action.execute(session)
    waitForLatch(latchAction)

    statsEngine.dataWriterMsg should have length 1
    statsEngine.dataWriterMsg.head(session).toOption.get.asInstanceOf[ResponseMessage].status should equal(OK)
  }

  it should "log a KO message if an error occurs" in {
    val latchAction = BlockingLatchAction()
    val latchAction2 = BlockingLatchAction()
    val action = JdbcCreateTableAction("request", "ko_table", Seq(column(name("foo"), dataType("INTEGER"), constraint("PRIMARY KEY"))), clock, statsEngine, latchAction)
    val action2 = JdbcCreateTableAction("request", "ko_table", Seq(column(name("foo"), dataType("INTEGER"), constraint("PRIMARY KEY"))), clock, statsEngine, latchAction2)

    action.execute(session)
    waitForLatch(latchAction)
    action2.execute(session)
    waitForLatch(latchAction2)

    statsEngine.dataWriterMsg should have length 2
    statsEngine.dataWriterMsg.head(session).toOption.get.asInstanceOf[ResponseMessage].status should equal(KO)
  }

  it should "throw an IAE when the column name cannot be validated" in {
    val action = JdbcCreateTableAction("request", "exc_table", Seq(column(name("${foo}"), dataType("INTEGER"), constraint("PRIMARY KEY"))), clock, statsEngine, next)

    an[IllegalArgumentException] should be thrownBy action.execute(session)
  }

  it should "throw an IAE when the column data type cannot be validated" in {
    val action = JdbcCreateTableAction("request", "exc_table", Seq(column(name("foo"), dataType("${INTEGER}"), constraint("PRIMARY KEY"))), clock, statsEngine, next)

    an[IllegalArgumentException] should be thrownBy action.execute(session)
  }

  it should "throw an IAE when the column constraint cannot be validated" in {
    val action = JdbcCreateTableAction("request", "exc_table", Seq(column(name("foo"), dataType("INTEGER"), constraint("${constraint}"))), clock, statsEngine, next)

    an[IllegalArgumentException] should be thrownBy action.execute(session)
  }

  it should "throw an IAE when the table name cannot be validated" in {
    val action = JdbcCreateTableAction("request", "${exc_table}", Seq(column(name("foo"), dataType("INTEGER"), constraint("PRIMARY KEY"))), clock, statsEngine, next)

    an[IllegalArgumentException] should be thrownBy action.execute(session)
  }

  it should "pass the session to the next action" in {
    val nextAction = NextAction(session)
    val action = JdbcCreateTableAction("request", "next_table", Seq(column(name("foo"), dataType("INTEGER"), constraint("PRIMARY KEY"))), clock, statsEngine, nextAction)

    action.execute(session)
    waitForLatch(nextAction)

    nextAction.called should be(true)
  }
}
rbraeunlich/gatling-jdbc
src/test/scala/dev/code_n_roll/gatling/jdbc/action/JdbcCreateTableActionSpec.scala
Scala
apache-2.0
4,526
/* * Copyright (C) 2015 Cotiviti Labs ([email protected]) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.signalcollect.triplerush import org.apache.jena.graph.{ Capabilities, GraphEvents, GraphStatisticsHandler, Node, Node_ANY, Node_Blank, Node_Variable, Triple} import org.apache.jena.graph.impl.GraphBase import org.apache.jena.query.ARQ import org.apache.jena.rdf.model.ModelFactory import org.apache.jena.rdf.model.Model import org.apache.jena.sparql.engine.main.StageGenerator import org.apache.jena.util.iterator.{ ExtendedIterator, WrappedIterator } import com.signalcollect.triplerush.sparql.NodeConversion import com.signalcollect.triplerush.sparql.TripleRushIterator import com.signalcollect.triplerush.sparql.TripleRushStageGenerator import collection.JavaConversions._ /** * A TripleRush implementation of the Jena Graph interface. */ trait JenaGraphAdapter extends GraphBase with GraphStatisticsHandler { this: TripleRush => def getModel: Model = ModelFactory.createModelForGraph(this) // Set TripleRushStageGenerator as default for all queries. val tripleRushStageGen = ARQ.getContext.get(ARQ.stageGenerator) match { case g: TripleRushStageGenerator => new TripleRushStageGenerator(g.other) case otherGraph: StageGenerator => new TripleRushStageGenerator(otherGraph) case _: Any => throw new Exception("No valid stage generator found.") } ARQ.getContext.set(ARQ.stageGenerator, tripleRushStageGen) def getStatistic(s: Node, p: Node, o: Node): Long = { val q = Seq(arqNodesToPattern(s, p, o)) count(q) } override def createStatisticsHandler: JenaGraphAdapter = this /** * Meaning of prefixes in the encoded string: * - Everything that starts with a letter is interpreted as an IRI, * because their schema has to start with a letter. * - If a string starts with a digit or a hyphen, then it is interpreted as an integer literal. * - If a string starts with `"` or "<", then it is interpreted as a general literal. 
*/ override def performAdd(triple: Triple): Unit = { addTriple(triple) } override def clear: Unit = { getEventManager.notifyEvent(this, GraphEvents.removeAll) throw new UnsupportedOperationException("TripleRush implementation does not support the `clear` operation.") } override def close: Unit = { super.close } def graphBaseFind(triplePattern: Triple): ExtendedIterator[Triple] = { val s = triplePattern.getSubject val p = triplePattern.getPredicate val o = triplePattern.getObject val pattern = arqNodesToPattern(s, p, o) val resultIterator = resultIteratorForQuery(Seq(pattern)) val concreteS = if (s.isConcrete) Some(NodeConversion.nodeToString(s)) else None val concreteP = if (p.isConcrete) Some(NodeConversion.nodeToString(p)) else None val concreteO = if (o.isConcrete) Some(NodeConversion.nodeToString(o)) else None val convertedIterator = TripleRushIterator.convert(concreteS, concreteP, concreteO, dictionary, resultIterator) WrappedIterator.createNoRemove(convertedIterator) } override def graphBaseContains(t: Triple): Boolean = { getStatistic(t.getSubject, t.getPredicate, t.getObject) >= 1 } override def graphBaseSize: Int = { val wildcard = Node.ANY val sizeAsLong = getStatistic(wildcard, wildcard, wildcard) if (sizeAsLong <= Int.MaxValue) { sizeAsLong.toInt } else { Int.MaxValue // Better than crashing? } } // TODO: Make more efficient by unrolling everything. // TODO: Does not support using the same variable/blank node multiple times. Test if this case needs to be supported. private[this] def arqNodesToPattern(s: Node, p: Node, o: Node): TriplePattern = { var nextVariableId = -1 @inline def nodeToId(n: Node): Int = { n match { case variable: Node_ANY => val id = nextVariableId nextVariableId -= 1 id case variable: Node_Variable => throw new UnsupportedOperationException("Variables not supported.") case blank: Node_Blank => val blankNodeString = NodeConversion.nodeToString(blank) dictionary(blankNodeString) case other @ _ => val otherNodeString = NodeConversion.nodeToString(other) dictionary(otherNodeString) } } val sId = nodeToId(s) val pId = nodeToId(p) val oId = nodeToId(o) TriplePattern(sId, pId, oId) } override val getCapabilities = new Capabilities { val sizeAccurate = true val addAllowed = true def addAllowed(everyTriple: Boolean) = true val deleteAllowed = false def deleteAllowed(everyTriple: Boolean) = false val iteratorRemoveAllowed = false val canBeEmpty = true val findContractSafe = true val handlesLiteralTyping = false } }
uzh/triplerush
src/main/scala/com/signalcollect/triplerush/JenaGraphAdapter.scala
Scala
apache-2.0
5,365
package mesosphere.marathon.core.task.bus

import mesosphere.marathon.core.task.Task
import mesosphere.marathon.core.task.bus.TaskStatusObservables.TaskStatusUpdate
import mesosphere.marathon.state.{ PathId, Timestamp }
import org.apache.mesos.Protos.TaskID
import org.joda.time.DateTime

class TaskStatusUpdateTestHelper(val wrapped: TaskStatusUpdate) {
  def withTaskId(taskId: String): TaskStatusUpdateTestHelper = {
    withTaskId(TaskID.newBuilder().setValue(taskId).build())
  }

  def withTaskId(taskId: TaskID): TaskStatusUpdateTestHelper = TaskStatusUpdateTestHelper {
    wrapped.copy(taskId = Task.Id(taskId))
  }

  def withTaskId(taskId: Task.Id): TaskStatusUpdateTestHelper = TaskStatusUpdateTestHelper {
    wrapped.copy(taskId = taskId)
  }

  def withAppId(appId: String): TaskStatusUpdateTestHelper = {
    withTaskId(TaskStatusUpdateTestHelper.newTaskID(appId))
  }

  def withStatus(status: MarathonTaskStatus): TaskStatusUpdateTestHelper = TaskStatusUpdateTestHelper {
    wrapped.copy(status = status)
  }
}

object TaskStatusUpdateTestHelper {
  def apply(update: TaskStatusUpdate): TaskStatusUpdateTestHelper = new TaskStatusUpdateTestHelper(update)

  private def newTaskID(appId: String) = {
    Task.Id.forApp(PathId(appId))
  }

  val taskId = newTaskID("/app")

  val running = TaskStatusUpdateTestHelper(
    TaskStatusUpdate(
      timestamp = Timestamp.apply(new DateTime(2015, 2, 3, 12, 30, 0, 0)),
      taskId = taskId,
      status = MarathonTaskStatusTestHelper.running
    )
  )

  val runningHealthy = TaskStatusUpdateTestHelper(
    TaskStatusUpdate(
      timestamp = Timestamp.apply(new DateTime(2015, 2, 3, 12, 30, 0, 0)),
      taskId = taskId,
      status = MarathonTaskStatusTestHelper.runningHealthy
    )
  )

  val runningUnhealthy = TaskStatusUpdateTestHelper(
    TaskStatusUpdate(
      timestamp = Timestamp.apply(new DateTime(2015, 2, 3, 12, 30, 0, 0)),
      taskId = taskId,
      status = MarathonTaskStatusTestHelper.runningUnhealthy
    )
  )

  val staging = TaskStatusUpdateTestHelper(
    TaskStatusUpdate(
      timestamp = Timestamp.apply(new DateTime(2015, 2, 3, 12, 31, 0, 0)),
      taskId = taskId,
      status = MarathonTaskStatusTestHelper.staging
    )
  )

  val finished = TaskStatusUpdateTestHelper(
    TaskStatusUpdate(
      timestamp = Timestamp.apply(new DateTime(2015, 2, 3, 12, 31, 0, 0)),
      taskId = taskId,
      status = MarathonTaskStatusTestHelper.finished
    )
  )

  val lost = TaskStatusUpdateTestHelper(
    TaskStatusUpdate(
      timestamp = Timestamp.apply(new DateTime(2015, 2, 3, 12, 31, 0, 0)),
      taskId = taskId,
      status = MarathonTaskStatusTestHelper.lost
    )
  )

  val killed = TaskStatusUpdateTestHelper(
    TaskStatusUpdate(
      timestamp = Timestamp.apply(new DateTime(2015, 2, 3, 12, 31, 0, 0)),
      taskId = taskId,
      status = MarathonTaskStatusTestHelper.killed
    )
  )

  val error = TaskStatusUpdateTestHelper(
    TaskStatusUpdate(
      timestamp = Timestamp.apply(new DateTime(2015, 2, 3, 12, 31, 0, 0)),
      taskId = taskId,
      status = MarathonTaskStatusTestHelper.error
    )
  )
}
pgkelley4/marathon
src/test/scala/mesosphere/marathon/core/task/bus/TaskStatusUpdateTestHelper.scala
Scala
apache-2.0
3,134
package org.oxygen.redio.items import net.minecraft.item.Item import org.oxygen.redio.CreativeTab object ItemScreen extends Item { setCreativeTab(CreativeTab) setMaxStackSize(1) setUnlocalizedName("screen") }
chenzhuoyu/RedIO
src/main/scala/org/oxygen/redio/items/ItemScreen.scala
Scala
lgpl-2.1
214
/*
 * Copyright 2016 The BigDL Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.intel.analytics.bigdl.dllib.utils

import java.util.concurrent._

import com.intel.analytics.bigdl.mkl.hardware.Affinity
import com.intel.analytics.bigdl.mkl.{MKL, MklDnn => BackendMklDnn}
import org.apache.commons.lang.exception.ExceptionUtils
import org.apache.logging.log4j.LogManager

import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future}

/**
 * A thread pool wrapper that provides some helper functions for multi-threading.
 *
 * TODO: `ThreadPool` exposes two flavours of thread pool, a Scala one (`invokeAndWait`)
 * and a Java-oriented one (`invokeAndWait2`). The design is awkward and should be
 * refactored later.
 */
class ThreadPool(private var poolSize: Int) {

  import ThreadPool._

  private var mklPoolSize : Option[Int] = None
  private var threadPool: ExecutorService = null

  private var context = spawnThreadPool(poolSize)

  private def spawnThreadPool(poolSize: Int): ExecutionContext = {
    if (poolSize == 1) {
      threadPool = Executors.newFixedThreadPool(poolSize, new ThreadFactory {
        override def newThread(r: Runnable): Thread = {
          val t = Executors.defaultThreadFactory().newThread(r)
          t.setName("single-thread-computing")
          t.setDaemon(true)
          t
        }
      })
      singleThreadPool
    } else {
      new ExecutionContext {
        if (threadPool != null) threadPool.shutdown()
        threadPool = Executors.newFixedThreadPool(poolSize, new ThreadFactory {
          override def newThread(r: Runnable): Thread = {
            val t = Executors.defaultThreadFactory().newThread(r)
            t.setName("default-thread-computing " + t.getId)
            t.setDaemon(true)
            t
          }
        })

        def execute(runnable: Runnable) {
          threadPool.submit(runnable)
        }

        def reportFailure(t: Throwable) {}
      }
    }
  }

  def getPoolSize : Int = poolSize

  /**
   * Set MKL thread pool size
   *
   * @param size
   * @return
   */
  def setMKLThread(size: Int): this.type = this.synchronized {
    require(MKL.isMKLLoaded)
    mklPoolSize = Some(size)
    (1 to poolSize).map(i => Future {
      MKL.setNumThreads(size)
      val tid = Thread.currentThread().getId()
      logger.info(s"Set mkl threads to $size on thread $tid")
    }(context)).foreach(Await.result(_, Duration.Inf))
    this
  }

  def setMKLThreadOfMklDnnBackend(size: Int): this.type = this.synchronized {
    mklPoolSize = Some(size)

    this.invokeAndWait2((0 until 1).map(_ => () => {
      if (System.getProperty("bigdl.flushDenormalState", "true").toBoolean) {
        BackendMklDnn.setFlushDenormalState()
      }

      require(MKL.isMKLLoaded)
      require(BackendMklDnn.isLoaded)

      MKL.setNumThreads(size)
      BackendMklDnn.setNumThreads(size)
      if (!System.getProperty("bigdl.disableOmpAffinity", "false").toBoolean) {
        Affinity.setOmpAffinity()
      }
    }))

    this
  }

  /**
   * Invoke a batch of tasks and wait for all of them to finish
   *
   * @param tasks
   * @param timeout
   * @tparam T
   * @return
   */
  def invokeAndWait[T](tasks: Seq[() => T], timeout: Duration = Duration.Inf): Seq[T] = {
    tasks.map(task => Future {
      try {
        task()
      } catch {
        case t : Throwable => logger.error("Error: " + ExceptionUtils.getStackTrace(t))
          throw t
      }
    }(context)).map(future => {
      Await.result(future, timeout)
    })
  }

  private type JavaFuture[T] = java.util.concurrent.Future[T]

  /**
   * Use Java futures to execute the tasks. This call blocks until all tasks have completed.
   * If any task throws an exception, that exception is rethrown in the caller.
   *
   * @param tasks task sequence. each task's return type is T
   * @param timeout the maximum time to wait
   * @param timeUnit the time unit for the timeout
   * @tparam T return type of tasks
   * @return a sequence of Futures representing the tasks.
   */
  def invokeAndWait2[T](tasks: Seq[() => T], timeout: Long = Long.MaxValue,
    timeUnit: TimeUnit = TimeUnit.NANOSECONDS): mutable.Buffer[JavaFuture[T]] = {
    val callables = tasks.map(task => new Callable[T] {
      override def call(): T = {
        task()
      }
    })
    val resultFutures = threadPool.invokeAll(callables.asJava, timeout, timeUnit)

    // Check every future in the list; if any task threw an exception, rethrow it here.
    var i = 0
    while (i < resultFutures.size()) {
      try {
        resultFutures.get(i).get()
      } catch {
        case t: ExecutionException => throw t.getCause
        case i: InterruptedException => throw i.getCause
      }
      i += 1
    }

    resultFutures.asScala
  }

  def invoke2[T](tasks: Seq[() => T]): Seq[JavaFuture[T]] = {
    tasks.map(task => new Callable[T] {
      override def call(): T = {
        try {
          task()
        } catch {
          case t : Throwable => logger.error("Error: " + ExceptionUtils.getStackTrace(t))
            throw t
        }
      }
    }).map(threadPool.submit(_))
  }

  /**
   * Invoke a batch of tasks
   *
   * @param tasks
   */
  def invoke[T](tasks: Seq[() => T]): Seq[Future[T]] = {
    tasks.map(task => Future {
      try {
        task()
      } catch {
        case t : Throwable => logger.error("Error: " + ExceptionUtils.getStackTrace(t))
          throw t
      }
    }(context))
  }

  /**
   * Invoke a single task
   *
   * @param task
   */
  def invoke[T](task: () => T): Future[T] = {
    Future {
      try {
        task()
      } catch {
        case t : Throwable => logger.error("Error: " + ExceptionUtils.getStackTrace(t))
          throw t
      }
    }(context)
  }

  /**
   * Wait for all the tasks in the wait queue to finish
   *
   * @param timeout
   */
  def sync(futures: Seq[Future[_]], timeout: Duration = Duration.Inf): Unit = {
    futures.foreach(f => {
      Await.result(f, timeout)
    })
  }

  /**
   * Set pool size
   *
   * @param size
   * @return
   */
  def setPoolSize(size: Int): this.type = this.synchronized {
    if (size != poolSize) {
      context = spawnThreadPool(size)
      poolSize = size
      if(mklPoolSize.isDefined) {
        this.setMKLThread(mklPoolSize.get)
      }
    }
    this
  }

}

object ThreadPool {
  val singleThreadPool = new ExecutionContext {
    def execute(runnable: Runnable) {
      runnable.run()
    }

    def reportFailure(t: Throwable) {}
  }

  private val logger = LogManager.getLogger(getClass)
}
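// A minimal usage sketch of the `invokeAndWait` contract documented above, assuming the
// default timeout; the object name below is illustrative only, not part of the BigDL API.
private[utils] object ThreadPoolUsageSketch {
  def squares(): Seq[Int] = {
    val pool = new ThreadPool(4)
    // Each task is a () => Int; invokeAndWait blocks until every task finishes
    // and returns the results in task order.
    pool.invokeAndWait((1 to 4).map(i => () => i * i)) // Seq(1, 4, 9, 16)
  }
}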
intel-analytics/BigDL
scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/utils/ThreadPool.scala
Scala
apache-2.0
7,227
package com.twitter.finagle.memcached.protocol.text.server import com.twitter.finagle.memcached.protocol.text.Framer import com.twitter.finagle.memcached.util.ParserUtils import com.twitter.io.Buf private[finagle] class ServerFramer(storageCommands: Set[Buf]) extends Framer { // The data length is the 5th token, interpreted as an Int. def dataLength(tokens: Seq[Buf]): Int = if (tokens.nonEmpty) { val commandName = tokens.head if (storageCommands.contains(commandName) && tokens.length >= 5) { val dataLengthAsBuf = tokens(4) ParserUtils.bufToInt(dataLengthAsBuf) } else -1 } else -1 }
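// A minimal sketch of the dataLength contract described above, assuming a memcached-style
// storage command of the form "set <key> <flags> <exptime> <bytes>". The object name and
// the sample tokens are illustrative only.
private[finagle] object ServerFramerSketch {
  def exampleDataLength(): Int = {
    val framer = new ServerFramer(storageCommands = Set(Buf.Utf8("set")))
    val tokens = Seq("set", "mykey", "0", "0", "10").map(Buf.Utf8(_))
    framer.dataLength(tokens) // 10: the 5th token, parsed as the payload length
  }
}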
twitter/finagle
finagle-memcached/src/main/scala/com/twitter/finagle/memcached/protocol/text/server/ServerFramer.scala
Scala
apache-2.0
638
class D extends C { // mention abc name to check if local inheritance dependencies are _not_ included in member reference // extension of inheritance invalidation def bar(abc: Int): Int = abc }
dotty-staging/dotty
sbt-test/source-dependencies/local-class-inheritance/D.scala
Scala
apache-2.0
200
package io.soheila.um.daos.activities

import io.soheila.um.entities.UserActivity
import io.soheila.um.vos.activities.UserActivityQuery
import io.soheila.commons.crud.CRUDDAO
import io.soheila.commons.entities.Page
import io.soheila.commons.exceptions.MongoDAOException

import scala.concurrent.Future

/**
 * Handles persistence actions on user activities.
 */
trait UserActivityDAO extends CRUDDAO[UserActivity, String] {
  def find(userActivityQuery: UserActivityQuery, page: Int, limit: Int, sortFilter: Option[(String, Int)]): Future[Either[MongoDAOException, Page[UserActivity]]]
}
esfand-r/soheila-um
src/main/scala/io/soheila/um/daos/activities/UserActivityDAO.scala
Scala
apache-2.0
566
/*******************************************************************************
 * Copyright (c) 2016 Logimethods
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the MIT License (MIT)
 * which accompanies this distribution, and is available at
 * http://opensource.org/licenses/MIT
 *******************************************************************************/
package com.logimethods.smartmeter.generate

import breeze.interpolation._
import breeze.linalg._
import java.time.DayOfWeek
import java.time.DayOfWeek._

// https://github.com/scalanlp/breeze/wiki/Interpolation

abstract class InterpolatedProfile {
  def vectorRange(vector: DenseVector[Double]): Double = {
    var min = Double.MaxValue
    var max = Double.MinValue
    for (value <- vector){
      if (value > max) { max = value }
      if (value < min) { min = value }
    }
    max - min
  }

  val hours = DenseVector(0.0 to 24.0 by 3.0 toArray)

  val weekValues: DenseVector[Double]
  lazy val weekRange = vectorRange(weekValues)
  lazy val weekFunctionInterpolator = LinearInterpolator(hours, weekValues)
  def weekFunction(hour: Double) = weekFunctionInterpolator(hour)

  val weekendValues: DenseVector[Double]
  lazy val weekendRange = vectorRange(weekendValues)
  lazy val weekendFunctionInterpolator = LinearInterpolator(hours, weekendValues)
  def weekendFunction(hour: Double) = weekendFunctionInterpolator(hour)

  // Used to create a systematic bias per usagePointPK + dayInWeek + hourInDay;
  // the hash-based term is divided by 20.0 so that it contributes a fractional,
  // deterministic offset for each (usage point, day, hour) combination.
  def bias(usagePointPK: String, dayInWeek: Int, hourInDay: Int, rndValue: Float, range: Double) = {
    0.4 * range * (rndValue + (((usagePointPK.hashCode() + dayInWeek + hourInDay) % 20) / 20.0) - 0.5)
  }

  def valueAtDayAndHour(usagePointPK: String, dayInWeek: DayOfWeek, hourInDay: Int, rndValue: Float): Float =
    dayInWeek match {
      case MONDAY | TUESDAY | WEDNESDAY | THURSDAY | FRIDAY =>
        math.abs((bias(usagePointPK, dayInWeek.ordinal(), hourInDay, rndValue, weekRange) + weekFunction(hourInDay)).toFloat)
      case SATURDAY | SUNDAY =>
        math.abs((bias(usagePointPK, dayInWeek.ordinal(), hourInDay, rndValue, weekendRange) + weekendFunction(hourInDay)).toFloat)
    }
}

/**
 * Consumer
 */
object ConsumerInterpolatedDemandProfile extends InterpolatedProfile {
  val weekValues = DenseVector(/*0am*/ 100.0, /*3am*/ 100.0, /*6am*/ 80.0, /*9am*/ 50.0, /*12am*/ 60.0, /*3pm*/ 80.0, /*6pm*/ 90.0, /*9pm*/ 115.0, /*12pm*/ 100.0)
  val weekendValues = DenseVector(/*0am*/ 100.0, /*3am*/ 100.0, /*6am*/ 80.0, /*9am*/ 90.0, /*12am*/ 80.0, /*3pm*/ 90.0, /*6pm*/ 115.0, /*9pm*/ 90.0, /*12pm*/ 100.0)
}
object ConsumerInterpolatedVoltageProfile extends InterpolatedProfile {
  val weekValues = DenseVector(/*0am*/ 117.0, /*3am*/ 117.0, /*6am*/ 118.0, /*9am*/ 116.0, /*12am*/ 115.0, /*3pm*/ 114.0, /*6pm*/ 116.0, /*9pm*/ 118.0, /*12pm*/ 117.0)
  val weekendValues = DenseVector(/*0am*/ 117.0, /*3am*/ 117.0, /*6am*/ 116.0, /*9am*/ 116.0, /*12am*/ 118.0, /*3pm*/ 119.0, /*6pm*/ 119.0, /*9pm*/ 118.0, /*12pm*/ 117.0)
}

/**
 * Business
 */
object BusinessInterpolatedDemandProfile extends InterpolatedProfile {
  val weekValues = DenseVector(/*0am*/ 60.0, /*3am*/ 80.0, /*6am*/ 160.0, /*9am*/ 280.0, /*12am*/ 300.0, /*3pm*/ 290.0, /*6pm*/ 180.0, /*9pm*/ 80.0, /*12pm*/ 60.0)
  val weekendValues = DenseVector(/*0am*/ 100.0, /*3am*/ 80.0, /*6am*/ 70.0, /*9am*/ 70.0, /*12am*/ 60.0, /*3pm*/ 70.0, /*6pm*/ 80.0, /*9pm*/ 90.0, /*12pm*/ 100.0)
}
object BusinessInterpolatedVoltageProfile extends InterpolatedProfile {
  val weekValues = DenseVector(/*0am*/ 117.0, /*3am*/ 117.0, /*6am*/ 118.0, /*9am*/ 119.0, /*12am*/ 119.0,
/*3pm*/ 120.0, /*6pm*/ 118.0, /*9pm*/ 117.0, /*12pm*/ 117.0) val weekendValues = DenseVector(/*0am*/ 117.0, /*3am*/ 116.0, /*6am*/ 116.0, /*9am*/ 115.0, /*12am*/ 114.0, /*3pm*/ 114.0, /*6pm*/ 114.0, /*9pm*/ 115.0, /*12pm*/ 117.0) } /** * Industry */ object IndustryInterpolatedDemandProfile extends InterpolatedProfile { val weekValues = DenseVector(/*0am*/ 3000.0, /*3am*/ 3000.0, /*6am*/ 3200.0, /*9am*/ 3300.0, /*12am*/ 3200.0, /*3pm*/ 3300.0, /*6pm*/ 3100.0, /*9pm*/ 3000.0, /*12pm*/ 3000.0) val weekendValues = DenseVector(/*0am*/ 2700.0, /*3am*/ 2700.0, /*6am*/ 2750.0, /*9am*/ 2800.0, /*12am*/ 2800.0, /*3pm*/ 2850.0, /*6pm*/ 2750.0, /*9pm*/ 2700.0, /*12pm*/ 2700.0) } object IndustryInterpolatedVoltageProfile extends InterpolatedProfile { val weekValues = DenseVector(/*0am*/ 120.0, /*3am*/ 120.0, /*6am*/ 121.0, /*9am*/ 122.0, /*12am*/ 121.0, /*3pm*/ 122.0, /*6pm*/ 121.0, /*9pm*/ 121.0, /*12pm*/ 120.0) val weekendValues = DenseVector(/*0am*/ 120.0, /*3am*/ 120.0, /*6am*/ 121.0, /*9am*/ 121.0, /*12am*/ 121.0, /*3pm*/ 121.0, /*6pm*/ 120.0, /*9pm*/ 120.0, /*12pm*/ 120.0) } /** * InterpolatedProfileByUsagePoint */ object InterpolatedProfileByUsagePoint extends Profile { val caseNb = 10 val caseFn = (usagePointPK: String) => usagePointPK.hashCode().abs % caseNb def demandAtDayAndHour(usagePointPK: String, dayInWeek: DayOfWeek, hourInDay: Int, rndValue: Float): Float = caseFn(usagePointPK) match { case 0 | 1 | 2 => BusinessInterpolatedDemandProfile.valueAtDayAndHour(usagePointPK, dayInWeek, hourInDay, rndValue) case 3 | 4 => IndustryInterpolatedDemandProfile.valueAtDayAndHour(usagePointPK, dayInWeek, hourInDay, rndValue) case 5 | 6 | 7 | 8 | 9 => ConsumerInterpolatedDemandProfile.valueAtDayAndHour(usagePointPK, dayInWeek, hourInDay, rndValue) } def voltageAtDayAndHour(usagePointPK: String, dayInWeek: DayOfWeek, hourInDay: Int, rndValue: Float): Float = caseFn(usagePointPK) match { case 0 | 1 | 2 => BusinessInterpolatedVoltageProfile.valueAtDayAndHour(usagePointPK, dayInWeek, hourInDay, rndValue) case 3 | 4 => IndustryInterpolatedVoltageProfile.valueAtDayAndHour(usagePointPK, dayInWeek, hourInDay, rndValue) case 5 | 6 | 7 | 8 | 9 => ConsumerInterpolatedVoltageProfile.valueAtDayAndHour(usagePointPK, dayInWeek, hourInDay, rndValue) } }
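// A minimal sketch of how the profiles above are meant to be queried, assuming the caller
// supplies a pseudo-random value in [0, 1). The object name and the sample usage-point key
// are illustrative only.
object InterpolatedProfileSketch {
  def mondayMorningDemand(): Float =
    InterpolatedProfileByUsagePoint.demandAtDayAndHour("usagePoint-42", DayOfWeek.MONDAY, 9, 0.5f)
}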
Logimethods/smart-meter
dockerfile-app_inject/src/main/scala/com/logimethods/smartmeter/generate/InterpolatedProfile.scala
Scala
mit
6,420
package scala.virtualization.lms.internal /* Defines OpenCL specific device transfer functions */ trait OpenCLDeviceTransfer extends AbstractDeviceTransfer { this: OpenCLCodegen => val IR: Expressions import IR._ def emitSendSlave(tp: Manifest[_]): (String,String) = { if (isPrimitiveType(tp)) { val out = new StringBuilder val signature = "%s sendOpenCL_%s(%s sym)".format(remap(tp),mangledName(remap(tp)),remap(tp)) out.append(signature + " {\\n") out.append("\\treturn sym;\\n") out.append("}\\n") (signature+";\\n", out.toString) } else { throw new GenerationFailedException("OpenCLDeviceTransfer: Unknown type " + tp.toString) } } def emitRecvSlave(tp: Manifest[_]): (String,String) = { if (isPrimitiveType(tp)) { val out = new StringBuilder val signature = "%s recvOpenCL_%s(%s sym)".format(remap(tp),mangledName(remap(tp)),remap(tp)) out.append(signature + " {\\n") out.append("\\treturn sym;\\n") out.append("}\\n") (signature+";\\n", out.toString) } else { throw new GenerationFailedException("OpenCLDeviceTransfer: Unknown type " + tp.toString) } } /* def emitSendViewSlave(sym: Sym[Any]): (String,String) = { if (isPrimitiveType(sym.tp)) { val out = new StringBuilder val signature = "%s sendViewOpenCL_%s(%s %s)".format(remap(sym.tp),quote(sym),remap(sym.tp),quote(sym)) out.append(signature + " {\\n") out.append("\\tassert(false);\\n") out.append("\\treturn %s;\\n".format(quote(sym))) out.append("}\\n") (signature+";\\n", out.toString) } else { throw new GenerationFailedException("OpenCLDeviceTransfer: Unknown type " + sym.tp.toString) } } def emitRecvViewSlave(sym: Sym[Any]): (String,String) = { if (isPrimitiveType(sym.tp)) { val out = new StringBuilder val signature = "%s recvViewOpenCL_%s(%s %s)".format(remap(sym.tp),quote(sym),remap(sym.tp),quote(sym)) out.append(signature + " {\\n") out.append("\\tassert(false);\\n") out.append("\\treturn %s;\\n".format(quote(sym))) out.append("}\\n") (signature+";\\n", out.toString) } else { throw new GenerationFailedException("OpenCLDeviceTransfer: Unknown type " + sym.tp.toString) } } */ def emitSendUpdateSlave(tp: Manifest[_]): (String,String) = { if(isPrimitiveType(tp)) { val out = new StringBuilder val signature = "void sendUpdateOpenCL_%s(%s sym)".format(mangledName(remap(tp)),remap(tp)) out.append(signature + " {\\n") out.append("\\tassert(false);\\n") out.append("}\\n") (signature+";\\n", out.toString) } else { throw new GenerationFailedException("OpenCLDeviceTransfer: Unknown type " + tp.toString) } } def emitRecvUpdateSlave(tp: Manifest[_]): (String,String) = { if(isPrimitiveType(tp)) { val out = new StringBuilder val signature = "void recvUpdateOpenCL_%s(%s sym)".format(mangledName(remap(tp)),remap(tp)) out.append(signature + " {\\n") out.append("\\tassert(false);\\n") out.append("}\\n") (signature+";\\n", out.toString) } else { throw new GenerationFailedException("OpenCLDeviceTransfer: Unknown type " + tp.toString) } } }
afernandez90/virtualization-lms-core
src/internal/OpenCLDeviceTransfer.scala
Scala
bsd-3-clause
3,279
class JustGeneric[T, G] new JustGeneric[Int, /*caret*/] //T, G
triggerNZ/intellij-scala
testdata/parameterInfo/typeParameterInfo/SimpleTests/JustGeneric.scala
Scala
apache-2.0
63
/* * Copyright 2016 Nicolas Rinaudo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kantan.codecs /** Type class that represents data types that have an "empty" value. * * The purpose of this type class is to allow automatic derivation of [[Decoder]] for decoded types that might not * have a value - `Option`, `List`... * * In theory, there should rarely be a need to interact directly with this type class, and one is usually better * served by obtaining the [[Decoder]] instance for `Option` and mapping on it. * */ trait Optional[A] extends Serializable { def empty: A def isEmpty(a: A): Boolean = a == empty } object Optional { def apply[A](implicit ev: Optional[A]): Optional[A] = macro imp.summon[Optional[A]] def apply[A](a: A): Optional[A] = new Optional[A] { override val empty = a } implicit val optString: Optional[String] = Optional("") implicit def optSeq[A]: Optional[Seq[A]] = Optional(Seq.empty[A]) implicit def optOption[A]: Optional[Option[A]] = Optional(Option.empty[A]) }
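// A minimal sketch of the Optional contract described above, exercising the instances
// defined in the companion object. The object name is illustrative only.
object OptionalUsageSketch {
  val emptyString: String = Optional.optString.empty               // ""
  val noneIsEmpty: Boolean = Optional.optOption[Int].isEmpty(None) // true
  val zeroAsEmpty: Optional[Int] = Optional(0)                     // treats 0 as the "empty" value
}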
nrinaudo/kantan.codecs
core/shared/src/main/scala/kantan/codecs/Optional.scala
Scala
apache-2.0
1,569
/* _ _ _ *\\ ** | (_) | | ** ** ___| |_ __| | ___ clide 2 ** ** / __| | |/ _` |/ _ \\ (c) 2012-2014 Martin Ring ** ** | (__| | | (_| | __/ http://clide.flatmap.net ** ** \\___|_|_|\\__,_|\\___| ** ** ** ** This file is part of Clide. ** ** ** ** Clide is free software: you can redistribute it and/or modify ** ** it under the terms of the GNU Lesser General Public License as ** ** published by the Free Software Foundation, either version 3 of ** ** the License, or (at your option) any later version. ** ** ** ** Clide is distributed in the hope that it will be useful, ** ** but WITHOUT ANY WARRANTY; without even the implied warranty of ** ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** ** GNU General Public License for more details. ** ** ** ** You should have received a copy of the GNU Lesser General Public ** ** License along with Clide. ** ** If not, see <http://www.gnu.org/licenses/>. ** \\* */ package net.flatmap.collaboration import scala.collection.mutable.Buffer import scala.util.{Success, Try} class Server[T](initialState: Document[T]) { private val history: Buffer[Operation[T]] = Buffer.empty private var combinedHistory: Operation[T] = Operation(List(Retain(initialState.content.length))) private def appendOperation(op: Operation[T]) = { history.append(op) combinedHistory = Operation.compose(combinedHistory,op).get } private var state: Document[T] = initialState def text = state.content def revision = history.length def getHistory = history.view def getCombinedHistory = combinedHistory /** * an operation arrives from a client * * @param rev the revision the client refers to */ def applyOperation(operation: Operation[T], rev: Long): Try[Operation[T]] = { val result = for { concurrentOps <- Try { require((0 to revision) contains rev, "invalid revision: " + rev) history.view(rev.toInt, revision) // TODO: Long Revisions } operation <- concurrentOps.foldLeft(Success(operation): Try[Operation[T]]) { case (a,b) => a.flatMap(a => Operation.transform(a,b).map(_._1)) } nextState <- state(operation) } yield (nextState, operation) result.map { case (nextState,operation) => appendOperation(operation) state = nextState operation } } /** * transform a client annotation to fit the most recent revision * * @param rev the revision the client refers to */ def transformAnnotation(rev: Int, as: Annotations): Try[Annotations] = for { concurrentOps <- Try { require((0 to revision) contains rev, "invalid revision: " + rev) history.view(rev, revision) } annotation <- concurrentOps.foldLeft(Success(as): Try[Annotations]) { case (a,b) => a.flatMap(a => Annotations.transform(a,b)) } } yield annotation }
flatmap/cobra
modules/cobra-common/shared/src/main/scala/net/flatmap/collaboration/Server.scala
Scala
lgpl-3.0
3,749
package controllers import java.io.File import java.nio.file.{Files, Paths} import javax.inject._ import grammars.Schema import grammars.tsql.TSqlFileVisitor import play.api.data.Forms._ import play.api.data._ import play.api.http.ContentTypes import play.api.libs.json.Json import play.api.mvc._ import services.Viz /** * This controller creates an `Action` to handle HTTP requests to the * application's home page. */ @Singleton class ParseController @Inject()(cc: ControllerComponents) extends AbstractController(cc) with play.api.i18n.I18nSupport { // val dir = "/home/mickael/work/vp/rme-web/db/" val dir = "/data/work/vp/dev/tsqlcontrolsource/" /** * Create an Action to render an HTML page with a welcome message. * The configuration in the `routes` file means that this method * will be called when the application receives a `GET` request with * a path of `/`. */ def index = Action { implicit request => var sqlForm = Form( mapping( "sql" -> text )(sqlFormData.apply)(sqlFormData.unapply) ) sqlForm.bindFromRequest.fold( formWithErrors => { println("error") println(formWithErrors.toString) BadRequest(views.html.parse(formWithErrors, 0, "")) }, sql => { println("ok") println(sql.sql) sqlForm = sqlForm.fill(sql) val vis = new TSqlFileVisitor("file") vis.getSchema(sql.sql) match { case null => BadRequest(vis.getErrors.mkString) case schema: Schema => println(schema.marshallJson()) Ok(views.html.parse(sqlForm, schema.tables.length, schema.marshallJson())) } } ) } def image = Action { implicit request => var sqlForm = Form( mapping( "sql" -> text )(sqlFormData.apply)(sqlFormData.unapply) ) sqlForm.bindFromRequest.fold( formWithErrors => { println("error") println(formWithErrors.toString) BadRequest(views.html.parse(formWithErrors, 0, "")) }, sql => { sqlForm = sqlForm.fill(sql) val vis = new TSqlFileVisitor("file") vis.getSchema(sql.sql) match { case null => BadRequest(vis.getErrors.mkString) case schema: Schema => val viz = new Viz() val svg = viz.getSVG(schema.marshallGraph()) Ok(svg).as("image/svg+xml") } } ) } def imagePost = Action { implicit request => var sqlForm = Form( mapping( "sql" -> text )(sqlFormData.apply)(sqlFormData.unapply) ) sqlForm.bindFromRequest.fold( formWithErrors => { println("error") println(formWithErrors.toString) BadRequest(views.html.parse(formWithErrors, 0, "")) }, sql => { sqlForm = sqlForm.fill(sql) val vis = new TSqlFileVisitor("file") vis.getSchema(sql.sql) match { case null => BadRequest(vis.getErrors.mkString) case schema: Schema => val viz = new Viz() val svg = viz.getSVG(schema.marshallGraph()) Ok(svg).as("image/svg+xml") } } ) } def serversList = Action { implicit request => // todo conf val dirs = getListOfDirs(dir).map(d => Json.obj( "name" -> d.getName, "databases" -> getListOfDirs(d.getAbsolutePath).map(sd => Json.obj( "server" -> d.getName, "name" -> sd.getName, "fullName" -> s"${d.getName} / ${sd.getName}" ) ) ) ) Ok(Json.toJson(dirs)).as(ContentTypes.JSON) } def databasesList = Action { implicit request => // todo conf val dirs = getListOfDirs(dir).flatMap(d => getListOfDirs(d.getAbsolutePath).map(sd => Json.obj( "server" -> d.getName, "database" -> sd.getName ) )) Ok(Json.toJson(dirs)).as(ContentTypes.JSON) } def programmabilitySPList(serverName: String, databaseName: String) = Action { implicit request => // todo conf val dir = this.dir + serverName + "/" + databaseName + "/Programmability/Stored Procedures" val spFilenames = getListOfFiles(dir).map(_.getName).sorted // Only filename val sps = spFilenames.map(s => Json.obj( "filename" -> s )) // 
Filename with content // val sps:immutable.Map[String, String] = spFilenames.map(file => { // val source = scala.io.Source.fromFile(dir + "/" + file) // val sql = try source.mkString finally source.close() // file -> sql // }).toMap // println(spFilenames) // println(Json.toJson(spFilenames)) // println(Json.stringify(Json.toJson(spFilenames))) Ok(Json.toJson(sps)).as(ContentTypes.JSON) } def programmabilitySPGet(serverName: String, databaseName: String, spFilename: String) = Action { implicit request => // todo conf val dir = this.dir + serverName + "/" + databaseName + "/Programmability/Stored Procedures" val source = scala.io.Source.fromFile(dir + "/" + spFilename)(scala.io.Codec.ISO8859) val sql = try source.mkString finally source.close() // var sql = new String(Files.readAllBytes(Paths.get(dir + "/" + spFilename))) // println(spFilenames) // println(Json.toJson(spFilenames)) // println(Json.stringify(Json.toJson(spFilenames))) Ok(sql).as(ContentTypes.TEXT) } def getListOfDirs(dir: String): List[File] = { val d = new File(dir) if (d.exists && d.isDirectory) { d.listFiles.filter(f => f.isDirectory && !f.getName.startsWith(".")).toList } else { List[File]() } } def getListOfFiles(dir: String): List[File] = { val d = new File(dir) if (d.exists && d.isDirectory) { d.listFiles.filter(_.isFile).toList } else { List[File]() } } } case class sqlFormData(sql: String)
BrunoSabos/rme-web
app/controllers/ParseController.scala
Scala
gpl-3.0
5,905
/* * Copyright 2016 The BigDL Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intel.analytics.bigdl.nn import java.util import com.intel.analytics.bigdl.nn.abstractnn.TensorModule import com.intel.analytics.bigdl.tensor.Tensor import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric import scala.reflect.ClassTag /** * Upsampling layer for 1D inputs. * Repeats each temporal step length times along the time axis. * * If input's size is (batch, steps, features), * then the output's size is (batch, steps * length, features) * * @param length integer, upsampling factor. * @tparam T The numeric type in this module, usually which are [[Float]] or [[Double]] */ class UpSampling1D[T: ClassTag] (val length: Int) (implicit ev: TensorNumeric[T]) extends TensorModule[T] { require(length > 0, "UpSampling1D's length should be bigger than 0," + s"but got $length") override def updateOutput(input: Tensor[T]): Tensor[T] = { require(input.dim() == 3, "UpSampling1D only supports 3D input") require(input.isContiguous(), "input should be contiguous") val inputLength = input.size(3) val outputLength = inputLength * length output.resize(input.size(1), input.size(2) * length, input.size(3)) val inputData = input.storage().array() val inputOffset = input.storageOffset() - 1 val outputData = output.storage().array() val outputOffset = output.storageOffset() - 1 var i = 0 while (i < input.size(1) * input.size(2)) { var j = 0 while (j < length) { ev.arraycopy(inputData, inputOffset + i * inputLength, outputData, outputOffset + i * outputLength + inputLength * j, inputLength) j += 1 } i += 1 } output } override def updateGradInput(input: Tensor[T], gradOutput: Tensor[T]): Tensor[T] = { require(gradOutput.dim() == 3, "UpSampling1D only supports 3D input") require(gradOutput.isContiguous(), "gradOutput should be contiguous") gradInput.resizeAs(input).zero() val gradInputData = gradInput.storage().array() val gradInputOffset = gradInput.storageOffset() - 1 val gradOutputData = gradOutput.storage().array() val gradOutputOffset = gradOutput.storageOffset() - 1 val gradInputLength = gradInput.size(3) val gradOutputLength = gradInputLength * length var i = 0 while (i < input.size(1) * input.size(2)) { var j = 0 while (j < length) { ev.axpy(gradInputLength, ev.one, gradOutputData, gradOutputOffset + i * gradOutputLength + gradInputLength * j, 1, gradInputData, gradInputOffset + i * gradInputLength, 1) j += 1 } i += 1 } gradInput } } object UpSampling1D { def apply[T: ClassTag](length: Int) (implicit ev: TensorNumeric[T]): UpSampling1D[T] = { new UpSampling1D(length) } }
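// A minimal sketch of the shape contract documented above, i.e.
// (batch, steps, features) -> (batch, steps * length, features). The tensor sizes and the
// object name are illustrative only.
object UpSampling1DSketch {
  def run(): Tensor[Float] = {
    val layer = UpSampling1D[Float](2)
    val input = Tensor[Float](1, 3, 4).rand() // batch = 1, steps = 3, features = 4
    layer.forward(input)                      // output size: (1, 6, 4)
  }
}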
qiuxin2012/BigDL
spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/UpSampling1D.scala
Scala
apache-2.0
3,421
trait T { type U } // "abstract override" shouldn't be allowed on types trait T1 extends T { abstract override type U = Int }
yusuke2255/dotty
tests/untried/neg/t6795.scala
Scala
bsd-3-clause
126
package domala.tests.entity import java.time.{LocalDate, LocalDateTime, LocalTime} import domala._ import domala.jdbc.Config import domala.tests.TestConfig import org.scalatest.{BeforeAndAfter, FunSuite} import domala.jdbc.Result class JavaDateTestSuite extends FunSuite with BeforeAndAfter { implicit val config: Config = TestConfig val dao: JavaDateDao = JavaDateDao.impl before { Required { dao.create() } } after { Required { dao.drop() } } test("select Java Date") { Required { assert(dao.select(0) === JavaDateEntity(0, null, None, null, None, null, None)) } } test("insert Java Date") { Required { val entity = JavaDateEntity( 1, LocalDate.of(2017, 9, 5), Some(LocalDate.of(2020, 12, 31)), LocalTime.of(12, 59, 59), Some(LocalTime.of(13, 0, 0)), LocalDateTime.of(2017, 9, 5, 12, 59, 59, 999999999), Some(LocalDateTime.of(2020, 12, 31, 13, 0, 0, 1)) ) dao.insert(entity) assert(dao.select(1) === entity) } } test("select Java Date holder") { Required { assert(dao.selectHolder(0) === JavaDateHolderEntity(0, null, None, null, None, null, None)) } } test("insert Java Date holder") { Required { val entity = JavaDateHolderEntity( 1, LocalDateHolder(LocalDate.of(2017, 9, 5)), Some(LocalDateHolder(LocalDate.of(2020, 12, 31))), LocalTimeHolder(LocalTime.of(12, 59, 59)), Some(LocalTimeHolder(LocalTime.of(13, 0, 0))), LocalDateTimeHolder(LocalDateTime.of(2017, 9, 5, 12, 59, 59, 999999999)), Some(LocalDateTimeHolder(LocalDateTime.of(2020, 12, 31, 13, 0, 0, 1))) ) dao.insertHolder(entity) assert(dao.selectHolder(1) == entity) } } test("insert Java Date AnyVal") { Required { val entity = JavaDateValEntity( 1, LocalDateVal(LocalDate.of(2017, 9, 5)), Some(LocalDateVal(LocalDate.of(2020, 12, 31))), LocalTimeVal(LocalTime.of(12, 59, 59)), Some(LocalTimeVal(LocalTime.of(13, 0, 0))), LocalDateTimeVal(LocalDateTime.of(2017, 9, 5, 12, 59, 59, 999999999)), Some(LocalDateTimeVal(LocalDateTime.of(2020, 12, 31, 13, 0, 0, 1))) ) dao.insertVal(entity) assert(dao.selectVal(1) == entity) } } } @Entity @Table(name = "java_date") case class JavaDateEntity( id : Int, basicDate : LocalDate, optionDate : Option[LocalDate], basicTime : LocalTime, optionTime : Option[LocalTime], basicDateTime : LocalDateTime, optionDateTime : Option[LocalDateTime] ) @Holder case class LocalDateHolder(value: LocalDate) @Holder case class LocalTimeHolder(value: LocalTime) @Holder case class LocalDateTimeHolder(value: LocalDateTime) @Entity @Table(name = "java_date") case class JavaDateHolderEntity( id : Int, basicDate : LocalDateHolder, optionDate : Option[LocalDateHolder], basicTime : LocalTimeHolder, optionTime : Option[LocalTimeHolder], basicDateTime : LocalDateTimeHolder, optionDateTime : Option[LocalDateTimeHolder] ) case class LocalDateVal(value: LocalDate) extends AnyVal case class LocalTimeVal(value: LocalTime) extends AnyVal case class LocalDateTimeVal(value: LocalDateTime) extends AnyVal @Entity @Table(name = "java_date") case class JavaDateValEntity( id : Int, basicDate : LocalDateVal, optionDate : Option[LocalDateVal], basicTime : LocalTimeVal, optionTime : Option[LocalTimeVal], basicDateTime : LocalDateTimeVal, optionDateTime : Option[LocalDateTimeVal] ) @Dao(config = TestConfig) trait JavaDateDao { @Script(sql = """ create table java_date( id int not null identity primary key, basic_date date, option_date date, basic_time time, option_time time, basic_date_time timestamp, option_date_time timestamp ); insert into java_date (id, basic_date, option_date, basic_time, option_time, basic_date_time, option_date_time) values(0, null, null, 
null, null, null, null); """) def create() @Script(sql = """ drop table java_date; """) def drop() @Select("select * from java_date where id = /* id */0") def select(id: Int): JavaDateEntity @Insert def insert(entity: JavaDateEntity): Result[JavaDateEntity] @Select("select * from java_date where id = /* id */0") def selectHolder(id: Int): JavaDateHolderEntity @Insert def insertHolder(entity: JavaDateHolderEntity): Result[JavaDateHolderEntity] @Select("select * from java_date where id = /* id */0") def selectVal(id: Int): JavaDateValEntity @Insert def insertVal(entity: JavaDateValEntity): Result[JavaDateValEntity] }
bakenezumi/domala
paradise/src/test/scala/domala/tests/entity/JavaDateTestSuite.scala
Scala
apache-2.0
4,593
package net.pointsgame.engine import net.pointsgame.engine.Dot._ import scala.annotation.tailrec // This class needs to be very quick. // Nulls and other strange stuff is used here. //public class DotType protected class Dot( // dots in directions: up, right, down, left //var u: Dot, var ur: Dot, var r: Dot, var rd: Dot, //var d: Dot, var dl: Dot, var l: Dot, var lu: Dot, //var treeNext: Dot, var treeJump: Dot, val id: Int, val x:Int, val y:Int ) { var dotType: Int = empty var u, ur, r, rd, d, dl, l, lu: Dot = null var treeNext: Dot = null var surroundingId:Int = 0 def isFreeToPlace = dotType == empty def isPowered(red: Boolean) = if (red) { List(redActive, blueDefeated, emptyEatenByRed).contains(dotType) } else { List(blueActive, redDefeated, emptyEatenByBlue).contains(dotType) } @tailrec final def getRoot: Dot = if (treeNext == null) this else treeNext.getRoot override def toString = s"($x-$y)" override def hashCode() = id override def equals(a : Any) = this.id == a.asInstanceOf[Dot].id } protected object Dot { val empty = 0 val blueActive = 1 val redActive = 2 val blueDefeated = 5 val redDefeated = 6 val emptyEatenByBlue = 7 val emptyEatenByRed = 8 // these shouldn't be used in an AI val blueTired = 3 val redTired = 4 val blueCtrl = 9 val redCtrl = 10 @tailrec def makeRoot(newRoot: Dot, forWho: Dot): Unit = { val nextNext = forWho.treeNext forWho.treeNext = newRoot if (nextNext != null) Dot.makeRoot(forWho, nextNext) } def eat(dot:Dot, red_? : Boolean): Unit = { val emp = if (red_?) emptyEatenByRed else emptyEatenByBlue val bl = if (red_?) blueDefeated else blueActive val rd = if (red_?) redDefeated else redActive dot.dotType = dot.dotType match { case `empty` => emp case `blueActive` => bl case `redActive` => rd case `blueDefeated` => bl case `redDefeated` => rd case `emptyEatenByBlue` => emp case `emptyEatenByRed` => emp } } //@inline def isAnyRed = (dotType & anyRed) != 0 //@inline def isAnyBlue = (dotType & anyBlue) != 0 //@inline def isFreeToPlace = (dotType & freeToPlace) != 0 //private val anyRed = 1 //private val anyBlue = 2 //private val freeToPlace = 4 //private val surroundedByRed = 8 //private val surroundedByBlue = 16 //private val bluePowered = 32 //private val redPowered = 64 // //val empty = freeToPlace //val blueActive = anyBlue | bluePowered //val redActive = anyRed | redPowered //val blueTired = anyBlue | surroundedByBlue | bluePowered //val redTired = anyRed | surroundedByRed | redPowered //val blueDefeated = anyBlue | surroundedByRed | redPowered //val redDefeated = anyRed | surroundedByBlue | bluePowered //val emptyEatenByBlue = surroundedByBlue | bluePowered //val emptyEatenByRed = surroundedByRed | redPowered //val blueCtrl = surroundedByBlue | freeToPlace | bluePowered //val redCtrl = surroundedByRed | freeToPlace | redPowered }
vn971/points-wip
modules/game-engine-experiments/src/main/scala/net/pointsgame/engine/Dot.scala
Scala
agpl-3.0
2,930
/* * Copyright 2016 The BigDL Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intel.analytics.bigdl.keras.nn import com.intel.analytics.bigdl.keras.KerasBaseSpec import com.intel.analytics.bigdl.dllib.nn.abstractnn.AbstractModule import com.intel.analytics.bigdl.dllib.nn.internal.{Conv3D, Convolution3D, Sequential => KSequential} import com.intel.analytics.bigdl.dllib.tensor.Tensor import com.intel.analytics.bigdl.dllib.utils.Shape import com.intel.analytics.bigdl.dllib.utils.serializer.ModuleSerializationTest import scala.util.Random class Convolution3DSpec extends KerasBaseSpec { "Convolution3D" should "be the same as Keras" in { val kerasCode = """ |input_tensor = Input(shape=[3, 32, 32, 32]) |input = np.random.random([1, 3, 32, 32, 32]) |output_tensor = Convolution3D(12, 2, 1, 3, subsample=(1, 2, 3), | dim_ordering="th")(input_tensor) |model = Model(input=input_tensor, output=output_tensor) """.stripMargin val seq = KSequential[Float]() val layer = Convolution3D[Float](12, 2, 1, 3, subsample = (1, 2, 3), inputShape = Shape(3, 32, 32, 32)) seq.add(layer) checkOutputAndGrad(seq.asInstanceOf[AbstractModule[Tensor[Float], Tensor[Float], Float]], kerasCode, precision = 1e-2) } "Convolution3D without bias" should "be the same as Keras" in { val kerasCode = """ |input_tensor = Input(shape=[4, 16, 20, 32]) |input = np.random.random([1, 4, 16, 20, 32]) |output_tensor = Convolution3D(8, 2, 2, 4, activation="relu", bias=False, | border_mode="same", dim_ordering="th")(input_tensor) |model = Model(input=input_tensor, output=output_tensor) """.stripMargin val seq = KSequential[Float]() val layer = Convolution3D[Float](8, 2, 2, 4, activation = "relu", bias = false, borderMode = "same", inputShape = Shape(4, 16, 20, 32)) seq.add(layer) checkOutputAndGrad(seq.asInstanceOf[AbstractModule[Tensor[Float], Tensor[Float], Float]], kerasCode, precision = 1e-3) } } class Convolution3DSerialTest extends ModuleSerializationTest { override def test(): Unit = { val layer = Convolution3D[Float](12, 2, 1, 3, inputShape = Shape(3, 32, 32, 32)) layer.build(Shape(2, 3, 32, 32, 32)) val input = Tensor[Float](2, 3, 32, 32, 32).apply1(_ => Random.nextFloat()) runSerializationTest(layer, input) } }
intel-analytics/BigDL
scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/nn/keras/nn/Convolution3DSpec.scala
Scala
apache-2.0
3,002
package generic_signature.akka class ActorRef class Actor2 { def actorOf(clazz: Class[_ <: Actor]): ActorRef = new ActorRef } object Actor3 { def actorOf(clazz: Class[_ <: Actor]): ActorRef = new ActorRef }
Kwestor/scala-ide
org.scala-ide.sdt.core.tests/test-workspace/simple-structure-builder/src/generic_signature/akka/Actor.scala
Scala
bsd-3-clause
214
package hyperion.ws import akka.actor.{Actor, ActorLogging, ActorRef} import hyperion.MessageDistributor.RegisterReceiver import hyperion.p1.TelegramReceived /** * Actor that forwards telegrams to a WebSocket connection * * @param source Ref to an Actor that publishes the telegrams to a [[Stream]]. * @param messageDistributor Ref to the Actor that distributes messages. */ class ActualValuesHandlerActor(val source: ActorRef, val messageDistributor: ActorRef) extends Actor with ActorLogging { override def preStart(): Unit = { log.debug("Registering for live updates") messageDistributor ! RegisterReceiver } override def receive: Receive = { case tr: TelegramReceived => source ! tr case a: Any => log.debug(s"Ignoring $a") } }
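// A minimal wiring sketch for the actor described above, assuming an ActorSystem plus refs
// for the WebSocket stream source and the message distributor are already available.
// The object and parameter names are illustrative only.
object ActualValuesHandlerSketch {
  import akka.actor.{ActorSystem, Props}

  def wire(system: ActorSystem, wsSource: ActorRef, distributor: ActorRef): ActorRef =
    system.actorOf(Props(new ActualValuesHandlerActor(wsSource, distributor)))
}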
mthmulders/hyperion
app/src/main/scala/hyperion/ws/ActualValuesHandlerActor.scala
Scala
mit
782
//package com.sksamuel.elastic4s.search // //import com.sksamuel.elastic4s.DocumentRef //import com.sksamuel.elastic4s.testkit.{ClassloaderLocalNodeProvider, ElasticSugar} //import org.scalatest.mockito.MockitoSugar //import org.scalatest.{FlatSpec, Matchers} // //class PercolateTest extends FlatSpec with Matchers with MockitoSugar with ElasticSugar with ClassloaderLocalNodeProvider { // // client.execute { // createIndex("percolate").mappings( // // the first mapping is the place that holds the doc we are percolating against // // it is a kind of temporary type // mapping("doc") as { // textField("flavour") // }, // // the second mapping is what holds our queries themselves // mapping("queries") as { // percolatorField("query") // } // ) // }.await // // // register some queries into the percolator field // val resp = client.execute { // bulk( // // if we don't specify the field that holds the query document, it will default to "query" // register(termQuery("flavour", "assam")).into("percolate" / "queries").withId(1), // register(matchQuery("flavour", "earl grey")).into("percolate" / "queries", "query").withId(2), // register(termQuery("flavour", "darjeeling")).into("percolate" / "queries", "query").withId(3) // ) // }.await // // blockUntilCount(3, "percolate" / "queries") // // "a percolate request" should "return queries that match the document" in { // // val matches = client.execute { // search("percolate").query(percolateQuery("doc", "query").usingSource("""{"flavour" : "assam"}""")) // }.await // // matches.size shouldBe 1 // matches.hits.head.id shouldBe "1" // } // // "a percolate request for existing document" should "return queries that match the document" in { // // this is the document we will percolate with // client.execute { // indexInto("percolate") fields "flavour" -> "darjeeling" withId "4" // }.await // // blockUntilCount(4, "percolate") // // val matches = client.execute { // // if we don't specify the field that holds the query document, it will default to "query" // search("percolate").query(percolateQuery("doc").usingId(DocumentRef("percolate", "teas", "4"))) // }.await // // matches.size shouldBe 1 // matches.hits.head.id shouldBe "3" // } //}
stringbean/elastic4s
elastic4s-tests/src/test/scala/com/sksamuel/elastic4s/search/PercolateTest.scala
Scala
apache-2.0
2,355
/** * This file is part of CloudScript [ http://cloudscript.modelbased.net ] * * Copyright (C) 2011- SINTEF ICT * Contact: Sebastien Mosser <[email protected]> * * Module: net.modelbased.cloudscript.samples * * CloudScript is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * CloudScript is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General * Public License along with CloudScript. If not, see * <http://www.gnu.org/licenses/>. */ package net.modelbased.cloudscript.samples.sensapp import net.modelbased.cloudscript.dsl._ import net.modelbased.cloudscript.library._ import scala.collection.JavaConversions._ import net.modelbased.cloudscript.samples.sensapp.platform.MonolithicHost class SensAppSystem extends CompositeComponent { // internal components val host = instantiates[MonolithicHost]; host hasForUUID "sensapp-host" val system = instantiates[SensApp]; system hasForUUID "sensapp-system" // Deployment binding this deploys system.ssh on host.ssh // Property binding this sets system.hostPath using host.deploymentPath } class SensApp extends WarFileComponent { val file = new java.net.URL("http://github.com/downloads/SINTEF-9012/sensapp/sensapp.war") } object Main extends App { import net.modelbased.cloudscript.kernel._ val software = new SensAppSystem descrComponent(software) //software.containeds foreach { c => descrComponent(c) } def descrComponent(c: Component) { println("Component: " + c) c.offereds foreach { s => println(" offers " + s ) } println(" expects " + (if (c.expected == null) "nothing" else c.expected)) c match { case c: CompositeComponent => { println(" contains" + c.containeds.map { _.toString }.mkString("[",", ","]")) c.connectors foreach { c => println(" connects: ") println(" from: " + c.from.offeredBy + "[" + c.from + "]") println(" to: " + c.to.offeredBy + "[" + c.to + "]") } c.containeds foreach { descrComponent(_) } } case _ => } } }
SINTEF-9012/cloudscript
net.modelbased.cloudscript.samples/src/main/scala/net/modelbased/cloudscript/samples/sensapp/Software.scala
Scala
gpl-3.0
2,508
/* * Copyright 2001-2013 Artima, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.scalatest.matchers.dsl import org.scalatest.Matchers._ import org.scalatest._ class ResultOfAtLeastOneElementOfApplicationSpec extends FunSpec { describe("ResultOfAtLeastOneElementOfApplication ") { it("should have pretty toString when right is empty") { val result = new ResultOfAtLeastOneElementOfApplication(Vector.empty) result.toString should be ("atLeastOneElementOf (Vector())") } it("should have pretty toString when right contains 1 element") { val result = new ResultOfAtLeastOneElementOfApplication(Vector("Bob")) result.toString should be ("atLeastOneElementOf (Vector(\\"Bob\\"))") } it("should have pretty toString when right contains > 1 elements") { val result = new ResultOfAtLeastOneElementOfApplication(Vector("Bob", "Alice")) result.toString should be ("atLeastOneElementOf (Vector(\\"Bob\\", \\"Alice\\"))") } } }
dotty-staging/scalatest
scalatest-test/src/test/scala/org/scalatest/matchers/dsl/ResultOfAtLeastOneElementOfApplicationSpec.scala
Scala
apache-2.0
1,513
package uk.gov.gds.ier.transaction.overseas.openRegister import uk.gov.gds.ier.validation.{ErrorTransformForm, ErrorMessages, FormKeys} import play.api.data.Forms._ import uk.gov.gds.ier.transaction.overseas.InprogressOverseas trait OpenRegisterForms { self: FormKeys with ErrorMessages => lazy val openRegisterOptInMapping = single( keys.optIn.key -> default(boolean, true) ) val openRegisterForm = ErrorTransformForm( mapping( keys.openRegister.key -> default(openRegisterOptInMapping, true) ) ( openRegister => InprogressOverseas(openRegisterOptin = Some(openRegister)) ) ( inprogress => inprogress.openRegisterOptin ) ) }
alphagov/ier-frontend
app/uk/gov/gds/ier/transaction/overseas/openRegister/OpenRegisterForms.scala
Scala
mit
684
package org.orbeon.oxf.xforms.analysis.controls import cats.syntax.option._ import org.orbeon.dom.Element import org.orbeon.oxf.xforms.analysis._ import org.orbeon.oxf.xforms.xbl.{CommonBinding, ConcreteBinding} import org.orbeon.xforms.XFormsNames import org.orbeon.xforms.xbl.Scope import org.orbeon.xml.NamespaceMapping class ComponentControl( index : Int, element : Element, parent : Option[ElementAnalysis], preceding : Option[ElementAnalysis], staticId : String, prefixedId : String, namespaceMapping : NamespaceMapping, scope : Scope, containerScope : Scope, val isTopLevelPart : Boolean ) extends ContainerControl(index, element, parent, preceding, staticId, prefixedId, namespaceMapping, scope, containerScope) with WithChildrenTrait with OptionalSingleNode { // binding could be mandatory, optional, or prohibited val hasLazyBinding: Boolean = ! isTopLevelPart && element.attributeValueOpt(XFormsNames.XXFORMS_UPDATE_QNAME).contains(XFormsNames.XFORMS_FULL_UPDATE) var commonBinding: CommonBinding = null // TODO: pass via constructor var rootElem: Element = element // default, can be updated by `xxf:dynamic` private var _concreteBindingOpt: Option[ConcreteBinding] = None //part.getBinding(prefixedId) def hasConcreteBinding: Boolean = _concreteBindingOpt.isDefined def bindingOpt : Option[ConcreteBinding] = _concreteBindingOpt ensuring (_.isDefined || ! isTopLevelPart) def bindingOrThrow : ConcreteBinding = bindingOpt getOrElse (throw new IllegalStateException) def setConcreteBinding(concreteBinding: ConcreteBinding): Unit = { assert(! hasConcreteBinding) _concreteBindingOpt = concreteBinding.some } def clearBinding(): Unit = _concreteBindingOpt = None // Only support binding if the control defines it has a binding override def hasBinding: Boolean = commonBinding.modeBinding && super.hasBinding // Leave as 'def' as the binding can, in theory, mutate override protected def externalEventsDef = super.externalEventsDef ++ commonBinding.allowedExternalEvents override def externalEvents = externalEventsDef } trait ValueComponentTrait extends ComponentControl with ValueTrait with FormatTrait { override def format : Option[String] = commonBinding.formatOpt override def unformat: Option[String] = None }
orbeon/orbeon-forms
xforms-analysis/shared/src/main/scala/org/orbeon/oxf/xforms/analysis/controls/ComponentControl.scala
Scala
lgpl-2.1
2,455
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.connector import java.util.Collections import org.scalatest.BeforeAndAfter import org.apache.spark.sql.{catalyst, AnalysisException, DataFrame, QueryTest} import org.apache.spark.sql.catalyst.analysis.UnresolvedAttribute import org.apache.spark.sql.catalyst.plans.physical import org.apache.spark.sql.catalyst.plans.physical.{HashPartitioning, RangePartitioning, UnknownPartitioning} import org.apache.spark.sql.connector.catalog.{Identifier, InMemoryTableCatalog} import org.apache.spark.sql.connector.distributions.{Distribution, Distributions} import org.apache.spark.sql.connector.expressions.{Expression, FieldReference, NullOrdering, SortDirection, SortOrder} import org.apache.spark.sql.connector.expressions.LogicalExpressions._ import org.apache.spark.sql.execution.{CommandResultExec, QueryExecution, SortExec, SparkPlan} import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper import org.apache.spark.sql.execution.datasources.v2.V2TableWriteExec import org.apache.spark.sql.execution.exchange.ShuffleExchangeLike import org.apache.spark.sql.functions.lit import org.apache.spark.sql.test.SharedSparkSession import org.apache.spark.sql.types.{IntegerType, StringType, StructType} import org.apache.spark.sql.util.QueryExecutionListener class WriteDistributionAndOrderingSuite extends QueryTest with SharedSparkSession with BeforeAndAfter with AdaptiveSparkPlanHelper { import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._ before { spark.conf.set("spark.sql.catalog.testcat", classOf[InMemoryTableCatalog].getName) } after { spark.sessionState.catalogManager.reset() spark.sessionState.conf.unsetConf("spark.sql.catalog.testcat") } private val namespace = Array("ns1") private val ident = Identifier.of(namespace, "test_table") private val tableNameAsString = "testcat." 
+ ident.toString private val emptyProps = Collections.emptyMap[String, String] private val schema = new StructType() .add("id", IntegerType) .add("data", StringType) private val resolver = conf.resolver test("ordered distribution and sort with same exprs: append") { checkOrderedDistributionAndSortWithSameExprs("append") } test("ordered distribution and sort with same exprs: overwrite") { checkOrderedDistributionAndSortWithSameExprs("overwrite") } test("ordered distribution and sort with same exprs: overwriteDynamic") { checkOrderedDistributionAndSortWithSameExprs("overwriteDynamic") } test("ordered distribution and sort with same exprs with numPartitions: append") { checkOrderedDistributionAndSortWithSameExprs("append", Some(10)) } test("ordered distribution and sort with same exprs with numPartitions: overwrite") { checkOrderedDistributionAndSortWithSameExprs("overwrite", Some(10)) } test("ordered distribution and sort with same exprs with numPartitions: overwriteDynamic") { checkOrderedDistributionAndSortWithSameExprs("overwriteDynamic", Some(10)) } private def checkOrderedDistributionAndSortWithSameExprs(command: String): Unit = { checkOrderedDistributionAndSortWithSameExprs(command, None) } private def checkOrderedDistributionAndSortWithSameExprs( command: String, targetNumPartitions: Option[Int]): Unit = { val tableOrdering = Array[SortOrder]( sort(FieldReference("data"), SortDirection.ASCENDING, NullOrdering.NULLS_FIRST) ) val tableDistribution = Distributions.ordered(tableOrdering) val writeOrdering = Seq( catalyst.expressions.SortOrder( attr("data"), catalyst.expressions.Ascending, catalyst.expressions.NullsFirst, Seq.empty ) ) val writePartitioning = orderedWritePartitioning(writeOrdering, targetNumPartitions) checkWriteRequirements( tableDistribution, tableOrdering, targetNumPartitions, expectedWritePartitioning = writePartitioning, expectedWriteOrdering = writeOrdering, writeCommand = command) } test("clustered distribution and sort with same exprs: append") { checkClusteredDistributionAndSortWithSameExprs("append") } test("clustered distribution and sort with same exprs: overwrite") { checkClusteredDistributionAndSortWithSameExprs("overwrite") } test("clustered distribution and sort with same exprs: overwriteDynamic") { checkClusteredDistributionAndSortWithSameExprs("overwriteDynamic") } test("clustered distribution and sort with same exprs with numPartitions: append") { checkClusteredDistributionAndSortWithSameExprs("append", Some(10)) } test("clustered distribution and sort with same exprs with numPartitions: overwrite") { checkClusteredDistributionAndSortWithSameExprs("overwrite", Some(10)) } test("clustered distribution and sort with same exprs with numPartitions: overwriteDynamic") { checkClusteredDistributionAndSortWithSameExprs("overwriteDynamic", Some(10)) } private def checkClusteredDistributionAndSortWithSameExprs(command: String): Unit = { checkClusteredDistributionAndSortWithSameExprs(command, None) } private def checkClusteredDistributionAndSortWithSameExprs( command: String, targetNumPartitions: Option[Int]): Unit = { val tableOrdering = Array[SortOrder]( sort(FieldReference("data"), SortDirection.DESCENDING, NullOrdering.NULLS_FIRST), sort(FieldReference("id"), SortDirection.ASCENDING, NullOrdering.NULLS_FIRST) ) val clustering = Array[Expression](FieldReference("data"), FieldReference("id")) val tableDistribution = Distributions.clustered(clustering) val writeOrdering = Seq( catalyst.expressions.SortOrder( attr("data"), catalyst.expressions.Descending, 
catalyst.expressions.NullsFirst, Seq.empty ), catalyst.expressions.SortOrder( attr("id"), catalyst.expressions.Ascending, catalyst.expressions.NullsFirst, Seq.empty ) ) val writePartitioningExprs = Seq(attr("data"), attr("id")) val writePartitioning = clusteredWritePartitioning(writePartitioningExprs, targetNumPartitions) checkWriteRequirements( tableDistribution, tableOrdering, targetNumPartitions, expectedWritePartitioning = writePartitioning, expectedWriteOrdering = writeOrdering, writeCommand = command) } test("clustered distribution and sort with extended exprs: append") { checkClusteredDistributionAndSortWithExtendedExprs("append") } test("clustered distribution and sort with extended exprs: overwrite") { checkClusteredDistributionAndSortWithExtendedExprs("overwrite") } test("clustered distribution and sort with extended exprs: overwriteDynamic") { checkClusteredDistributionAndSortWithExtendedExprs("overwriteDynamic") } test("clustered distribution and sort with extended exprs with numPartitions: append") { checkClusteredDistributionAndSortWithExtendedExprs("append", Some(10)) } test("clustered distribution and sort with extended exprs with numPartitions: overwrite") { checkClusteredDistributionAndSortWithExtendedExprs("overwrite", Some(10)) } test("clustered distribution and sort with extended exprs with numPartitions: " + "overwriteDynamic") { checkClusteredDistributionAndSortWithExtendedExprs("overwriteDynamic", Some(10)) } private def checkClusteredDistributionAndSortWithExtendedExprs(command: String): Unit = { checkClusteredDistributionAndSortWithExtendedExprs(command, None) } private def checkClusteredDistributionAndSortWithExtendedExprs( command: String, targetNumPartitions: Option[Int]): Unit = { val tableOrdering = Array[SortOrder]( sort(FieldReference("data"), SortDirection.DESCENDING, NullOrdering.NULLS_FIRST), sort(FieldReference("id"), SortDirection.ASCENDING, NullOrdering.NULLS_FIRST) ) val clustering = Array[Expression](FieldReference("data")) val tableDistribution = Distributions.clustered(clustering) val writeOrdering = Seq( catalyst.expressions.SortOrder( attr("data"), catalyst.expressions.Descending, catalyst.expressions.NullsFirst, Seq.empty ), catalyst.expressions.SortOrder( attr("id"), catalyst.expressions.Ascending, catalyst.expressions.NullsFirst, Seq.empty ) ) val writePartitioningExprs = Seq(attr("data")) val writePartitioning = clusteredWritePartitioning(writePartitioningExprs, targetNumPartitions) checkWriteRequirements( tableDistribution, tableOrdering, targetNumPartitions, expectedWritePartitioning = writePartitioning, expectedWriteOrdering = writeOrdering, writeCommand = command) } test("unspecified distribution and local sort: append") { checkUnspecifiedDistributionAndLocalSort("append") } test("unspecified distribution and local sort: overwrite") { checkUnspecifiedDistributionAndLocalSort("overwrite") } test("unspecified distribution and local sort: overwriteDynamic") { checkUnspecifiedDistributionAndLocalSort("overwriteDynamic") } test("unspecified distribution and local sort with numPartitions: append") { checkUnspecifiedDistributionAndLocalSort("append", Some(10)) } test("unspecified distribution and local sort with numPartitions: overwrite") { checkUnspecifiedDistributionAndLocalSort("overwrite", Some(10)) } test("unspecified distribution and local sort with numPartitions: overwriteDynamic") { checkUnspecifiedDistributionAndLocalSort("overwriteDynamic", Some(10)) } private def checkUnspecifiedDistributionAndLocalSort(command: String): Unit = { 
checkUnspecifiedDistributionAndLocalSort(command, None) } private def checkUnspecifiedDistributionAndLocalSort( command: String, targetNumPartitions: Option[Int]): Unit = { val tableOrdering = Array[SortOrder]( sort(FieldReference("data"), SortDirection.DESCENDING, NullOrdering.NULLS_FIRST) ) val tableDistribution = Distributions.unspecified() val writeOrdering = Seq( catalyst.expressions.SortOrder( attr("data"), catalyst.expressions.Descending, catalyst.expressions.NullsFirst, Seq.empty ) ) val writePartitioning = UnknownPartitioning(0) checkWriteRequirements( tableDistribution, tableOrdering, targetNumPartitions, expectedWritePartitioning = writePartitioning, expectedWriteOrdering = writeOrdering, writeCommand = command, // if the number of partitions is specified, we expect query to fail expectAnalysisException = targetNumPartitions.isDefined) } test("unspecified distribution and no sort: append") { checkUnspecifiedDistributionAndNoSort("append") } test("unspecified distribution and no sort: overwrite") { checkUnspecifiedDistributionAndNoSort("overwrite") } test("unspecified distribution and no sort: overwriteDynamic") { checkUnspecifiedDistributionAndNoSort("overwriteDynamic") } test("unspecified distribution and no sort with numPartitions: append") { checkUnspecifiedDistributionAndNoSort("append", Some(10)) } test("unspecified distribution and no sort with numPartitions: overwrite") { checkUnspecifiedDistributionAndNoSort("overwrite", Some(10)) } test("unspecified distribution and no sort with numPartitions: overwriteDynamic") { checkUnspecifiedDistributionAndNoSort("overwriteDynamic", Some(10)) } private def checkUnspecifiedDistributionAndNoSort(command: String): Unit = { checkUnspecifiedDistributionAndNoSort(command, None) } private def checkUnspecifiedDistributionAndNoSort( command: String, targetNumPartitions: Option[Int]): Unit = { val tableOrdering = Array.empty[SortOrder] val tableDistribution = Distributions.unspecified() val writeOrdering = Seq.empty[catalyst.expressions.SortOrder] val writePartitioning = UnknownPartitioning(0) checkWriteRequirements( tableDistribution, tableOrdering, targetNumPartitions, expectedWritePartitioning = writePartitioning, expectedWriteOrdering = writeOrdering, writeCommand = command, // if the number of partitions is specified, we expect query to fail expectAnalysisException = targetNumPartitions.isDefined) } test("ordered distribution and sort with manual global sort: append") { checkOrderedDistributionAndSortWithManualGlobalSort("append") } test("ordered distribution and sort with manual global sort: overwrite") { checkOrderedDistributionAndSortWithManualGlobalSort("overwrite") } test("ordered distribution and sort with manual global sort: overwriteDynamic") { checkOrderedDistributionAndSortWithManualGlobalSort("overwriteDynamic") } test("ordered distribution and sort with manual global sort with numPartitions: append") { checkOrderedDistributionAndSortWithManualGlobalSort("append", Some(10)) } test("ordered distribution and sort with manual global sort with numPartitions: overwrite") { checkOrderedDistributionAndSortWithManualGlobalSort("overwrite", Some(10)) } test("ordered distribution and sort with manual global sort with numPartitions: " + "overwriteDynamic") { checkOrderedDistributionAndSortWithManualGlobalSort("overwriteDynamic", Some(10)) } private def checkOrderedDistributionAndSortWithManualGlobalSort(command: String): Unit = { checkOrderedDistributionAndSortWithManualGlobalSort(command, None) } private def 
checkOrderedDistributionAndSortWithManualGlobalSort( command: String, targetNumPartitions: Option[Int]): Unit = { val tableOrdering = Array[SortOrder]( sort(FieldReference("data"), SortDirection.ASCENDING, NullOrdering.NULLS_FIRST), sort(FieldReference("id"), SortDirection.ASCENDING, NullOrdering.NULLS_FIRST) ) val tableDistribution = Distributions.ordered(tableOrdering) val writeOrdering = Seq( catalyst.expressions.SortOrder( attr("data"), catalyst.expressions.Ascending, catalyst.expressions.NullsFirst, Seq.empty ), catalyst.expressions.SortOrder( attr("id"), catalyst.expressions.Ascending, catalyst.expressions.NullsFirst, Seq.empty ) ) val writePartitioning = orderedWritePartitioning(writeOrdering, targetNumPartitions) checkWriteRequirements( tableDistribution, tableOrdering, targetNumPartitions, expectedWritePartitioning = writePartitioning, expectedWriteOrdering = writeOrdering, writeTransform = df => df.orderBy("data", "id"), writeCommand = command) } test("ordered distribution and sort with incompatible global sort: append") { checkOrderedDistributionAndSortWithIncompatibleGlobalSort("append") } test("ordered distribution and sort with incompatible global sort: overwrite") { checkOrderedDistributionAndSortWithIncompatibleGlobalSort("overwrite") } test("ordered distribution and sort with incompatible global sort: overwriteDynamic") { checkOrderedDistributionAndSortWithIncompatibleGlobalSort("overwriteDynamic") } test("ordered distribution and sort with incompatible global sort with numPartitions: append") { checkOrderedDistributionAndSortWithIncompatibleGlobalSort("append", Some(10)) } test("ordered distribution and sort with incompatible global sort with numPartitions: " + "overwrite") { checkOrderedDistributionAndSortWithIncompatibleGlobalSort("overwrite", Some(10)) } test("ordered distribution and sort with incompatible global sort with numPartitions: " + "overwriteDynamic") { checkOrderedDistributionAndSortWithIncompatibleGlobalSort("overwriteDynamic", Some(10)) } private def checkOrderedDistributionAndSortWithIncompatibleGlobalSort(command: String): Unit = { checkOrderedDistributionAndSortWithIncompatibleGlobalSort(command, None) } private def checkOrderedDistributionAndSortWithIncompatibleGlobalSort( command: String, targetNumPartitions: Option[Int]): Unit = { val tableOrdering = Array[SortOrder]( sort(FieldReference("data"), SortDirection.ASCENDING, NullOrdering.NULLS_FIRST), sort(FieldReference("id"), SortDirection.ASCENDING, NullOrdering.NULLS_FIRST) ) val tableDistribution = Distributions.ordered(tableOrdering) val writeOrdering = Seq( catalyst.expressions.SortOrder( attr("data"), catalyst.expressions.Ascending, catalyst.expressions.NullsFirst, Seq.empty ), catalyst.expressions.SortOrder( attr("id"), catalyst.expressions.Ascending, catalyst.expressions.NullsFirst, Seq.empty ) ) val writePartitioning = orderedWritePartitioning(writeOrdering, targetNumPartitions) checkWriteRequirements( tableDistribution, tableOrdering, targetNumPartitions, expectedWritePartitioning = writePartitioning, expectedWriteOrdering = writeOrdering, writeTransform = df => df.orderBy(df("data").desc, df("id").asc), writeCommand = command) } test("ordered distribution and sort with manual local sort: append") { checkOrderedDistributionAndSortWithManualLocalSort("append") } test("ordered distribution and sort with manual local sort: overwrite") { checkOrderedDistributionAndSortWithManualLocalSort("overwrite") } test("ordered distribution and sort with manual local sort: overwriteDynamic") { 
checkOrderedDistributionAndSortWithManualLocalSort("overwriteDynamic") } test("ordered distribution and sort with manual local sort with numPartitions: append") { checkOrderedDistributionAndSortWithManualLocalSort("append", Some(10)) } test("ordered distribution and sort with manual local sort with numPartitions: overwrite") { checkOrderedDistributionAndSortWithManualLocalSort("overwrite", Some(10)) } test("ordered distribution and sort with manual local sort with numPartitions: " + "overwriteDynamic") { checkOrderedDistributionAndSortWithManualLocalSort("overwriteDynamic", Some(10)) } private def checkOrderedDistributionAndSortWithManualLocalSort(command: String): Unit = { checkOrderedDistributionAndSortWithManualLocalSort(command, None) } private def checkOrderedDistributionAndSortWithManualLocalSort( command: String, targetNumPartitions: Option[Int]): Unit = { val tableOrdering = Array[SortOrder]( sort(FieldReference("data"), SortDirection.ASCENDING, NullOrdering.NULLS_FIRST), sort(FieldReference("id"), SortDirection.ASCENDING, NullOrdering.NULLS_FIRST) ) val tableDistribution = Distributions.ordered(tableOrdering) val writeOrdering = Seq( catalyst.expressions.SortOrder( attr("data"), catalyst.expressions.Ascending, catalyst.expressions.NullsFirst, Seq.empty ), catalyst.expressions.SortOrder( attr("id"), catalyst.expressions.Ascending, catalyst.expressions.NullsFirst, Seq.empty ) ) val writePartitioning = orderedWritePartitioning(writeOrdering, targetNumPartitions) checkWriteRequirements( tableDistribution, tableOrdering, targetNumPartitions, expectedWritePartitioning = writePartitioning, expectedWriteOrdering = writeOrdering, writeTransform = df => df.sortWithinPartitions("data", "id"), writeCommand = command) } test("clustered distribution and local sort with manual global sort: append") { checkClusteredDistributionAndLocalSortWithManualGlobalSort("append") } test("clustered distribution and local sort with manual global sort: overwrite") { checkClusteredDistributionAndLocalSortWithManualGlobalSort("overwrite") } test("clustered distribution and local sort with manual global sort: overwriteDynamic") { checkClusteredDistributionAndLocalSortWithManualGlobalSort("overwriteDynamic") } test("clustered distribution and local sort with manual global sort with numPartitions: append") { checkClusteredDistributionAndLocalSortWithManualGlobalSort("append", Some(10)) } test("clustered distribution and local sort with manual global sort with numPartitions: " + "overwrite") { checkClusteredDistributionAndLocalSortWithManualGlobalSort("overwrite", Some(10)) } test("clustered distribution and local sort with manual global sort with numPartitions: " + "overwriteDynamic") { checkClusteredDistributionAndLocalSortWithManualGlobalSort("overwriteDynamic", Some(10)) } private def checkClusteredDistributionAndLocalSortWithManualGlobalSort(command: String): Unit = { checkClusteredDistributionAndLocalSortWithManualGlobalSort(command, None) } private def checkClusteredDistributionAndLocalSortWithManualGlobalSort( command: String, targetNumPartitions: Option[Int]): Unit = { val tableOrdering = Array[SortOrder]( sort(FieldReference("data"), SortDirection.DESCENDING, NullOrdering.NULLS_FIRST), sort(FieldReference("id"), SortDirection.ASCENDING, NullOrdering.NULLS_FIRST) ) val tableDistribution = Distributions.clustered(Array(FieldReference("data"))) val writeOrdering = Seq( catalyst.expressions.SortOrder( attr("data"), catalyst.expressions.Descending, catalyst.expressions.NullsFirst, Seq.empty ), 
catalyst.expressions.SortOrder( attr("id"), catalyst.expressions.Ascending, catalyst.expressions.NullsFirst, Seq.empty ) ) val writePartitioningExprs = Seq(attr("data")) val writePartitioning = clusteredWritePartitioning(writePartitioningExprs, targetNumPartitions) checkWriteRequirements( tableDistribution, tableOrdering, targetNumPartitions, expectedWritePartitioning = writePartitioning, expectedWriteOrdering = writeOrdering, writeTransform = df => df.orderBy("data", "id"), writeCommand = command) } test("clustered distribution and local sort with manual local sort: append") { checkClusteredDistributionAndLocalSortWithManualLocalSort("append") } test("clustered distribution and local sort with manual local sort: overwrite") { checkClusteredDistributionAndLocalSortWithManualLocalSort("overwrite") } test("clustered distribution and local sort with manual local sort: overwriteDynamic") { checkClusteredDistributionAndLocalSortWithManualLocalSort("overwriteDynamic") } test("clustered distribution and local sort with manual local sort with numPartitions: append") { checkClusteredDistributionAndLocalSortWithManualLocalSort("append", Some(10)) } test("clustered distribution and local sort with manual local sort with numPartitions: " + "overwrite") { checkClusteredDistributionAndLocalSortWithManualLocalSort("overwrite", Some(10)) } test("clustered distribution and local sort with manual local sort with numPartitions: " + "overwriteDynamic") { checkClusteredDistributionAndLocalSortWithManualLocalSort("overwriteDynamic", Some(10)) } private def checkClusteredDistributionAndLocalSortWithManualLocalSort(command: String): Unit = { checkClusteredDistributionAndLocalSortWithManualLocalSort(command, None) } private def checkClusteredDistributionAndLocalSortWithManualLocalSort( command: String, targetNumPartitions: Option[Int]): Unit = { val tableOrdering = Array[SortOrder]( sort(FieldReference("data"), SortDirection.DESCENDING, NullOrdering.NULLS_FIRST), sort(FieldReference("id"), SortDirection.ASCENDING, NullOrdering.NULLS_FIRST) ) val tableDistribution = Distributions.clustered(Array(FieldReference("data"))) val writeOrdering = Seq( catalyst.expressions.SortOrder( attr("data"), catalyst.expressions.Descending, catalyst.expressions.NullsFirst, Seq.empty ), catalyst.expressions.SortOrder( attr("id"), catalyst.expressions.Ascending, catalyst.expressions.NullsFirst, Seq.empty ) ) val writePartitioningExprs = Seq(attr("data")) val writePartitioning = clusteredWritePartitioning(writePartitioningExprs, targetNumPartitions) checkWriteRequirements( tableDistribution, tableOrdering, targetNumPartitions, expectedWritePartitioning = writePartitioning, expectedWriteOrdering = writeOrdering, writeTransform = df => df.orderBy("data", "id"), writeCommand = command) } private def checkWriteRequirements( tableDistribution: Distribution, tableOrdering: Array[SortOrder], tableNumPartitions: Option[Int], expectedWritePartitioning: physical.Partitioning, expectedWriteOrdering: Seq[catalyst.expressions.SortOrder], writeTransform: DataFrame => DataFrame = df => df, writeCommand: String = "append", expectAnalysisException: Boolean = false): Unit = { catalog.createTable(ident, schema, Array.empty, emptyProps, tableDistribution, tableOrdering, tableNumPartitions) val df = spark.createDataFrame(Seq((1, "a"), (2, "b"), (3, "c"))).toDF("id", "data") val writer = writeTransform(df).writeTo(tableNameAsString) def executeCommand(): SparkPlan = writeCommand match { case "append" => execute(writer.append()) case "overwrite" => 
execute(writer.overwrite(lit(true))) case "overwriteDynamic" => execute(writer.overwritePartitions()) } if (expectAnalysisException) { intercept[AnalysisException] { executeCommand() } } else { val executedPlan = executeCommand() checkPartitioningAndOrdering(executedPlan, expectedWritePartitioning, expectedWriteOrdering) checkAnswer(spark.table(tableNameAsString), df) } } private def checkPartitioningAndOrdering( plan: SparkPlan, partitioning: physical.Partitioning, ordering: Seq[catalyst.expressions.SortOrder]): Unit = { val sorts = collect(plan) { case s: SortExec => s } assert(sorts.size <= 1, "must be at most one sort") val shuffles = collect(plan) { case s: ShuffleExchangeLike => s } assert(shuffles.size <= 1, "must be at most one shuffle") val actualPartitioning = plan.outputPartitioning val expectedPartitioning = partitioning match { case p: physical.RangePartitioning => val resolvedOrdering = p.ordering.map(resolveAttrs(_, plan)) p.copy(ordering = resolvedOrdering.asInstanceOf[Seq[catalyst.expressions.SortOrder]]) case p: physical.HashPartitioning => val resolvedExprs = p.expressions.map(resolveAttrs(_, plan)) p.copy(expressions = resolvedExprs) case other => other } assert(actualPartitioning == expectedPartitioning, "partitioning must match") val actualOrdering = plan.outputOrdering val expectedOrdering = ordering.map(resolveAttrs(_, plan)) assert(actualOrdering == expectedOrdering, "ordering must match") } private def resolveAttrs( expr: catalyst.expressions.Expression, plan: SparkPlan): catalyst.expressions.Expression = { expr.transform { case UnresolvedAttribute(Seq(attrName)) => plan.output.find(attr => resolver(attr.name, attrName)).get case UnresolvedAttribute(nameParts) => val attrName = nameParts.mkString(".") fail(s"cannot resolve a nested attr: $attrName") } } private def attr(name: String): UnresolvedAttribute = { UnresolvedAttribute(name) } private def catalog: InMemoryTableCatalog = { val catalog = spark.sessionState.catalogManager.catalog("testcat") catalog.asTableCatalog.asInstanceOf[InMemoryTableCatalog] } // executes a write operation and keeps the executed physical plan private def execute(writeFunc: => Unit): SparkPlan = { var executedPlan: SparkPlan = null val listener = new QueryExecutionListener { override def onSuccess(funcName: String, qe: QueryExecution, durationNs: Long): Unit = { executedPlan = qe.executedPlan } override def onFailure(funcName: String, qe: QueryExecution, exception: Exception): Unit = { } } spark.listenerManager.register(listener) writeFunc sparkContext.listenerBus.waitUntilEmpty() assert(executedPlan.isInstanceOf[CommandResultExec]) executedPlan.asInstanceOf[CommandResultExec].commandPhysicalPlan match { case w: V2TableWriteExec => stripAQEPlan(w.query) case _ => fail("expected V2TableWriteExec") } } private def orderedWritePartitioning( writeOrdering: Seq[catalyst.expressions.SortOrder], targetNumPartitions: Option[Int]): physical.Partitioning = { RangePartitioning(writeOrdering, targetNumPartitions.getOrElse(conf.numShufflePartitions)) } private def clusteredWritePartitioning( writePartitioningExprs: Seq[catalyst.expressions.Expression], targetNumPartitions: Option[Int]): physical.Partitioning = { HashPartitioning(writePartitioningExprs, targetNumPartitions.getOrElse(conf.numShufflePartitions)) } }
cloud-fan/spark
sql/core/src/test/scala/org/apache/spark/sql/connector/WriteDistributionAndOrderingSuite.scala
Scala
apache-2.0
30,132
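// A small standalone sketch, kept in the test suite's own package so the same (possibly
// package-private) helpers resolve, of the building blocks the suite above keeps constructing
// before it checks the physical write plan: a clustered distribution plus a sort order, built with
// the same Distributions, FieldReference and LogicalExpressions.sort APIs the suite imports. The
// object name and choice of columns are made up; this is illustrative and not part of the original file.
package org.apache.spark.sql.connector

import org.apache.spark.sql.connector.distributions.{Distribution, Distributions}
import org.apache.spark.sql.connector.expressions.{Expression, FieldReference, NullOrdering, SortDirection, SortOrder}
import org.apache.spark.sql.connector.expressions.LogicalExpressions.sort

object WriteRequirementsSketch {
  // Cluster rows by `data`, then sort each output partition by `data` DESC, `id` ASC.
  val clustering: Array[Expression] = Array(FieldReference("data"))
  val distribution: Distribution = Distributions.clustered(clustering)

  val ordering: Array[SortOrder] = Array(
    sort(FieldReference("data"), SortDirection.DESCENDING, NullOrdering.NULLS_FIRST),
    sort(FieldReference("id"), SortDirection.ASCENDING, NullOrdering.NULLS_FIRST)
  )
}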
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.execution.streaming

import scala.collection.mutable

import org.apache.hadoop.fs.{FileStatus, Path}

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.types.StructType

/**
 * A [[FileIndex]] that generates the list of files to process by reading them from the
 * metadata log files generated by the [[FileStreamSink]].
 *
 * @param userPartitionSchema an optional partition schema that will be used to provide types for
 *                            the discovered partitions
 */
class MetadataLogFileIndex(
    sparkSession: SparkSession,
    path: Path,
    userPartitionSchema: Option[StructType])
  extends PartitioningAwareFileIndex(sparkSession, Map.empty, userPartitionSchema) {

  private val metadataDirectory = new Path(path, FileStreamSink.metadataDir)
  logInfo(s"Reading streaming file log from $metadataDirectory")
  private val metadataLog =
    new FileStreamSinkLog(FileStreamSinkLog.VERSION, sparkSession, metadataDirectory.toUri.toString)
  private val allFilesFromLog = metadataLog.allFiles().map(_.toFileStatus).filterNot(_.isDirectory)
  private var cachedPartitionSpec: PartitionSpec = _

  override protected val leafFiles: mutable.LinkedHashMap[Path, FileStatus] = {
    new mutable.LinkedHashMap ++= allFilesFromLog.map(f => f.getPath -> f)
  }

  override protected val leafDirToChildrenFiles: Map[Path, Array[FileStatus]] = {
    allFilesFromLog.toArray.groupBy(_.getPath.getParent)
  }

  override def rootPaths: Seq[Path] = path :: Nil

  override def refresh(): Unit = { }

  override def partitionSpec(): PartitionSpec = {
    if (cachedPartitionSpec == null) {
      cachedPartitionSpec = inferPartitioning()
    }
    cachedPartitionSpec
  }
}
minixalpha/spark
sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/MetadataLogFileIndex.scala
Scala
apache-2.0
2,585
package io.buoyant.namerd.iface import com.fasterxml.jackson.annotation.JsonIgnore import com.twitter.finagle.stats.StatsReceiver import com.twitter.finagle.{Namer, Path, Stack, Thrift, ThriftMux} import com.twitter.finagle.naming.NameInterpreter import com.twitter.finagle.param import com.twitter.scrooge.ThriftService import com.twitter.util.Duration import com.twitter.util.TimeConversions._ import io.buoyant.namerd.iface.ThriftNamerInterface.LocalStamper import io.buoyant.namerd._ import java.net.InetSocketAddress import scala.util.Random case class ThriftInterpreterInterfaceConfig( retryBaseSecs: Option[Int] = None, retryJitterSecs: Option[Int] = None, cache: Option[CapacityConfig] = None ) extends InterpreterInterfaceConfig { @JsonIgnore protected def defaultAddr = ThriftInterpreterInterfaceConfig.defaultAddr @JsonIgnore def mk( interpreters: Ns => NameInterpreter, namers: Map[Path, Namer], store: DtabStore, stats: StatsReceiver ): Servable = { val stats1 = stats.scope(ThriftInterpreterInterfaceConfig.kind) val retryIn: () => Duration = { val retry = retryBaseSecs.map(_.seconds).getOrElse(10.minutes) val jitter = retryJitterSecs.map(_.seconds).getOrElse(1.minute) () => retry + (Random.nextGaussian() * jitter.inSeconds).toInt.seconds } val iface = new ThriftNamerInterface( interpreters, namers, new LocalStamper, retryIn, cache.map(_.capacity).getOrElse(ThriftNamerInterface.Capacity.default), stats1 ) val params = tlsParams + param.Stats(stats1) + Thrift.ThriftImpl.Netty4 ThriftServable(addr, iface, params) } } object ThriftInterpreterInterfaceConfig { val kind = "io.l5d.thriftNameInterpreter" val defaultAddr = new InetSocketAddress(4100) } class ThriftInterpreterInterfaceInitializer extends InterfaceInitializer { override val configId = ThriftInterpreterInterfaceConfig.kind val configClass = classOf[ThriftInterpreterInterfaceConfig] } case class ThriftServable(addr: InetSocketAddress, iface: ThriftService, params: Stack.Params) extends Servable { def kind = ThriftInterpreterInterfaceConfig.kind val thriftMux = ThriftMux.server def serve() = thriftMux.withParams(thriftMux.params ++ params).serveIface(addr, iface) } case class CapacityConfig( bindingCacheActive: Option[Int] = None, bindingCacheInactive: Option[Int] = None, addrCacheActive: Option[Int] = None, addrCacheInactive: Option[Int] = None ) { private[this] val default = ThriftNamerInterface.Capacity.default def capacity = ThriftNamerInterface.Capacity( bindingCacheActive = bindingCacheActive.getOrElse(default.bindingCacheActive), bindingCacheInactive = bindingCacheInactive.getOrElse(default.bindingCacheInactive), addrCacheActive = addrCacheActive.getOrElse(default.addrCacheActive), addrCacheInactive = addrCacheInactive.getOrElse(default.addrCacheInactive) ) }
denverwilliams/linkerd
namerd/iface/interpreter-thrift/src/main/scala/io/buoyant/namerd/iface/ThriftInterpreterInterfaceConfig.scala
Scala
apache-2.0
2,959
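// The retryIn function in the config above adds Gaussian jitter to a base retry interval so that
// retries do not all fire at the same moment. A dependency-free sketch of the same idea, using
// scala.concurrent.duration instead of Twitter's Duration and mirroring the 10-minute/1-minute
// defaults; the names and defaults here are assumptions for illustration only.
import scala.concurrent.duration._
import scala.util.Random

object JitteredRetrySketch {
  /** Returns `base` plus Gaussian noise with standard deviation `jitter`. */
  def retryIn(base: FiniteDuration = 10.minutes, jitter: FiniteDuration = 1.minute): FiniteDuration =
    base + (Random.nextGaussian() * jitter.toSeconds).toInt.seconds

  def main(args: Array[String]): Unit = {
    // Sample a few delays to see the spread around the ten-minute base.
    (1 to 5).foreach(_ => println(retryIn()))
  }
}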
/** * ==== * This file is part of SensApp [ http://sensapp.modelbased.net ] * * Copyright (C) 2011- SINTEF ICT * Contact: SINTEF ICT <[email protected]> * * Module: net.modelbased.sensapp * * SensApp is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * SensApp is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General * Public License along with SensApp. If not, see * <http://www.gnu.org/licenses/>. * ==== * * This file is part of SensApp [ http://sensapp.modelbased.net ] * * Copyright (C) 2012- SINTEF ICT * Contact: SINTEF ICT <[email protected]> * * Module: net.modelbased.sensapp.backyard.gatling.ws * * SensApp is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation, either version 3 of * the License, or (at your option) any later version. * * SensApp is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General * Public License along with SensApp. If not, see * <http://www.gnu.org/licenses/>. */ import scala.tools.nsc.io.File import scala.tools.nsc.io.Path object IDEPathHelper { val gatlingConfUrl = getClass.getClassLoader.getResource("gatling.conf").getPath val projectRootDir = File(gatlingConfUrl).parents(2) val mavenSourcesDirectory = projectRootDir / "src" / "main" / "scala" val mavenResourcesDirectory = projectRootDir / "src" / "main" / "resources" val mavenTargetDirectory = projectRootDir / "target" val mavenBinariesDirectory = mavenTargetDirectory / "classes" val dataDirectory = mavenResourcesDirectory / "data" val requestBodiesDirectory = mavenResourcesDirectory / "request-bodies" val recorderOutputDirectory = mavenSourcesDirectory val resultsDirectory = mavenTargetDirectory / "results" val recorderConfigFile = (mavenResourcesDirectory / "recorder.conf").toFile }
SINTEF-9012/sensapp
net.modelbased.sensapp.backyard.gatling.ws/src/main/scala/IDEPathHelper.scala
Scala
lgpl-3.0
2,616
import java.util.Properties

import org.apache.kafka.clients.producer.{ProducerRecord, KafkaProducer}

/**
 * From http://allegro.tech/2015/08/spark-kafka-integration.html
 * Makes it easier to send messages to Kafka.
 */
class KafkaSink(createProducer: () => KafkaProducer[String, String]) extends Serializable {

  lazy val producer = createProducer()

  def send(topic: String, value: String): Unit = producer.send(new ProducerRecord(topic, value))
}

object KafkaSink {
  def apply(config: Properties): KafkaSink = {
    val f = () => {
      val producer = new KafkaProducer[String, String](config)

      sys.addShutdownHook {
        producer.close()
      }

      producer
    }
    new KafkaSink(f)
  }
}
polarking/ss7-ml-preprocess
src/main/scala/KafkaSink.scala
Scala
apache-2.0
699
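// A hedged usage sketch for the KafkaSink above: broadcast the sink once so each executor lazily
// creates a single KafkaProducer, then reuse it from every task. The broker address, topic name
// and the local Spark session are placeholders, not taken from the original repository.
import java.util.Properties

import org.apache.spark.sql.SparkSession

object KafkaSinkUsageSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("kafka-sink-usage").master("local[*]").getOrCreate()
    val sc = spark.sparkContext

    val props = new Properties()
    props.put("bootstrap.servers", "localhost:9092") // placeholder broker
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")

    // KafkaSink extends Serializable, so it can be broadcast and used inside closures.
    val kafkaSink = sc.broadcast(KafkaSink(props))

    sc.parallelize(Seq("a", "b", "c")).foreach { value =>
      kafkaSink.value.send("example-topic", value) // topic name is a placeholder
    }

    spark.stop()
  }
}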
/**
 * © 2019 Refinitiv. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package cmwell.common

import com.typesafe.config.ConfigFactory

object Settings {
  val config = ConfigFactory.load()
  lazy val defaultProtocol = config.getString("cmwell.rdf.defaultProtocol")
}
dudi3001/CM-Well
server/cmwell-common/src/main/scala/cmwell/common/Settings.scala
Scala
apache-2.0
822
// -   Project: scalajs-svgjs (https://github.com/jokade/scalajs-svgjs)
// Description: Scala.js bindings for the svg.js SVG object
//
// Copyright (c) 2015 Johannes Kastner <[email protected]>
//               Distributed under the MIT License (see included file LICENSE)
package biz.enef.svgjs

import scala.scalajs.js

object SVG extends js.Object {

  def apply(element: String) : Doc = js.native

  /**
   * Returns true if SVG is supported by the JS environment
   */
  def supported : Boolean = js.native
}
jokade/scalajs-svgjs
src/main/scala/biz/enef/svgjs/SVG.scala
Scala
mit
518
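// A minimal usage sketch for the facade above, assuming a Scala.js build with svg.js loaded on the
// page and a container element whose id is "drawing" (that id is an assumption). Only the two
// members declared in the facade are used.
import biz.enef.svgjs.SVG

object SvgFacadeSketch {
  def main(args: Array[String]): Unit = {
    if (SVG.supported) {
      // Bind an svg.js document to the element with id "drawing".
      val doc = SVG("drawing")
      println(s"created svg.js document: $doc")
    } else {
      println("SVG is not supported in this environment")
    }
  }
}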
package sbtazurepack.tasks

import sbt.Logger
import sbtazurepack.settings._

object GenerateCloudServiceRole {

  def apply(roleSettings: CloudServiceRoleSettings)(implicit logger: Logger): CloudServiceRoleDefinition = {
    require(roleSettings != null)
    require(logger != null)

    logger.info("cspkgRole started")

    new CloudServiceRoleDefinition()
  }
}
kostrse/sbt-azurepack
src/main/scala/sbtazurepack/tasks/GenerateCloudServiceRole.scala
Scala
mit
367
object RedPaper extends ColorPaper("red")
object RedPaperBox extends Box("red paper box", RedPaper)

class ExistentialDrawingChild4a(name: String, box: Box[T] forSome { type T <: RedPaper.type}) {
  def draw = {
    val paper = box.take
    println("Drawing on "+paper)
  }
  override def toString = name
}

class ExistentialDrawingChild4b(name: String, box: Box[x.type] forSome {val x: RedPaper.type}) {
  def draw = {
    val paper = box.take
    println("Drawing on "+paper)
  }
  override def toString = name
}
grzegorzbalcerek/scala-book-examples
examples/ExistentialDrawingChild4.scala
Scala
mit
517
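// The two classes above express the same constraint with two spellings of an existential type. A
// self-contained sketch of the first form, with minimal ColorPaper/Box stand-ins because their
// real definitions live elsewhere in the book's examples; the stand-ins are assumptions.
object ExistentialBoxSketch {
  import scala.language.existentials

  class ColorPaper(val color: String) { override def toString = color + " paper" }
  class Box[T](val name: String, contents: T) { def take: T = contents }

  object RedPaper extends ColorPaper("red")
  // The explicit type argument keeps the box's element type at RedPaper.type.
  object RedPaperBox extends Box[RedPaper.type]("red paper box", RedPaper)

  // Accepts any Box whose element type is a subtype of RedPaper.type.
  def draw(box: Box[T] forSome { type T <: RedPaper.type }): Unit =
    println("Drawing on " + box.take)

  def main(args: Array[String]): Unit = draw(RedPaperBox)
}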
package stores

import java.util.concurrent.atomic.AtomicInteger
import javax.inject.Singleton

import org.coursera.example.Instructor
import org.coursera.example.Partner
import org.coursera.naptime.model.Keyed

@Singleton
class PartnerStore {
  @volatile
  var partnerStore = Map.empty[String, Partner]
  val nextId = new AtomicInteger(0)

  partnerStore = partnerStore + (
    "stanford" -> Partner(
      courseIds = List("ml"),
      instructorIds = List(1),
      name = "Stanford University",
      homepage = ""),
    "ucsd" -> Partner(
      courseIds = List("lhtl"),
      instructorIds = List(2),
      name = "UCSD",
      homepage = ""))

  def get(id: String) = partnerStore.get(id)

  def create(partner: Keyed[String, Partner]): Unit = {
    partnerStore = partnerStore + (partner.key -> partner.value)
  }

  def all() = partnerStore
}
vkuo-coursera/naptime
examples/src/main/scala/stores/PartnerStore.scala
Scala
apache-2.0
853
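// A small usage sketch of the store above: look up one of the seeded partners and insert a new
// one. The "mit" record and its field values are made up; Keyed is assumed to expose the usual
// (key, value) constructor matching the .key/.value accessors that create() relies on.
import org.coursera.example.Partner
import org.coursera.naptime.model.Keyed
import stores.PartnerStore

object PartnerStoreSketch {
  def main(args: Array[String]): Unit = {
    val store = new PartnerStore

    println(store.get("stanford")) // one of the seeded entries

    store.create(Keyed("mit", Partner( // hypothetical record for the sketch
      courseIds = List("6002x"),
      instructorIds = List(3),
      name = "MIT",
      homepage = "")))

    println(store.all().keySet) // Set(stanford, ucsd, mit)
  }
}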
package io.opencensus.scala.doobie import cats.effect.{ContextShift, IO} import io.opencensus.scala.Tracing import io.opencensus.scala.http.testSuite.MockTracing import io.opencensus.trace.{BlankSpan, Status} import org.scalatest.{OptionValues, Outcome} import scala.concurrent.ExecutionContext.global import scala.util.Try import org.scalatest.flatspec import org.scalatest.matchers.should.Matchers class FTracingSpec extends flatspec.FixtureAnyFlatSpec with Matchers with OptionValues { implicit val cs: ContextShift[IO] = IO.contextShift(global) case class TestInput(fTracing: FTracing[IO], mock: MockTracing) override protected def withFixture(test: OneArgTest): Outcome = test(clientTracingWithMock()) override type FixtureParam = TestInput behavior of "FTracingSpec" it should "start with the correct name" in { f => f.fTracing.traceF(IO(()), "testSpan", None).unsafeRunSync() f.mock.startedSpans should have size 1 f.mock.startedSpans.head.name shouldBe "testSpan" } it should "trace with parent Span" in { f => val parentSpan = BlankSpan.INSTANCE f.fTracing.traceF(IO(()), "testSpan", Some(parentSpan)).unsafeRunSync() f.mock.startedSpans should have size 1 f.mock.startedSpans.head.parentContext.value shouldBe parentSpan.getContext } it should "stop after normal exit" in { f => f.fTracing.traceF(IO(()), "testSpan", None).unsafeRunSync() f.mock.endedSpans should have size 1 f.mock.endedSpans.head._2.value.getCanonicalCode shouldBe Status.OK.getCanonicalCode } it should "stop after error" in { f => Try( f.fTracing .traceF(IO.raiseError(new Exception("TEST")), "testSpan", None) .unsafeRunSync() ) f.mock.endedSpans should have size 1 f.mock.endedSpans.head._2.value.getCanonicalCode shouldBe Status.INTERNAL.getCanonicalCode } def clientTracingWithMock() = { val mockTracing = new MockTracing val fTracing = new FTracing[IO] { override protected val tracing: Tracing = mockTracing } TestInput(fTracing, mockTracing) } }
census-ecosystem/opencensus-scala
doobie/src/test/scala/io/opencensus/scala/doobie/FTracingSpec.scala
Scala
apache-2.0
2,093
package scalashop

import java.util.concurrent.ForkJoinTask
import org.scalameter._
import common._

object HorizontalBoxBlurRunner {

  val standardConfig = config(
    Key.exec.minWarmupRuns -> 5,
    Key.exec.maxWarmupRuns -> 10,
    Key.exec.benchRuns -> 10,
    Key.verbose -> true
  ) withWarmer(new Warmer.Default)

  def main(args: Array[String]): Unit = {
    val radius = 3
    val width = 1920
    val height = 1080
    val src = new Img(width, height)
    val dst = new Img(width, height)
    val seqtime = standardConfig measure {
      HorizontalBoxBlur.blur(src, dst, 0, height, radius)
    }
    println(s"sequential blur time: $seqtime ms")

    val numTasks = 32
    val partime = standardConfig measure {
      HorizontalBoxBlur.parBlur(src, dst, numTasks, radius)
    }
    println(s"fork/join blur time: $partime ms")
    println(s"speedup: ${seqtime / partime}")
  }
}

/** A simple, trivially parallelizable computation. */
object HorizontalBoxBlur {

  /** Blurs the rows of the source image `src` into the destination image `dst`,
   *  starting with `from` and ending with `end` (non-inclusive).
   *
   *  Within each row, `blur` traverses the pixels by going from left to right.
   */
  def blur(src: Img, dst: Img, from: Int, end: Int, radius: Int): Unit = {
    // println(s"dim: (${src.width}, ${src.height}) - [$from, $end)")
    for (y <- from until end) {
      for (x <- 0 until src.width) {
        dst(x, y) = boxBlurKernel(src, x, y, radius)
      }
    }
  }

  /** Blurs the rows of the source image in parallel using `numTasks` tasks.
   *
   *  Parallelization is done by splitting the source image `src` into
   *  `numTasks` separate strips, where each strip is composed of some number of
   *  rows.
   */
  def parBlur(src: Img, dst: Img, numTasks: Int, radius: Int): Unit = {
    val stripWidth = Math.max(src.height / numTasks, 1)
    val strips = Range(0, src.height) by stripWidth
    val tasks = strips.map(x => {
      task {
        blur(src, dst, x, clamp(x + stripWidth, 0, src.height), radius)
      }
    })
    tasks.foreach(_.join())
  }
}
shouya/thinking-dumps
parprog/scalashop/src/main/scala/scalashop/HorizontalBoxBlur.scala
Scala
mit
2,101
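// parBlur in the file above parallelises by cutting the row range into strips of roughly
// height/numTasks rows. A dependency-free sketch of just that strip computation (no Img, task or
// clamp helpers), printing the (from, end) row ranges a given height/numTasks pair produces; the
// demo values are arbitrary and mirror the benchmark's 1080x32 setup.
object StripSplitSketch {
  /** Returns (from, end) row ranges, end exclusive, that together cover 0 until height. */
  def strips(height: Int, numTasks: Int): Seq[(Int, Int)] = {
    val stripHeight = math.max(height / numTasks, 1)
    (0 until height by stripHeight).map(from => (from, math.min(from + stripHeight, height)))
  }

  def main(args: Array[String]): Unit = {
    // A 1080-row image split across 32 tasks -> 32 strips of 33 rows plus a shorter final strip.
    strips(1080, 32).foreach(println)
  }
}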
package com.softwaremill.macwire.packages

class A
rcirka/macwire
tests2/src/test/scala/com/softwaremill/macwire/packages/A.scala
Scala
apache-2.0
51
package scala.meta
package prettyprinters

import org.scalameta.adt._

sealed trait Options { def isLazy: Boolean }

private[meta] trait LowPriorityOptions {
  implicit case object Lazy extends Options { def isLazy = true }
}

object Options extends LowPriorityOptions {
  implicit case object Eager extends Options { def isLazy = false }
}
Dveim/scalameta
scalameta/common/src/main/scala/scala/meta/prettyprinters/Options.scala
Scala
bsd-3-clause
355
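// The Options companion above uses the low-priority-implicits pattern: Eager lives in the
// companion object itself while Lazy only comes in through the inherited LowPriorityOptions trait,
// so implicit search prefers Eager whenever both are visible. A generic sketch of the same
// pattern with made-up names:
sealed trait Mode { def isLazy: Boolean }

trait LowPriorityMode {
  // Fallback instance, reachable only through the companion's parent trait.
  implicit case object LazyMode extends Mode { def isLazy = true }
}

object Mode extends LowPriorityMode {
  // Members declared directly on the companion win over inherited ones during implicit search.
  implicit case object EagerMode extends Mode { def isLazy = false }
}

object ModeResolutionSketch {
  def main(args: Array[String]): Unit = {
    // Resolves to EagerMode without ambiguity, mirroring how Options.Eager is the default above.
    println(implicitly[Mode].isLazy) // prints: false
  }
}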
package atto

import atto.Atto._
import atto.syntax.refined._
import eu.timepit.refined.string.MatchesRegex
import cats.implicits._
import org.scalacheck._

@SuppressWarnings(Array("org.wartremover.warts.NonUnitStatements", "org.wartremover.warts.Any"))
object RefinedTest extends Properties("Refined") {
  import Prop._

  property("refined success") = forAll { (n: Long) =>
    type Hex = MatchesRegex["[0-9a-fA-F]+"]
    val hexStr = n.toHexString
    stringOf(hexDigit).refined[Hex].parseOnly(hexStr).option.map(_.value) === Some(hexStr)
  }

  property("refined error") = forAll { (n: Long) =>
    type Alpha = MatchesRegex["[e-z]+"]
    stringOf(anyChar).refined[Alpha].parseOnly(n.toString).either.swap.toOption === Some(s"""Predicate failed: "$n".matches("[e-z]+").""")
  }
}
tpolecat/atto
modules/tests/.jvm/src/test/scala-3/atto/RefinedTest.scala
Scala
mit
790
package com.seanshubin.detangler.console

object ConsoleApplication extends App {
  new TopLevelWiring {
    override def commandLineArguments: Seq[String] = args
  }.launcher.run()
}
SeanShubin/detangler
console/src/main/scala/com/seanshubin/detangler/console/ConsoleApplication.scala
Scala
unlicense
184
import org.scalatestplus.play._
import services.AtomicCounter
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner

/** A very simple unit-test example. */
@RunWith(classOf[JUnitRunner])
class AtomicCounterSpec extends PlaySpec {

  "AtomicCounter" should {
    "produce increasing values" in {
      val counter: AtomicCounter = new AtomicCounter
      counter.nextCount() mustBe 0
      counter.nextCount() mustBe 1
      counter.nextCount() mustBe 2
    }
  }
}
play2-maven-plugin/play2-maven-test-projects
play25/scala/starter-example/test/AtomicCounterSpec.scala
Scala
apache-2.0
486
package diffson
package sprayJson

import jsonmergepatch._

class SprayJsonTestJsonMergePatch extends TestJsonMergePatch[spray.json.JsValue] with SprayJsonTestProtocol
gnieh/diffson
sprayJson/src/test/scala/diffson/sprayJson/TestJsonMergePatch.scala
Scala
apache-2.0
168
package todomvc import org.scalajs.dom import org.scalajs.dom.{Event, HashChangeEvent, KeyboardEvent} import preact.Preact.VNode import preact.macros.PreactComponent import todomvc.Model.{Filter, Item, ItemId} object TodoMvc { case class Props(initialTodos: Seq[Item], persist: Seq[Item] => Unit) case class State(todos: Seq[Item], filter: Filter, newTodo: String) { lazy val partitioned: (Seq[Item], Seq[Item]) = todos.partition(_.checked) lazy val completed: Seq[Item] = partitioned._1 lazy val uncompleted: Seq[Item] = partitioned._2 lazy val left: Int = uncompleted.size lazy val filteredTodos: Seq[Item] = { val predicate: Item => Boolean = filter match { case Filter.All => _ => true case Filter.Active => item => !item.checked case Filter.Completed => item => item.checked } todos.filter(predicate) } def updateTodos(newTodos: Seq[Item])(implicit persist: Seq[Item] => Unit): State = { if (newTodos != todos) { persist(newTodos) copy(todos = newTodos) } else { this } } } } @PreactComponent[TodoMvc.Props, TodoMvc.State] class TodoMvc(initialProps: TodoMvc.Props) { import TodoMvc._ import preact.dsl.symbol._ def todosFilter(): Filter = { dom.window.location.hash.split("/").toList match { case _ :: filter :: _ => filter match { case Filter.Active.path => Filter.Active case Filter.Completed.path => Filter.Completed case _ => Filter.All } case _ => Filter.All } } implicit private val persist = initialProps.persist initialState(State( todos = initialProps.initialTodos, newTodo = "", filter = todosFilter() )) dom.window.addEventListener("hashchange", { _: HashChangeEvent => handleRoute() }) def handleRoute(): Unit = { setState(state.copy(filter = todosFilter())) } def onItemToggle(itemId: ItemId): Unit = { val newItems = state.todos.map { item => if (item.id == itemId) { item.toggleChecked } else { item } } setState(state.updateTodos(newItems)) } def updateItem(itemId: ItemId, newText: String): Unit = { val newItems = state.todos.map { item => if (item.id == itemId) { item.copy(title = newText) } else { item } } setState(state.updateTodos(newItems)) } def onItemDelete(itemId: ItemId): Unit = { val newItems = state.todos.filterNot(_.id == itemId) setState(state.updateTodos(newItems)) } def onNewTodoInput(event: Event): Unit = { val target = Utils.extractInputTarget(event) event.preventDefault() setState(state.copy(newTodo = target.value)) } def onNewTodoKeyDown(event: KeyboardEvent): Unit = { if (event.keyCode == Utils.KeyCodes.Enter) { val text = Utils.extractInputTarget(event).value.trim if (text.nonEmpty) { event.preventDefault() val newItem = Model.Item(title = text) val newState = state .updateTodos(state.todos :+ newItem) .copy(newTodo = "") setState(newState) } } } def clearCompleted(): Unit = { val newItems = state.todos.filterNot(_.checked) setState(state.updateTodos(newItems)) } def toggleAll(event: Event): Unit = { val checked = Utils.extractInputTarget(event).checked val newItems = state.todos.map { item => item.copy(checked = checked) } setState(state.updateTodos(newItems)) } def render(): VNode = { 'section("class" -> "todoapp", 'header("class" -> "header", 'h1("todos"), 'input("class" -> "new-todo", "autofocus" -> "true", "placeholder" -> "What needs to be done", "value" -> state.newTodo, "oninput" -> onNewTodoInput _, "onkeydown" -> onNewTodoKeyDown _ ) ), if (state.todos.nonEmpty) { Entry.Children(Seq( 'section("class" -> "main", 'input("class" -> "toggle-all", "type" -> "checkbox", "onchange" -> toggleAll _, "checked" -> state.uncompleted.isEmpty ), 'label("for" -> "toggle-all", "Mark 
all as complete"), 'ul("class" -> "todo-list", state.filteredTodos.map { item => TodoItem(TodoItem.Props(item, onItemToggle, updateItem, onItemDelete)) } ) ), 'footer("class" -> "footer", 'span("class" -> "todo-count", 'strong(state.left.toString), s" ${Utils.pluralize("item", state.left)} left" ), 'ul("class" -> "filters", FilterButton(Filter.All, state.filter), FilterButton(Filter.Active, state.filter), FilterButton(Filter.Completed, state.filter) ), if (state.completed.nonEmpty) { 'button("class" -> "clear-completed", "onclick" -> clearCompleted _, "Clear completed" ) } else { Entry.EmptyChild } ) )) } else { Entry.EmptyChild } ) } }
LMnet/scala-js-preact
examples/todomvc/src/main/scala/todomvc/TodoMvc.scala
Scala
mit
5,253
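// todosFilter in the component above recovers the active filter from window.location.hash. A
// DOM-free sketch of the same parsing, with plain strings in place of dom.window and a minimal
// Filter stand-in (the real Model.Filter lives elsewhere in the example project); the sample
// hashes are made up.
object HashFilterSketch {
  sealed abstract class Filter(val path: String)
  case object All extends Filter("")
  case object Active extends Filter("active")
  case object Completed extends Filter("completed")

  def filterFor(hash: String): Filter =
    hash.split("/").toList match {
      case _ :: path :: _ if path == Active.path    => Active
      case _ :: path :: _ if path == Completed.path => Completed
      case _                                        => All
    }

  def main(args: Array[String]): Unit = {
    // "#/active" -> Active, "#/completed" -> Completed, anything else -> All
    Seq("#/active", "#/completed", "#/", "").foreach(h => println(s"'$h' -> ${filterFor(h)}"))
  }
}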
/* * (c) Copyright 2016 Hewlett Packard Enterprise Development LP * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package cogx.compiler.parser.syntaxtree import cogx.cogmath.hypercircuit.{Hyperedge, Hypernode} import cogx.platform.types.{FieldType, Opcode} /** A node in the syntax tree. * * @param _inputs The input Fields to the operation. * @param fieldTypes The FieldTypes of the outputs of the operation. * * @author Dick Carter */ private[cogx] class Operation(private[this] var _opcode: Opcode, _inputs: Array[Field], fieldTypes: Array[FieldType]) extends Hypernode[Operation](_inputs.asInstanceOf[Array[Hyperedge[Operation]]]) { /** The opcode for the operation. */ def opcode: Opcode = _opcode /** Remove reference to the possible large Opcode instance (constant opcode functions may have data references). */ def releaseResources() { _opcode = null.asInstanceOf[Opcode] } /** The inputs to this kernel, as Fields, not as the base class * Hyperedge[Operation] */ override def inputs = super.inputs.asInstanceOf[Seq[Field]] /** The outputs to this kernel, as Fields, not as the base class * Hyperedge[Operation] */ override def outputs = super.outputs.asInstanceOf[Seq[Field]] /** A description of the operation and the FieldTypes it operates on */ override def toString = { val inputs = fieldTypes.toSeq.mkString("( ", ",", " )") s"$opcode $inputs" } } object Operation { def apply(opcode: Opcode, inputs: Array[Field], fieldTypes: Array[FieldType]) = new Operation(opcode, inputs, fieldTypes) def apply(opcode: Opcode, inputs: Array[Field], fieldType: FieldType) = new Operation(opcode, inputs, Array(fieldType)) }
hpe-cct/cct-core
src/main/scala/cogx/compiler/parser/syntaxtree/Operation.scala
Scala
apache-2.0
2,228
package io.scalaland.chimney.examples

object foo {
  import io.scalaland.chimney.dsl._

  sealed trait A extends Product with Serializable
  sealed trait AA extends A
  case object A1 extends AA

  object into {
    sealed trait A extends Product with Serializable
    sealed trait AA extends A
    case object A1 extends AA
  }

  def convert(a: A): into.A = a.transformInto[into.A]
}
scalalandio/chimney
chimney/src/test/scala/io/scalaland/chimney/examples/Issues.scala
Scala
apache-2.0
387
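// A hedged usage sketch of the convert method above: chimney derives the coproduct transformation
// between the two parallel sealed hierarchies, so A1 maps onto into.A1 by name. Only members
// already defined in the example are used; the wrapper object name is made up.
object FooConvertSketch {
  import io.scalaland.chimney.examples.foo

  def main(args: Array[String]): Unit = {
    val converted: foo.into.A = foo.convert(foo.A1)
    println(converted) // prints: A1
  }
}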
package io.igu.cityindex.example

import io.igu.cityindex.HttpJwsClient
import io.igu.cityindex.market.model.MarketSearchRequest
import org.scalatest.concurrent.{PatienceConfiguration, ScalaFutures}
import org.scalatest.time.{Millis, Seconds, Span}
import org.slf4j.LoggerFactory

import scala.concurrent.ExecutionContext

object LogonExample extends App with ScalaFutures with PatienceConfiguration {

  private implicit val executionContext: ExecutionContext = scala.concurrent.ExecutionContext.global
  private implicit val defaultPatience = PatienceConfig(timeout = Span(10, Seconds), interval = Span(100, Millis))

  private val logger = LoggerFactory.getLogger(this.getClass)

  val cityIndexClient = new CityIndexClient(new HttpJwsClient())

  val authenticatedCityIndexClient = cityIndexClient.authenticate(args(0), args(1)).futureValue

  logger.info(s"Client trading account: ${authenticatedCityIndexClient.clientAndTradingAccount.futureValue}")

  val marketSearch = authenticatedCityIndexClient.marketSearch(MarketSearchRequest(
    cfdProductType = true,
    binaryProductType = false,
    includeOptions = false,
    query = "UK 100 CFD"
  )).futureValue

  val market = marketSearch.markets.head

  logger.info(s"Market Search: $market")

  val marketInformation = authenticatedCityIndexClient.information(market.marketId).futureValue

  logger.info(s"Market Information: $marketInformation")

  val ftsePrice = authenticatedCityIndexClient.price(market.marketId)

  ftsePrice.subscribe(price => logger.info(s"New Price: $price"))
}
deadcore/city-index-scala-api
src/main/scala/io/igu/cityindex/example/LogonExample.scala
Scala
apache-2.0
1,549
/*
 * Copyright 2016 Nikolay Smelik
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package scalabot.common.chat

/**
  * Created by Nikolay.Smelik on 7/11/2016.
  */
case class UserChat(id: String,
                    source: String,
                    from: User
                   ) extends Chat {
  def userFullName: String = from.displayName

  override def toString: String = userFullName
}
kerzok/ScalaBot
BotApi/src/main/scala/scalabot/common/chat/UserChat.scala
Scala
apache-2.0
913
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.catalyst.expressions.codegen import java.io.ByteArrayInputStream import java.util.{Map => JavaMap} import scala.collection.JavaConverters._ import scala.collection.mutable import scala.collection.mutable.ArrayBuffer import scala.util.control.NonFatal import com.google.common.cache.{CacheBuilder, CacheLoader} import org.codehaus.janino.{ByteArrayClassLoader, ClassBodyEvaluator, SimpleCompiler} import org.codehaus.janino.util.ClassFile import scala.language.existentials import org.apache.spark.SparkEnv import org.apache.spark.internal.Logging import org.apache.spark.metrics.source.CodegenMetrics import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.util.{ArrayData, MapData} import org.apache.spark.sql.types._ import org.apache.spark.unsafe.Platform import org.apache.spark.unsafe.types._ import org.apache.spark.util.{ParentClassLoader, Utils} /** * Java source for evaluating an [[Expression]] given a [[InternalRow]] of input. * * @param code The sequence of statements required to evaluate the expression. * It should be empty string, if `isNull` and `value` are already existed, or no code * needed to evaluate them (literals). * @param isNull A term that holds a boolean value representing whether the expression evaluated * to null. * @param value A term for a (possibly primitive) value of the result of the evaluation. Not * valid if `isNull` is set to `true`. */ case class ExprCode(var code: String, var isNull: String, var value: String) /** * State used for subexpression elimination. * * @param isNull A term that holds a boolean value representing whether the expression evaluated * to null. * @param value A term for a value of a common sub-expression. Not valid if `isNull` * is set to `true`. */ case class SubExprEliminationState(isNull: String, value: String) /** * Codes and common subexpressions mapping used for subexpression elimination. * * @param codes Strings representing the codes that evaluate common subexpressions. * @param states Foreach expression that is participating in subexpression elimination, * the state to use. */ case class SubExprCodes(codes: Seq[String], states: Map[Expression, SubExprEliminationState]) /** * A context for codegen, tracking a list of objects that could be passed into generated Java * function. */ class CodegenContext { /** * Holding a list of objects that could be used passed into generated class. */ val references: mutable.ArrayBuffer[Any] = new mutable.ArrayBuffer[Any]() /** * Add an object to `references`. * * Returns the code to access it. 
* * This is for minor objects not to store the object into field but refer it from the references * field at the time of use because number of fields in class is limited so we should reduce it. */ def addReferenceObj(obj: Any): String = { val idx = references.length references += obj val clsName = obj.getClass.getName s"(($clsName) references[$idx])" } /** * Add an object to `references`, create a class member to access it. * * Returns the name of class member. */ def addReferenceObj(name: String, obj: Any, className: String = null): String = { val term = freshName(name) val idx = references.length references += obj val clsName = Option(className).getOrElse(obj.getClass.getName) addMutableState(clsName, term, s"this.$term = ($clsName) references[$idx];") term } /** * Holding a list of generated columns as input of current operator, will be used by * BoundReference to generate code. */ var currentVars: Seq[ExprCode] = null /** * Whether should we copy the result rows or not. * * If any operator inside WholeStageCodegen generate multiple rows from a single row (for * example, Join), this should be true. * * If an operator starts a new pipeline, this should be reset to false before calling `consume()`. */ var copyResult: Boolean = false /** * Holding expressions' mutable states like `MonotonicallyIncreasingID.count` as a * 3-tuple: java type, variable name, code to init it. * As an example, ("int", "count", "count = 0;") will produce code: * {{{ * private int count; * }}} * as a member variable, and add * {{{ * count = 0; * }}} * to the constructor. * * They will be kept as member variables in generated classes like `SpecificProjection`. */ val mutableStates: mutable.ArrayBuffer[(String, String, String)] = mutable.ArrayBuffer.empty[(String, String, String)] def addMutableState(javaType: String, variableName: String, initCode: String): Unit = { mutableStates += ((javaType, variableName, initCode)) } /** * Add buffer variable which stores data coming from an [[InternalRow]]. This methods guarantees * that the variable is safely stored, which is important for (potentially) byte array backed * data types like: UTF8String, ArrayData, MapData & InternalRow. */ def addBufferedState(dataType: DataType, variableName: String, initCode: String): ExprCode = { val value = freshName(variableName) addMutableState(javaType(dataType), value, "") val code = dataType match { case StringType => s"$value = $initCode.clone();" case _: StructType | _: ArrayType | _: MapType => s"$value = $initCode.copy();" case _ => s"$value = $initCode;" } ExprCode(code, "false", value) } def declareMutableStates(): String = { // It's possible that we add same mutable state twice, e.g. the `mergeExpressions` in // `TypedAggregateExpression`, we should call `distinct` here to remove the duplicated ones. mutableStates.distinct.map { case (javaType, variableName, _) => s"private $javaType $variableName;" }.mkString("\\n") } def initMutableStates(): String = { // It's possible that we add same mutable state twice, e.g. the `mergeExpressions` in // `TypedAggregateExpression`, we should call `distinct` here to remove the duplicated ones. val initCodes = mutableStates.distinct.map(_._3 + "\\n") // The generated initialization code may exceed 64kb function size limit in JVM if there are too // many mutable states, so split it into multiple functions. splitExpressions(initCodes, "init", Nil) } /** * Code statements to initialize states that depend on the partition index. 
* An integer `partitionIndex` will be made available within the scope. */ val partitionInitializationStatements: mutable.ArrayBuffer[String] = mutable.ArrayBuffer.empty def addPartitionInitializationStatement(statement: String): Unit = { partitionInitializationStatements += statement } def initPartition(): String = { partitionInitializationStatements.mkString("\\n") } /** * Holding all the functions those will be added into generated class. */ val addedFunctions: mutable.Map[String, String] = mutable.Map.empty[String, String] def addNewFunction(funcName: String, funcCode: String): Unit = { addedFunctions += ((funcName, funcCode)) } /** * Holds expressions that are equivalent. Used to perform subexpression elimination * during codegen. * * For expressions that appear more than once, generate additional code to prevent * recomputing the value. * * For example, consider two expression generated from this SQL statement: * SELECT (col1 + col2), (col1 + col2) / col3. * * equivalentExpressions will match the tree containing `col1 + col2` and it will only * be evaluated once. */ val equivalentExpressions: EquivalentExpressions = new EquivalentExpressions // Foreach expression that is participating in subexpression elimination, the state to use. val subExprEliminationExprs = mutable.HashMap.empty[Expression, SubExprEliminationState] // The collection of sub-expression result resetting methods that need to be called on each row. val subexprFunctions = mutable.ArrayBuffer.empty[String] def declareAddedFunctions(): String = { addedFunctions.map { case (funcName, funcCode) => funcCode }.mkString("\\n") } final val JAVA_BOOLEAN = "boolean" final val JAVA_BYTE = "byte" final val JAVA_SHORT = "short" final val JAVA_INT = "int" final val JAVA_LONG = "long" final val JAVA_FLOAT = "float" final val JAVA_DOUBLE = "double" /** The variable name of the input row in generated code. */ final var INPUT_ROW = "i" /** * The map from a variable name to it's next ID. */ private val freshNameIds = new mutable.HashMap[String, Int] freshNameIds += INPUT_ROW -> 1 /** * A prefix used to generate fresh name. */ var freshNamePrefix = "" /** * The map from a place holder to a corresponding comment */ private val placeHolderToComments = new mutable.HashMap[String, String] /** * Returns a term name that is unique within this instance of a `CodegenContext`. */ def freshName(name: String): String = synchronized { val fullName = if (freshNamePrefix == "") { name } else { s"${freshNamePrefix}_$name" } if (freshNameIds.contains(fullName)) { val id = freshNameIds(fullName) freshNameIds(fullName) = id + 1 s"$fullName$id" } else { freshNameIds += fullName -> 1 fullName } } /** * Returns the specialized code to access a value from `inputRow` at `ordinal`. 
*/ def getValue(input: String, dataType: DataType, ordinal: String): String = { val jt = javaType(dataType) dataType match { case _ if isPrimitiveType(jt) => s"$input.get${primitiveTypeName(jt)}($ordinal)" case t: DecimalType => s"$input.getDecimal($ordinal, ${t.precision}, ${t.scale})" case StringType => s"$input.getUTF8String($ordinal)" case BinaryType => s"$input.getBinary($ordinal)" case CalendarIntervalType => s"$input.getInterval($ordinal)" case t: StructType => s"$input.getStruct($ordinal, ${t.size})" case _: ArrayType => s"$input.getArray($ordinal)" case _: MapType => s"$input.getMap($ordinal)" case NullType => "null" case udt: UserDefinedType[_] => getValue(input, udt.sqlType, ordinal) case _ => s"($jt)$input.get($ordinal, null)" } } /** * Returns the code to update a column in Row for a given DataType. */ def setColumn(row: String, dataType: DataType, ordinal: Int, value: String): String = { val jt = javaType(dataType) dataType match { case _ if isPrimitiveType(jt) => s"$row.set${primitiveTypeName(jt)}($ordinal, $value)" case t: DecimalType => s"$row.setDecimal($ordinal, $value, ${t.precision})" // The UTF8String may came from UnsafeRow, otherwise clone is cheap (re-use the bytes) case StringType => s"$row.update($ordinal, $value.clone())" case udt: UserDefinedType[_] => setColumn(row, udt.sqlType, ordinal, value) case _ => s"$row.update($ordinal, $value)" } } /** * Update a column in MutableRow from ExprCode. * * @param isVectorized True if the underlying row is of type `ColumnarBatch.Row`, false otherwise */ def updateColumn( row: String, dataType: DataType, ordinal: Int, ev: ExprCode, nullable: Boolean, isVectorized: Boolean = false): String = { if (nullable) { // Can't call setNullAt on DecimalType, because we need to keep the offset if (!isVectorized && dataType.isInstanceOf[DecimalType]) { s""" if (!${ev.isNull}) { ${setColumn(row, dataType, ordinal, ev.value)}; } else { ${setColumn(row, dataType, ordinal, "null")}; } """ } else { s""" if (!${ev.isNull}) { ${setColumn(row, dataType, ordinal, ev.value)}; } else { $row.setNullAt($ordinal); } """ } } else { s"""${setColumn(row, dataType, ordinal, ev.value)};""" } } /** * Returns the specialized code to set a given value in a column vector for a given `DataType`. */ def setValue(batch: String, row: String, dataType: DataType, ordinal: Int, value: String): String = { val jt = javaType(dataType) dataType match { case _ if isPrimitiveType(jt) => s"$batch.column($ordinal).put${primitiveTypeName(jt)}($row, $value);" case t: DecimalType => s"$batch.column($ordinal).putDecimal($row, $value, ${t.precision});" case t: StringType => s"$batch.column($ordinal).putByteArray($row, $value.getBytes());" case _ => throw new IllegalArgumentException(s"cannot generate code for unsupported type: $dataType") } } /** * Returns the specialized code to set a given value in a column vector for a given `DataType` * that could potentially be nullable. */ def updateColumn( batch: String, row: String, dataType: DataType, ordinal: Int, ev: ExprCode, nullable: Boolean): String = { if (nullable) { s""" if (!${ev.isNull}) { ${setValue(batch, row, dataType, ordinal, ev.value)} } else { $batch.column($ordinal).putNull($row); } """ } else { s"""${setValue(batch, row, dataType, ordinal, ev.value)};""" } } /** * Returns the specialized code to access a value from a column vector for a given `DataType`. 
*/ def getValue(batch: String, row: String, dataType: DataType, ordinal: Int): String = { val jt = javaType(dataType) dataType match { case _ if isPrimitiveType(jt) => s"$batch.column($ordinal).get${primitiveTypeName(jt)}($row)" case t: DecimalType => s"$batch.column($ordinal).getDecimal($row, ${t.precision}, ${t.scale})" case StringType => s"$batch.column($ordinal).getUTF8String($row)" case _ => throw new IllegalArgumentException(s"cannot generate code for unsupported type: $dataType") } } /** * Returns the name used in accessor and setter for a Java primitive type. */ def primitiveTypeName(jt: String): String = jt match { case JAVA_INT => "Int" case _ => boxedType(jt) } def primitiveTypeName(dt: DataType): String = primitiveTypeName(javaType(dt)) /** * Returns the Java type for a DataType. */ def javaType(dt: DataType): String = dt match { case BooleanType => JAVA_BOOLEAN case ByteType => JAVA_BYTE case ShortType => JAVA_SHORT case IntegerType | DateType => JAVA_INT case LongType | TimestampType => JAVA_LONG case FloatType => JAVA_FLOAT case DoubleType => JAVA_DOUBLE case dt: DecimalType => "Decimal" case BinaryType => "byte[]" case StringType => "UTF8String" case CalendarIntervalType => "CalendarInterval" case _: StructType => "InternalRow" case _: ArrayType => "ArrayData" case _: MapType => "MapData" case udt: UserDefinedType[_] => javaType(udt.sqlType) case ObjectType(cls) if cls.isArray => s"${javaType(ObjectType(cls.getComponentType))}[]" case ObjectType(cls) => cls.getName case _ => "Object" } /** * Returns the boxed type in Java. */ def boxedType(jt: String): String = jt match { case JAVA_BOOLEAN => "Boolean" case JAVA_BYTE => "Byte" case JAVA_SHORT => "Short" case JAVA_INT => "Integer" case JAVA_LONG => "Long" case JAVA_FLOAT => "Float" case JAVA_DOUBLE => "Double" case other => other } def boxedType(dt: DataType): String = boxedType(javaType(dt)) /** * Returns the representation of default value for a given Java Type. */ def defaultValue(jt: String): String = jt match { case JAVA_BOOLEAN => "false" case JAVA_BYTE => "(byte)-1" case JAVA_SHORT => "(short)-1" case JAVA_INT => "-1" case JAVA_LONG => "-1L" case JAVA_FLOAT => "-1.0f" case JAVA_DOUBLE => "-1.0" case _ => "null" } def defaultValue(dt: DataType): String = defaultValue(javaType(dt)) /** * Generates code for equal expression in Java. */ def genEqual(dataType: DataType, c1: String, c2: String): String = dataType match { case BinaryType => s"java.util.Arrays.equals($c1, $c2)" case FloatType => s"(java.lang.Float.isNaN($c1) && java.lang.Float.isNaN($c2)) || $c1 == $c2" case DoubleType => s"(java.lang.Double.isNaN($c1) && java.lang.Double.isNaN($c2)) || $c1 == $c2" case dt: DataType if isPrimitiveType(dt) => s"$c1 == $c2" case dt: DataType if dt.isInstanceOf[AtomicType] => s"$c1.equals($c2)" case array: ArrayType => genComp(array, c1, c2) + " == 0" case struct: StructType => genComp(struct, c1, c2) + " == 0" case udt: UserDefinedType[_] => genEqual(udt.sqlType, c1, c2) case _ => throw new IllegalArgumentException( "cannot generate equality code for un-comparable type: " + dataType.simpleString) } /** * Generates code for comparing two expressions. * * @param dataType data type of the expressions * @param c1 name of the variable of expression 1's output * @param c2 name of the variable of expression 2's output */ def genComp(dataType: DataType, c1: String, c2: String): String = dataType match { // java boolean doesn't support > or < operator case BooleanType => s"($c1 == $c2 ? 0 : ($c1 ? 
1 : -1))" case DoubleType => s"org.apache.spark.util.Utils.nanSafeCompareDoubles($c1, $c2)" case FloatType => s"org.apache.spark.util.Utils.nanSafeCompareFloats($c1, $c2)" // use c1 - c2 may overflow case dt: DataType if isPrimitiveType(dt) => s"($c1 > $c2 ? 1 : $c1 < $c2 ? -1 : 0)" case BinaryType => s"org.apache.spark.sql.catalyst.util.TypeUtils.compareBinary($c1, $c2)" case NullType => "0" case array: ArrayType => val elementType = array.elementType val elementA = freshName("elementA") val isNullA = freshName("isNullA") val elementB = freshName("elementB") val isNullB = freshName("isNullB") val compareFunc = freshName("compareArray") val minLength = freshName("minLength") val funcCode: String = s""" public int $compareFunc(ArrayData a, ArrayData b) { // when comparing unsafe arrays, try equals first as it compares the binary directly // which is very fast. if (a instanceof UnsafeArrayData && b instanceof UnsafeArrayData && a.equals(b)) { return 0; } int lengthA = a.numElements(); int lengthB = b.numElements(); int $minLength = (lengthA > lengthB) ? lengthB : lengthA; for (int i = 0; i < $minLength; i++) { boolean $isNullA = a.isNullAt(i); boolean $isNullB = b.isNullAt(i); if ($isNullA && $isNullB) { // Nothing } else if ($isNullA) { return -1; } else if ($isNullB) { return 1; } else { ${javaType(elementType)} $elementA = ${getValue("a", elementType, "i")}; ${javaType(elementType)} $elementB = ${getValue("b", elementType, "i")}; int comp = ${genComp(elementType, elementA, elementB)}; if (comp != 0) { return comp; } } } if (lengthA < lengthB) { return -1; } else if (lengthA > lengthB) { return 1; } return 0; } """ addNewFunction(compareFunc, funcCode) s"this.$compareFunc($c1, $c2)" case schema: StructType => INPUT_ROW = "i" val comparisons = GenerateOrdering.genComparisons(this, schema) val compareFunc = freshName("compareStruct") val funcCode: String = s""" public int $compareFunc(InternalRow a, InternalRow b) { // when comparing unsafe rows, try equals first as it compares the binary directly // which is very fast. if (a instanceof UnsafeRow && b instanceof UnsafeRow && a.equals(b)) { return 0; } InternalRow i = null; $comparisons return 0; } """ addNewFunction(compareFunc, funcCode) s"this.$compareFunc($c1, $c2)" case other if other.isInstanceOf[AtomicType] => s"$c1.compare($c2)" case udt: UserDefinedType[_] => genComp(udt.sqlType, c1, c2) case _ => throw new IllegalArgumentException( "cannot generate compare code for un-comparable type: " + dataType.simpleString) } /** * Generates code for greater of two expressions. * * @param dataType data type of the expressions * @param c1 name of the variable of expression 1's output * @param c2 name of the variable of expression 2's output */ def genGreater(dataType: DataType, c1: String, c2: String): String = javaType(dataType) match { case JAVA_BYTE | JAVA_SHORT | JAVA_INT | JAVA_LONG => s"$c1 > $c2" case _ => s"(${genComp(dataType, c1, c2)}) > 0" } /** * Generates code to do null safe execution, i.e. only execute the code when the input is not * null by adding null check if necessary. * * @param nullable used to decide whether we should add null check or not. * @param isNull the code to check if the input is null. * @param execute the code that should only be executed when the input is not null. 
*/ def nullSafeExec(nullable: Boolean, isNull: String)(execute: String): String = { if (nullable) { s""" if (!$isNull) { $execute } """ } else { "\\n" + execute } } /** * List of java data types that have special accessors and setters in [[InternalRow]]. */ val primitiveTypes = Seq(JAVA_BOOLEAN, JAVA_BYTE, JAVA_SHORT, JAVA_INT, JAVA_LONG, JAVA_FLOAT, JAVA_DOUBLE) /** * Returns true if the Java type has a special accessor and setter in [[InternalRow]]. */ def isPrimitiveType(jt: String): Boolean = primitiveTypes.contains(jt) def isPrimitiveType(dt: DataType): Boolean = isPrimitiveType(javaType(dt)) /** * Splits the generated code of expressions into multiple functions, because function has * 64kb code size limit in JVM * * @param row the variable name of row that is used by expressions * @param expressions the codes to evaluate expressions. */ def splitExpressions(row: String, expressions: Seq[String]): String = { if (row == null || currentVars != null) { // Cannot split these expressions because they are not created from a row object. return expressions.mkString("\\n") } splitExpressions(expressions, "apply", ("InternalRow", row) :: Nil) } private def splitExpressions( expressions: Seq[String], funcName: String, arguments: Seq[(String, String)]): String = { val blocks = new ArrayBuffer[String]() val blockBuilder = new StringBuilder() for (code <- expressions) { // We can't know how many bytecode will be generated, so use the length of source code // as metric. A method should not go beyond 8K, otherwise it will not be JITted, should // also not be too small, or it will have many function calls (for wide table), see the // results in BenchmarkWideTable. if (blockBuilder.length > 1024) { blocks += blockBuilder.toString() blockBuilder.clear() } blockBuilder.append(code) } blocks += blockBuilder.toString() if (blocks.length == 1) { // inline execution if only one block blocks.head } else { val func = freshName(funcName) val functions = blocks.zipWithIndex.map { case (body, i) => val name = s"${func}_$i" val code = s""" |private void $name(${arguments.map { case (t, name) => s"$t $name" }.mkString(", ")}) { | $body |} """.stripMargin addNewFunction(name, code) name } functions.map(name => s"$name(${arguments.map(_._2).mkString(", ")});").mkString("\\n") } } /** * Perform a function which generates a sequence of ExprCodes with a given mapping between * expressions and common expressions, instead of using the mapping in current context. */ def withSubExprEliminationExprs( newSubExprEliminationExprs: Map[Expression, SubExprEliminationState])( f: => Seq[ExprCode]): Seq[ExprCode] = { val oldsubExprEliminationExprs = subExprEliminationExprs subExprEliminationExprs.clear newSubExprEliminationExprs.foreach(subExprEliminationExprs += _) val genCodes = f // Restore previous subExprEliminationExprs subExprEliminationExprs.clear oldsubExprEliminationExprs.foreach(subExprEliminationExprs += _) genCodes } /** * Checks and sets up the state and codegen for subexpression elimination. This finds the * common subexpressions, generates the code snippets that evaluate those expressions and * populates the mapping of common subexpressions to the generated code snippets. The generated * code snippets will be returned and should be inserted into generated codes before these * common subexpressions actually are used first time. 
*/ def subexpressionEliminationForWholeStageCodegen(expressions: Seq[Expression]): SubExprCodes = { // Create a clear EquivalentExpressions and SubExprEliminationState mapping val equivalentExpressions: EquivalentExpressions = new EquivalentExpressions val subExprEliminationExprs = mutable.HashMap.empty[Expression, SubExprEliminationState] // Add each expression tree and compute the common subexpressions. expressions.foreach(equivalentExpressions.addExprTree(_, true, false)) // Get all the expressions that appear at least twice and set up the state for subexpression // elimination. val commonExprs = equivalentExpressions.getAllEquivalentExprs.filter(_.size > 1) val codes = commonExprs.map { e => val expr = e.head // Generate the code for this expression tree. val code = expr.genCode(this) val state = SubExprEliminationState(code.isNull, code.value) e.foreach(subExprEliminationExprs.put(_, state)) code.code.trim } SubExprCodes(codes, subExprEliminationExprs.toMap) } /** * Checks and sets up the state and codegen for subexpression elimination. This finds the * common subexpressions, generates the functions that evaluate those expressions and populates * the mapping of common subexpressions to the generated functions. */ private def subexpressionElimination(expressions: Seq[Expression]) = { // Add each expression tree and compute the common subexpressions. expressions.foreach(equivalentExpressions.addExprTree(_)) // Get all the expressions that appear at least twice and set up the state for subexpression // elimination. val commonExprs = equivalentExpressions.getAllEquivalentExprs.filter(_.size > 1) commonExprs.foreach { e => val expr = e.head val fnName = freshName("evalExpr") val isNull = s"${fnName}IsNull" val value = s"${fnName}Value" // Generate the code for this expression tree and wrap it in a function. val code = expr.genCode(this) val fn = s""" |private void $fnName(InternalRow $INPUT_ROW) { | ${code.code.trim} | $isNull = ${code.isNull}; | $value = ${code.value}; |} """.stripMargin addNewFunction(fnName, fn) // Add a state and a mapping of the common subexpressions that are associate with this // state. Adding this expression to subExprEliminationExprMap means it will call `fn` // when it is code generated. This decision should be a cost based one. // // The cost of doing subexpression elimination is: // 1. Extra function call, although this is probably *good* as the JIT can decide to // inline or not. // 2. Extra branch to check isLoaded. This branch is likely to be predicted correctly // very often. The reason it is not loaded is because of a prior branch. // 3. Extra store into isLoaded. // The benefit doing subexpression elimination is: // 1. Running the expression logic. Even for a simple expression, it is likely more than 3 // above. // 2. Less code. // Currently, we will do this for all non-leaf only expression trees (i.e. expr trees with // at least two nodes) as the cost of doing it is expected to be low. addMutableState("boolean", isNull, s"$isNull = false;") addMutableState(javaType(expr.dataType), value, s"$value = ${defaultValue(expr.dataType)};") subexprFunctions += s"$fnName($INPUT_ROW);" val state = SubExprEliminationState(isNull, value) e.foreach(subExprEliminationExprs.put(_, state)) } } /** * Generates code for expressions. If doSubexpressionElimination is true, subexpression * elimination will be performed. Subexpression elimination assumes that the code will for each * expression will be combined in the `expressions` order. 
*/ def generateExpressions(expressions: Seq[Expression], doSubexpressionElimination: Boolean = false): Seq[ExprCode] = { if (doSubexpressionElimination) subexpressionElimination(expressions) expressions.map(e => e.genCode(this)) } /** * get a map of the pair of a place holder and a corresponding comment */ def getPlaceHolderToComments(): collection.Map[String, String] = placeHolderToComments /** * Register a comment and return the corresponding place holder */ def registerComment(text: => String): String = { // By default, disable comments in generated code because computing the comments themselves can // be extremely expensive in certain cases, such as deeply-nested expressions which operate over // inputs with wide schemas. For more details on the performance issues that motivated this // flat, see SPARK-15680. if (SparkEnv.get != null && SparkEnv.get.conf.getBoolean("spark.sql.codegen.comments", false)) { val name = freshName("c") val comment = if (text.contains("\\n") || text.contains("\\r")) { text.split("(\\r\\n)|\\r|\\n").mkString("/**\\n * ", "\\n * ", "\\n */") } else { s"// $text" } placeHolderToComments += (name -> comment) s"/*$name*/" } else { "" } } } /** * A wrapper for generated class, defines a `generate` method so that we can pass extra objects * into generated class. */ abstract class GeneratedClass { def generate(references: Array[Any]): Any } /** * A wrapper for the source code to be compiled by [[CodeGenerator]]. */ class CodeAndComment(val body: String, val comment: collection.Map[String, String]) extends Serializable { override def equals(that: Any): Boolean = that match { case t: CodeAndComment if t.body == body => true case _ => false } override def hashCode(): Int = body.hashCode } /** * A base class for generators of byte code to perform expression evaluation. Includes a set of * helpers for referring to Catalyst types and building trees that perform evaluation of individual * expressions. */ abstract class CodeGenerator[InType <: AnyRef, OutType <: AnyRef] extends Logging { protected val genericMutableRowType: String = classOf[GenericInternalRow].getName /** * Generates a class for a given input expression. Called when there is not cached code * already available. */ protected def create(in: InType): OutType /** * Canonicalizes an input expression. Used to avoid double caching expressions that differ only * cosmetically. */ protected def canonicalize(in: InType): InType /** Binds an input expression to a given input schema */ protected def bind(in: InType, inputSchema: Seq[Attribute]): InType /** Generates the requested evaluator binding the given expression(s) to the inputSchema. */ def generate(expressions: InType, inputSchema: Seq[Attribute]): OutType = generate(bind(expressions, inputSchema)) /** Generates the requested evaluator given already bound expression(s). */ def generate(expressions: InType): OutType = create(canonicalize(expressions)) /** * Create a new codegen context for expression evaluator, used to store those * expressions that don't support codegen */ def newCodeGenContext(): CodegenContext = { new CodegenContext } } object CodeGenerator extends Logging { /** * Compile the Java source code into a Java class, using Janino. */ def compile(code: CodeAndComment): GeneratedClass = { cache.get(code) } /** * Compile the Java source code into a Java class, using Janino. 
*/ private[this] def doCompile(code: CodeAndComment): GeneratedClass = { val evaluator = new ClassBodyEvaluator() // A special classloader used to wrap the actual parent classloader of // [[org.codehaus.janino.ClassBodyEvaluator]] (see CodeGenerator.doCompile). This classloader // does not throw a ClassNotFoundException with a cause set (i.e. exception.getCause returns // a null). This classloader is needed because janino will throw the exception directly if // the parent classloader throws a ClassNotFoundException with cause set instead of trying to // find other possible classes (see org.codehaus.janinoClassLoaderIClassLoader's // findIClass method). Please also see https://issues.apache.org/jira/browse/SPARK-15622 and // https://issues.apache.org/jira/browse/SPARK-11636. val parentClassLoader = new ParentClassLoader(Utils.getContextOrSparkClassLoader) evaluator.setParentClassLoader(parentClassLoader) // Cannot be under package codegen, or fail with java.lang.InstantiationException evaluator.setClassName("org.apache.spark.sql.catalyst.expressions.GeneratedClass") evaluator.setDefaultImports(Array( classOf[Platform].getName, classOf[InternalRow].getName, classOf[UnsafeRow].getName, classOf[UTF8String].getName, classOf[Decimal].getName, classOf[CalendarInterval].getName, classOf[ArrayData].getName, classOf[UnsafeArrayData].getName, classOf[MapData].getName, classOf[UnsafeMapData].getName, classOf[Expression].getName )) evaluator.setExtendedClass(classOf[GeneratedClass]) lazy val formatted = CodeFormatter.format(code) logDebug({ // Only add extra debugging info to byte code when we are going to print the source code. evaluator.setDebuggingInformation(true, true, false) s"\\n$formatted" }) try { evaluator.cook("generated.java", code.body) recordCompilationStats(evaluator) } catch { case e: Exception => val msg = s"failed to compile: $e\\n$formatted" logError(msg, e) throw new Exception(msg, e) } evaluator.getClazz().newInstance().asInstanceOf[GeneratedClass] } /** * Records the generated class and method bytecode sizes by inspecting janino private fields. */ private def recordCompilationStats(evaluator: ClassBodyEvaluator): Unit = { // First retrieve the generated classes. val classes = { val resultField = classOf[SimpleCompiler].getDeclaredField("result") resultField.setAccessible(true) val loader = resultField.get(evaluator).asInstanceOf[ByteArrayClassLoader] val classesField = loader.getClass.getDeclaredField("classes") classesField.setAccessible(true) classesField.get(loader).asInstanceOf[JavaMap[String, Array[Byte]]].asScala } // Then walk the classes to get at the method bytecode. val codeAttr = Utils.classForName("org.codehaus.janino.util.ClassFile$CodeAttribute") val codeAttrField = codeAttr.getDeclaredField("code") codeAttrField.setAccessible(true) classes.foreach { case (_, classBytes) => CodegenMetrics.METRIC_GENERATED_CLASS_BYTECODE_SIZE.update(classBytes.length) try { val cf = new ClassFile(new ByteArrayInputStream(classBytes)) cf.methodInfos.asScala.foreach { method => method.getAttributes().foreach { a => if (a.getClass.getName == codeAttr.getName) { CodegenMetrics.METRIC_GENERATED_METHOD_BYTECODE_SIZE.update( codeAttrField.get(a).asInstanceOf[Array[Byte]].length) } } } } catch { case NonFatal(e) => logWarning("Error calculating stats of compiled class.", e) } } } /** * A cache of generated classes. * * From the Guava Docs: A Cache is similar to ConcurrentMap, but not quite the same. 
The most * fundamental difference is that a ConcurrentMap persists all elements that are added to it until * they are explicitly removed. A Cache on the other hand is generally configured to evict entries * automatically, in order to constrain its memory footprint. Note that this cache does not use * weak keys/values and thus does not respond to memory pressure. */ private val cache = CacheBuilder.newBuilder() .maximumSize(100) .build( new CacheLoader[CodeAndComment, GeneratedClass]() { override def load(code: CodeAndComment): GeneratedClass = { val startTime = System.nanoTime() val result = doCompile(code) val endTime = System.nanoTime() def timeMs: Double = (endTime - startTime).toDouble / 1000000 CodegenMetrics.METRIC_SOURCE_CODE_SIZE.update(code.body.length) CodegenMetrics.METRIC_COMPILATION_TIME.update(timeMs.toLong) logInfo(s"Code generated in $timeMs ms") result } }) }
Panos-Bletsos/spark-cost-model-optimizer
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala
Scala
apache-2.0
38,699
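A minimal usage sketch for the CodegenContext shown in the file above, assuming the spark-catalyst artifact this file belongs to is on the classpath; the wrapper object name and the literal values are illustrative only. The printed results follow directly from the freshName, addMutableState, declareMutableStates and initMutableStates definitions above.

import org.apache.spark.sql.catalyst.expressions.codegen.CodegenContext

object CodegenContextSketch {
  def main(args: Array[String]): Unit = {
    // Sketch only: assumes the Spark version this file comes from.
    val ctx = new CodegenContext

    // Fresh names are deduplicated per context by appending a running id.
    val first  = ctx.freshName("value")   // "value"
    val second = ctx.freshName("value")   // "value1"
    println(s"$first, $second")

    // Mutable states become fields of the generated class plus constructor init code.
    ctx.addMutableState("int", "count", "count = 0;")
    println(ctx.declareMutableStates())   // private int count;
    println(ctx.initMutableStates())      // count = 0;
  }
}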
package almhirt import scala.language.implicitConversions import almhirt.common._ import com.ibm.icu.util._ import almhirt.i18n.MeasuredLength /** * Stuff for internationalizations. * * Calculation functions are only to be used for display purposes! */ package object i18n { implicit object ULocaleMagnet extends LocaleMagnet[ULocale] { @inline def toULocale(what: ULocale) = what } implicit object StringLocaleMagnet extends LocaleMagnet[String] { @inline def toULocale(what: String) = new ULocale(what) } implicit object StringStringLocaleMagnet extends LocaleMagnet[(String, String)] { @inline def toULocale(what: (String, String)) = new ULocale(what._1, what._2) } implicit object StringStringStringLocaleMagnet extends LocaleMagnet[(String, String, String)] { @inline def toULocale(what: (String, String, String)) = new ULocale(what._1, what._2, what._3) } implicit object JdkLocaleMagnet extends LocaleMagnet[java.util.Locale] { @inline def toULocale(what: java.util.Locale) = ULocale.forLocale(what) } implicit class ULocaleOps(val self: ULocale) extends AnyVal { def language = self.getLanguage match { case "" ⇒ None case x ⇒ Some(x) } def script = self.getScript match { case "" ⇒ None case x ⇒ Some(x) } def country = self.getCountry match { case "" ⇒ None case x ⇒ Some(x) } } implicit class ResourceValueOps(val self: ResourceValue) extends AnyVal { def toFormatter: AlmValidation[AlmFormatter] = self match { case fmt: IcuResourceValue ⇒ scalaz.Success(new IcuFormatter(fmt.formatInstance)) case raw: RawStringResourceValue ⇒ scalaz.Success(raw) case f: BasicValueResourceValue ⇒ scalaz.Success(f.formatable) } /** * Use to create new formatters. Helpful when the formatter needs to be cloned... */ def toFormatterFun: AlmValidation[() ⇒ AlmFormatter] = self match { case fmt: IcuResourceValue ⇒ scalaz.Success(() ⇒ new IcuFormatter(fmt.formatInstance)) case raw: RawStringResourceValue ⇒ scalaz.Success(() ⇒ raw) case f: BasicValueResourceValue ⇒ scalaz.Success(() ⇒ f.formatable) } } object MeasuredImplicits { implicit final class MeasuredImplicitsOps(private val d: Double) extends AnyVal { def squareMeter = MeasuredArea(d, UnitsOfMeasurement.SquareMeter) def squareCentimeter = MeasuredArea(d, UnitsOfMeasurement.SquareCentimeter) def squareFoot = MeasuredArea(d, UnitsOfMeasurement.SquareFoot) def squareInch = MeasuredArea(d, UnitsOfMeasurement.SquareInch) def squareYard = MeasuredArea(d, UnitsOfMeasurement.SquareYard) def volt = MeasuredVoltage(d, UnitsOfMeasurement.Volt) def ampere = MeasuredCurrent(d, UnitsOfMeasurement.Ampere) def milliampere = MeasuredCurrent(d, UnitsOfMeasurement.Milliampere) def millimeter = MeasuredLength(d, UnitsOfMeasurement.Millimeter) def centimeter = MeasuredLength(d, UnitsOfMeasurement.Centimeter) def meter = MeasuredLength(d, UnitsOfMeasurement.Meter) def kilometer = MeasuredLength(d, UnitsOfMeasurement.Kilometer) def inch = MeasuredLength(d, UnitsOfMeasurement.Inch) def yard = MeasuredLength(d, UnitsOfMeasurement.Yard) def gram = MeasuredMass(d, UnitsOfMeasurement.Gram) def kilogram = MeasuredMass(d, UnitsOfMeasurement.Kilogram) def ton = MeasuredMass(d, UnitsOfMeasurement.Ton) def pound = MeasuredMass(d, UnitsOfMeasurement.Pound) def watt = MeasuredPower(d, UnitsOfMeasurement.Watt) def milliwatt = MeasuredPower(d, UnitsOfMeasurement.Milliwatt) def kilowatt = MeasuredPower(d, UnitsOfMeasurement.Kilowatt) def fahrenheit = MeasuredTemperature(d, UnitsOfMeasurement.Fahrenheit) def celsius = MeasuredTemperature(d, UnitsOfMeasurement.Celsius) def kelvin = MeasuredTemperature(d, 
UnitsOfMeasurement.Kelvin) def lux = MeasuredLight(d, UnitsOfMeasurement.Lux) def lumen = MeasuredLightFlux(d, UnitsOfMeasurement.Lumen) def lumenPerWatt = MeasuredLuminousEfficacy(d, UnitsOfMeasurement.LumenPerWatt) } implicit final class MeasuredLengthsOps(private val measured: MeasuredLength) extends AnyVal { def asMillimeter = measured.to(UnitsOfMeasurement.Millimeter) def asCentimeter = measured.to(UnitsOfMeasurement.Centimeter) def asMeter = measured.to(UnitsOfMeasurement.Meter) def asKilometer = measured.to(UnitsOfMeasurement.Kilometer) def asInch = measured.to(UnitsOfMeasurement.Inch) } } }
chridou/almhirt
almhirt-i18n/src/main/scala/almhirt/i18n/package.scala
Scala
apache-2.0
4,743
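A small usage sketch for the MeasuredImplicits ops defined above, assuming the almhirt-i18n artifact is on the classpath; the wrapper object and the numeric values are illustrative, and the exact result type of MeasuredLength.to is not shown in this file, so only toString output is printed.

import almhirt.i18n.MeasuredImplicits._

object MeasuredUnitsSketch {
  def main(args: Array[String]): Unit = {
    // Doubles are lifted to measured values by the implicit ops above.
    val height   = 1.83.meter                 // MeasuredLength in meters
    val distance = 1500.0.meter.asKilometer   // converted via to(UnitsOfMeasurement.Kilometer)
    val roomTemp = 21.5.celsius               // MeasuredTemperature

    println(height)
    println(distance)
    println(roomTemp)
  }
}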
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.python import scala.collection.JavaConverters._ import net.razorvine.pickle.{Pickler, Unpickler} import org.apache.spark.TaskContext import org.apache.spark.api.python.{ChainedPythonFunctions, PythonEvalType} import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.execution.SparkPlan import org.apache.spark.sql.types.{StructField, StructType} /** * A physical plan that evaluates a [[PythonUDF]] */ case class BatchEvalPythonExec(udfs: Seq[PythonUDF], resultAttrs: Seq[Attribute], child: SparkPlan) extends EvalPythonExec { protected override def evaluate( funcs: Seq[ChainedPythonFunctions], argOffsets: Array[Array[Int]], iter: Iterator[InternalRow], schema: StructType, context: TaskContext): Iterator[InternalRow] = { EvaluatePython.registerPicklers() // register pickler for Row val dataTypes = schema.map(_.dataType) val needConversion = dataTypes.exists(EvaluatePython.needConversionInPython) // enable memo iff we serialize the row with schema (schema and class should be memorized) val pickle = new Pickler(needConversion) // Input iterator to Python: input rows are grouped so we send them in batches to Python. // For each row, add it to the queue. val inputIterator = iter.map { row => if (needConversion) { EvaluatePython.toJava(row, schema) } else { // fast path for these types that does not need conversion in Python val fields = new Array[Any](row.numFields) var i = 0 while (i < row.numFields) { val dt = dataTypes(i) fields(i) = EvaluatePython.toJava(row.get(i, dt), dt) i += 1 } fields } }.grouped(100).map(x => pickle.dumps(x.toArray)) // Output iterator for results from Python. val outputIterator = new PythonUDFRunner(funcs, PythonEvalType.SQL_BATCHED_UDF, argOffsets) .compute(inputIterator, context.partitionId(), context) val unpickle = new Unpickler val mutableRow = new GenericInternalRow(1) val resultType = if (udfs.length == 1) { udfs.head.dataType } else { StructType(udfs.map(u => StructField("", u.dataType, u.nullable))) } val fromJava = EvaluatePython.makeFromJava(resultType) outputIterator.flatMap { pickedResult => val unpickledBatch = unpickle.loads(pickedResult) unpickledBatch.asInstanceOf[java.util.ArrayList[Any]].asScala }.map { result => if (udfs.length == 1) { // fast path for single UDF mutableRow(0) = fromJava(result) mutableRow } else { fromJava(result).asInstanceOf[InternalRow] } } } }
goldmedal/spark
sql/core/src/main/scala/org/apache/spark/sql/execution/python/BatchEvalPythonExec.scala
Scala
apache-2.0
3,570
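A dependency-free sketch of the batching pattern used by BatchEvalPythonExec above: rows are converted one by one, grouped into batches of 100, and each batch is then serialized as a single unit. Here a plain array stands in for pickle.dumps; the object name and sample data are illustrative.

object RowBatchingSketch {
  def main(args: Array[String]): Unit = {
    // Stand-in for the input iterator of converted rows.
    val rows: Iterator[Array[Any]] = (1 to 250).iterator.map(i => Array[Any](i, s"name$i"))

    // Mirrors iter.map(convert).grouped(100).map(pickle.dumps) from the plan above.
    val batches = rows.grouped(100).map(batch => batch.toArray)

    batches.zipWithIndex.foreach { case (batch, i) =>
      println(s"batch $i carries ${batch.length} rows")   // 100, 100, 50
    }
  }
}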
package com.outr.stripe.card

import scala.scalajs.js
import scala.scalajs.js.|

@js.native
trait StripeCard extends js.Object {
  def createToken(info: StripeCardInfo, responseHandler: js.Function2[Int, CardTokenInfo, Unit]): Unit = js.native

  def validateCardNumber(number: String): Boolean = js.native

  def validateExpiry(month: Int | String, year: Int | String): Boolean = js.native

  def validateCVC(cvc: String): Boolean = js.native

  def cardType(number: String): String = js.native
}
outr/scala-stripe
core/js/src/main/scala/com/outr/stripe/card/StripeCard.scala
Scala
mit
494
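A Scala.js-only sketch of calling the facade above. It assumes Stripe.js is loaded on the page and that the caller passes in an instance bound to the global Stripe.card object, since that binding is not part of this file; the object name and test card number are illustrative.

import com.outr.stripe.card.StripeCard

object StripeCardSketch {
  // The caller supplies the facade instance (e.g. obtained from the Stripe.js global).
  def looksValid(card: StripeCard): Boolean = {
    val number = "4242424242424242"   // Stripe's public test card number
    card.validateCardNumber(number) &&
      card.validateExpiry(12, 2030) &&
      card.validateCVC("123")
  }
}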
package de.endrullis.firefoxstateapi import java.io.File import java.nio.file.Files import de.endrullis.firefoxstateapi.FirefoxState._ import scala.io.Source import scala.util.matching.Regex /** * An API to access the state of the Firefox web browser. * * @author Stefan Endrullis &lt;[email protected]&gt; */ class FirefoxStateApi { private val ProfileNamePattern: Regex = "Path=([^.]*)[.]default".r private val homeDir: String = System.getProperty("user.home") private val profilesFile = new File(s"$homeDir/.mozilla/firefox/profiles.ini") /** All profiles. */ val profiles: List[String] = { if (profilesFile.exists()) { Source.fromFile(profilesFile, "UTF-8").getLines().collect { case ProfileNamePattern(name) => name }.toList } else { Nil } } /** Selected profile. */ var profile: String = profiles.head /** The recovery.js file. */ def recoveryLz4File = new File(s"$homeDir/.mozilla/firefox/$profile.default/sessionstore-backups/recovery.jsonlz4") def recoveryFile = new File(s"$homeDir/.mozilla/firefox/$profile.default/sessionstore-backups/recovery.js") /** The cached state of the recovery.js file. */ var state: FirefoxState = _ /** Updates the cached state of the recovery.js and returns it. */ def updateState(): FirefoxState = { if (recoveryLz4File.exists()) { import net.jpountz.lz4.LZ4Factory val factory = LZ4Factory.fastestInstance val compressedBytes = Files.readAllBytes(recoveryLz4File.toPath) val decompressedBytes = new Array[Byte](10000000) val decompressor = factory.safeDecompressor val decompressedLength = decompressor.decompress(compressedBytes, 8+4, compressedBytes.length-8-4, decompressedBytes, 0) val content = new String(decompressedBytes, 0, decompressedLength, "UTF-8") state = FirefoxState.parse(content) } else { state = FirefoxState.parse(recoveryFile) } state } /** Firefox session. */ def session: Session = state.session /** All Firefox cookies. */ def cookies: List[Cookie] = state.cookies /** All Firefox windows. */ def windows: List[Window] = state.windows /** The selected Firefox window. */ def selectedWindow: Window = state.selectedWindow /** The selected Firefox tab. */ def selectedTab: Tab = selectedWindow.selectedTab /** The selected Firefox tab entry. */ def selectedTabEntry: Entry = selectedTab.selectedEntry }
xylo/FirefoxStateApi
src/main/scala/de/endrullis/firefoxstateapi/FirefoxStateApi.scala
Scala
apache-2.0
2,380
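A usage sketch for the API above; it assumes a Linux machine with at least one default Firefox profile under ~/.mozilla/firefox and, for the compressed case, the lz4-java dependency used by updateState. The wrapper object name is illustrative.

import de.endrullis.firefoxstateapi.FirefoxStateApi

object FirefoxStateSketch {
  def main(args: Array[String]): Unit = {
    val api = new FirefoxStateApi            // picks profiles.head as the default profile
    println(s"profiles found: ${api.profiles.mkString(", ")}")

    api.updateState()                        // reads recovery.jsonlz4 or recovery.js
    println(s"open windows: ${api.windows.size}")
    println(s"selected tab entry: ${api.selectedTabEntry}")
  }
}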
package testcode

class Foo {
  TestMacro.call()
}
dotty-staging/dotty
tests/pos/i13532/Foo.scala
Scala
apache-2.0
49
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.s2graph.rest.play import java.text.SimpleDateFormat /** * Created by alec on 15. 4. 20.. */ package object models { def tsFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss") }
daewon/incubator-s2graph
s2rest_play/app/org/apache/s2graph/rest/play/models/package.scala
Scala
apache-2.0
1,013
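A tiny usage note: tsFormat is a def, so every call builds a fresh SimpleDateFormat, presumably because SimpleDateFormat is not thread-safe. A sketch (the object name is illustrative, and the printed value depends on the local time zone):

import org.apache.s2graph.rest.play.models._

object TsFormatSketch {
  def main(args: Array[String]): Unit = {
    // Each call returns a new formatter with the pattern "yyyy-MM-dd HH:mm:ss".
    println(tsFormat.format(new java.util.Date(0L)))   // e.g. "1970-01-01 00:00:00"
  }
}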
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.github.cloudml.zen.ml.clustering import breeze.linalg.{DenseVector => BDV} import com.github.cloudml.zen.ml.clustering.LDADefines._ import com.github.cloudml.zen.ml.clustering.algorithm.LDATrainer import com.github.cloudml.zen.ml.partitioner._ import com.github.cloudml.zen.ml.util._ import org.apache.hadoop.fs.Path import org.apache.spark.broadcast.Broadcast import org.apache.spark.graphx2._ import org.apache.spark.graphx2.impl._ import org.apache.spark.mllib.linalg.distributed.{MatrixEntry, RowMatrix} import org.apache.spark.mllib.linalg.{SparseVector => SSV, Vector => SV} import org.apache.spark.rdd.RDD import org.apache.spark.storage.StorageLevel class LDA(@transient var edges: EdgeRDDImpl[TA, _], @transient var verts: VertexRDDImpl[TC], val numTopics: Int, val numTerms: Int, val numDocs: Long, val numTokens: Long, var alpha: Double, var beta: Double, var alphaAS: Double, val algo: LDATrainer, var storageLevel: StorageLevel) extends Serializable { @transient var globalCountersBc: Broadcast[LDAGlobalCounters] = _ @transient lazy val seed = (new XORShiftRandom).nextInt() @transient var edgeCpFile: String = _ @transient var vertCpFile: String = _ def setAlpha(alpha: Double): this.type = { this.alpha = alpha this } def setBeta(beta: Double): this.type = { this.beta = beta this } def setAlphaAS(alphaAS: Double): this.type = { this.alphaAS = alphaAS this } def setStorageLevel(storageLevel: StorageLevel): this.type = { this.storageLevel = storageLevel this } def termVertices: VertexRDD[TC] = verts.filter(t => isTermId(t._1)) def docVertices: VertexRDD[TC] = verts.filter(t => isDocId(t._1)) @inline private def scContext = edges.context @inline private def scConf = scContext.getConf def init(computedModel: Option[RDD[NwkPair]] = None): Unit = { val initPartRDD = edges.partitionsRDD.mapPartitions(_.map { case (pid, ep) => (pid, algo.initEdgePartition(ep)) }, preservesPartitioning=true) val newEdges = edges.withPartitionsRDD(initPartRDD) newEdges.persist(storageLevel).setName("edges-0") edges = newEdges verts = algo.updateVertexCounters(newEdges, verts) verts = computedModel match { case Some(cm) => val ccm = compressCounterRDD(cm, numTopics) verts.leftJoin(ccm)((_, uc, cc) => cc.getOrElse(uc)).asInstanceOf[VertexRDDImpl[TC]] case None => verts } verts.persist(storageLevel).setName("vertices-0") val globalCounters = algo.collectGlobalCounters(verts) globalCountersBc = scContext.broadcast(globalCounters) } def runGibbsSampling(totalIter: Int): Unit = { val evalMetrics = scConf.get(cs_evalMetric, "none").split(raw"\\+") val toEval = !evalMetrics.contains("none") val saveIntv = scConf.getInt(cs_saveInterval, 0) if (toEval) { println("Before Gibbs sampling:") LDAMetrics(this, evalMetrics, 
lastIter=false).foreach(_.output(println)) } var iter = 1 while (iter <= totalIter) { println(s"\\nStart Gibbs sampling (Iteration $iter/$totalIter)") val startedAt = System.nanoTime gibbsSampling(iter) if (toEval) { LDAMetrics(this, evalMetrics, iter == totalIter).foreach(_.output(println)) } if (saveIntv > 0 && iter % saveIntv == 0 && iter < totalIter) { val model = toLDAModel val savPath = new Path(scConf.get(cs_outputpath) + s"-iter$iter") val fs = SparkUtils.getFileSystem(scConf, savPath) fs.delete(savPath, true) model.save(scContext, savPath.toString) println(s"Model saved after Iteration $iter") } val elapsedSeconds = (System.nanoTime - startedAt) / 1e9 println(s"End Gibbs sampling (Iteration $iter/$totalIter) takes total: $elapsedSeconds secs") iter += 1 } } def gibbsSampling(sampIter: Int): Unit = { val chkptIntv = scConf.getInt(cs_chkptInterval, 0) val needChkpt = chkptIntv > 0 && sampIter % chkptIntv == 1 && scContext.getCheckpointDir.isDefined val startedAt = System.nanoTime val newEdges = algo.sampleGraph(edges, verts, globalCountersBc, seed, sampIter, numTokens, numTerms, alpha, alphaAS, beta) newEdges.persist(storageLevel).setName(s"edges-$sampIter") if (needChkpt) { newEdges.checkpoint() newEdges.partitionsRDD.count() } val newVerts = algo.updateVertexCounters(newEdges, verts) newVerts.persist(storageLevel).setName(s"vertices-$sampIter") if (needChkpt) { newVerts.checkpoint() } val newGlobalCounters = algo.collectGlobalCounters(newVerts) val count = newGlobalCounters.data.par.sum assert(count == numTokens, s"numTokens=$numTokens, count=$count") edges.unpersist(blocking=false) verts.unpersist(blocking=false) globalCountersBc.unpersist(blocking=false) edges = newEdges verts = newVerts globalCountersBc = scContext.broadcast(newGlobalCounters) if (needChkpt) { if (edgeCpFile != null && vertCpFile != null) { SparkUtils.deleteChkptDirs(scConf, Array(edgeCpFile, vertCpFile)) } edgeCpFile = newEdges.getCheckpointFile.get vertCpFile = newVerts.getCheckpointFile.get } val elapsedSeconds = (System.nanoTime - startedAt) / 1e9 println(s"Sampling & update paras $sampIter takes: $elapsedSeconds secs") } def toLDAModel: DistributedLDAModel = { val termTopicsRDD = decompressVertexRDD(termVertices, numTopics) termTopicsRDD.persist(storageLevel) new DistributedLDAModel(termTopicsRDD, numTopics, numTerms, numTokens, alpha, beta, alphaAS, storageLevel) } // /** // * run more iters, return averaged counters // * @param filter return which vertices // * @param runIter saved more these iters' averaged model // */ // def runSum(filter: VertexId => Boolean, // runIter: Int = 0): RDD[(VertexId, TC)] = { // def vertices = verts.filter(t => filter(t._1)) // var countersSum = vertices // countersSum.persist(storageLevel) // var iter = 1 // while (iter <= runIter) { // println(s"Save TopicModel (Iteration $iter/$runIter)") // gibbsSampling(iter) // countersSum = countersSum.innerZipJoin(vertices)((_, a, b) => a :+= b) // countersSum.persist(storageLevel) // iter += 1 // } // countersSum // } // def mergeDuplicateTopic(threshold: Double = 0.95D): Map[Int, Int] = { // val rows = termVertices.map(t => t._2).map(v => { // val length = v.length // val index = v.activeKeysIterator.toArray // val data = v.activeValuesIterator.toArray.map(_.toDouble) // new SSV(length, index, data).asInstanceOf[SV] // }) // val simMatrix = new RowMatrix(rows).columnSimilarities() // val minMap = simMatrix.entries.filter { // case MatrixEntry(row, column, sim) => sim > threshold && row != column // }.map { // case 
MatrixEntry(row, column, sim) => (column.toInt, row.toInt) // }.groupByKey().map { // case (topic, simTopics) => (topic, simTopics.min) // }.collect().toMap // if (minMap.nonEmpty) { // val mergingCorpus = corpus.mapEdges(_.attr.map(topic => // minMap.getOrElse(topic, topic)) // ) // corpus = algo.updateVertexCounters(mergingCorpus, numTopics) // } // minMap // } } object LDA { def apply(docs: EdgeRDD[TA], numTopics: Int, alpha: Double, beta: Double, alphaAS: Double, algo: LDATrainer, storageLevel: StorageLevel): LDA = { val initCorpus = LBVertexRDDBuilder.fromEdgeRDD[TC, TA](docs, storageLevel) initCorpus.persist(storageLevel) val edges = initCorpus.edges val numTokens = edges.count() println(s"tokens in the corpus: $numTokens") val verts = initCorpus.vertices.asInstanceOf[VertexRDDImpl[TC]] val numTerms = verts.map(_._1).filter(isTermId).count().toInt println(s"terms in the corpus: $numTerms") val numDocs = verts.map(_._1).filter(isDocId).count() println(s"docs in the corpus: $numDocs") val lda = new LDA(edges, verts, numTopics, numTerms, numDocs, numTokens, alpha, beta, alphaAS, algo, storageLevel) lda.init() initCorpus.unpersist() lda } // initialize LDA for inference or incremental training def apply(computedModel: DistributedLDAModel, docs: EdgeRDD[TA], algo: LDATrainer): LDA = { val numTopics = computedModel.numTopics val numTerms = computedModel.numTerms val numTokens = computedModel.numTokens val alpha = computedModel.alpha val beta = computedModel.beta val alphaAS = computedModel.alphaAS val storageLevel = computedModel.storageLevel println(s"tokens in the corpus: $numTokens") println(s"terms in the corpus: $numTerms") val initCorpus = LBVertexRDDBuilder.fromEdgeRDD[TC, TA](docs, storageLevel) initCorpus.persist(storageLevel) val edges = initCorpus.edges val verts = initCorpus.vertices.asInstanceOf[VertexRDDImpl[TC]] val numDocs = verts.map(_._1).filter(isDocId).count() println(s"docs in the corpus: $numDocs") val lda = new LDA(edges, verts, numTopics, numTerms, numDocs, numTokens, alpha, beta, alphaAS, algo, storageLevel) lda.init(Some(computedModel.termTopicsRDD)) verts.unpersist() lda } /** * LDA training * @param docs EdgeRDD of corpus * @param totalIter the number of iterations * @param numTopics the number of topics (5000+ for large data) * @param alpha recommend to be (5.0 /numTopics) * @param beta recommend to be in range 0.001 - 0.1 * @param alphaAS recommend to be in range 0.01 - 1.0 * @param algo LDA training algorithm used * @param storageLevel StorageLevel that the LDA Model RDD uses * @return DistributedLDAModel */ def train(docs: EdgeRDD[TA], totalIter: Int, numTopics: Int, alpha: Double, beta: Double, alphaAS: Double, algo: LDATrainer, storageLevel: StorageLevel): DistributedLDAModel = { val lda = LDA(docs, numTopics, alpha, beta, alphaAS, algo, storageLevel) lda.runGibbsSampling(totalIter) lda.toLDAModel } def incrementalTrain(docs: EdgeRDD[TA], computedModel: DistributedLDAModel, totalIter: Int, algo: LDATrainer, storageLevel: StorageLevel): DistributedLDAModel = { val lda = LDA(computedModel, docs, algo) var iter = 1 while (iter <= 15) { lda.gibbsSampling(iter) iter += 1 } lda.runGibbsSampling(totalIter) lda.toLDAModel } /** * @param orgDocs RDD of documents, which are term (word) count vectors paired with IDs. * The term count vectors are "bags of words" with a fixed-size vocabulary * (where the vocabulary size is the length of the vector). * Document IDs must be unique and >= 0. 
*/ def initializeCorpusEdges(orgDocs: RDD[_], docType: String, numTopics: Int, algo: LDATrainer, storageLevel: StorageLevel): EdgeRDD[TA] = { val conf = orgDocs.context.getConf val ignDid = conf.getBoolean(cs_ignoreDocId, false) val partStrategy = conf.get(cs_partStrategy, "dbh") val initStrategy = conf.get(cs_initStrategy, "random") val byDoc = algo.isByDoc val docs = docType match { case "raw" => convertRawDocs(orgDocs.asInstanceOf[RDD[String]], numTopics, ignDid, byDoc) case "bow" => convertBowDocs(orgDocs.asInstanceOf[RDD[BOW]], numTopics, ignDid, byDoc) } val graph: Graph[TC, TA] = LBVertexRDDBuilder.fromEdges(docs, storageLevel) graph.persist(storageLevel) graph.edges.setName("rawEdges").count() val partCorpus = partitionCorpus(graph, partStrategy, byDoc, storageLevel) val initCorpus = reinitCorpus(partCorpus, initStrategy, numTopics, storageLevel) val edges = initCorpus.edges edges.persist(storageLevel).count() graph.unpersist() edges } def convertRawDocs(rawDocs: RDD[String], numTopics: Int, ignDid: Boolean, byDoc: Boolean): RDD[Edge[TA]] = { rawDocs.mapPartitionsWithIndex { (pid, iter) => val gen = new XORShiftRandom(pid + 117) var pidMark = pid.toLong << 48 iter.flatMap { line => val tokens = line.split(raw"\\t|\\s+").view val docId = if (ignDid) { pidMark += 1 pidMark } else { tokens.head.split(":")(0).toLong } tokens.tail.flatMap { field => val pairs = field.split(":") val termId = pairs(0).toInt val termCnt = if (pairs.length > 1) pairs(1).toInt else 1 if (termCnt > 0) { Range(0, termCnt).map(_ => if (byDoc) { Edge(genNewDocId(docId), termId, gen.nextInt(numTopics)) } else { Edge(termId, genNewDocId(docId), gen.nextInt(numTopics)) }) } else { Iterator.empty } } } } } def convertBowDocs(bowDocs: RDD[BOW], numTopics: Int, ignDid: Boolean, byDoc: Boolean): RDD[Edge[TA]] = { bowDocs.mapPartitionsWithIndex { (pid, iter) => val gen = new XORShiftRandom(pid + 117) var pidMark = pid.toLong << 48 iter.flatMap { case (oDocId, tokens) => val docId = if (ignDid) { pidMark += 1 pidMark } else { oDocId } tokens.activeIterator.filter(_._2 > 0).flatMap { case (termId, termCnt) => Range(0, termCnt).map(_ => if (byDoc) { Edge(genNewDocId(docId), termId, gen.nextInt(numTopics)) } else { Edge(termId, genNewDocId(docId), gen.nextInt(numTopics)) }) } } } } def partitionCorpus(corpus: Graph[TC, TA], partStrategy: String, byDoc: Boolean, storageLevel: StorageLevel): Graph[TC, TA] = partStrategy match { case "direct" => println("don't repartition, directly build graph.") corpus case "byterm" => println("partition corpus by terms.") if (byDoc) { EdgeDstPartitioner.partitionByEDP[TC, TA](corpus, storageLevel) } else { EdgeSrcPartitioner.partitionByESP[TC, TA](corpus, storageLevel) } case "bydoc" => println("partition corpus by docs.") if (byDoc) { EdgeSrcPartitioner.partitionByESP[TC, TA](corpus, storageLevel) } else { EdgeDstPartitioner.partitionByEDP[TC, TA](corpus, storageLevel) } case "edge2d" => println("using Edge2D partition strategy.") Edge2DPartitioner.partitionByEdge2D[TC, TA](corpus, storageLevel) case "dbh" => println("using Degree-based Hashing partition strategy.") DBHPartitioner.partitionByDBH[TC, TA](corpus, 0, storageLevel) case "dbh+" => println("using Degree-based Hashing Plus partition strategy.") DBHPartitioner.partitionByDBH[TC, TA](corpus, 20, storageLevel) case "vsdlp" => println("using Vertex-cut Stochastic Dynamic Label Propagation partition strategy.") VSDLPPartitioner.partitionByVSDLP[TC, TA](corpus, 4, storageLevel) case "bbr" => println("using Bounded & Balanced Rearranger 
partition strategy.") BBRPartitioner.partitionByBBR[TC, TA](corpus, storageLevel) case _ => throw new NoSuchMethodException("No this algorithm or not implemented.") } def reinitCorpus(corpus: Graph[TC, TA], initStrategy: String, numTopics: Int, storageLevel: StorageLevel): Graph[TC, TA] = initStrategy match { case "random" => println("fully randomized initialization.") corpus case "sparseterm" => println("sparsely init on terms.") corpus.persist(storageLevel) val gen = new XORShiftRandom val tMin = math.max(100, numTopics / 10) val degGraph = GraphImpl(corpus.degrees, corpus.edges) val reSampledGraph = degGraph.mapVertices { (vid, deg) => if (isTermId(vid) && deg > tMin) { Array.fill(tMin)(gen.nextInt(numTopics)) } else { null } }.mapTriplets((pid, iter) => { val gen = new XORShiftRandom(pid + 223) iter.map { triplet => val wc = triplet.srcAttr if (wc == null) triplet.attr else wc(gen.nextInt(wc.length)) } }, TripletFields.Src) GraphImpl(corpus.vertices, reSampledGraph.edges) case "sparsedoc" => println("sparsely init on docs.") corpus.persist(storageLevel) val gen = new XORShiftRandom val degGraph = GraphImpl(corpus.degrees, corpus.edges) val reSampledGraph = degGraph.mapVertices { (vid, deg) => val tMin = math.max(10, deg / 10) if (isDocId(vid) && deg > tMin) { Array.fill(tMin)(gen.nextInt(numTopics)) } else { null } }.mapTriplets((pid, iter) => { val gen = new XORShiftRandom(pid + 223) iter.map { triplet => val wc = triplet.dstAttr if (wc == null) triplet.attr else wc(gen.nextInt(wc.length)) } }, TripletFields.Dst) GraphImpl(corpus.vertices, reSampledGraph.edges) case _ => throw new NoSuchMethodException("No this algorithm or not implemented.") } }
bhoppi/zen
ml/src/main/scala/com/github/cloudml/zen/ml/clustering/LDA.scala
Scala
apache-2.0
17,957
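A sketch of wiring the entry points above together, assuming a running SparkContext, a corpus RDD in the "raw" format parsed by convertRawDocs (a document id followed by whitespace-separated termId:count fields), and a concrete LDATrainer; how that trainer is constructed is project-specific and not shown in this file. The hyperparameter values follow the ranges suggested in the train scaladoc; the object name and topic count are illustrative.

import com.github.cloudml.zen.ml.clustering.LDA
import com.github.cloudml.zen.ml.clustering.algorithm.LDATrainer
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel

object LDATrainingSketch {
  def run(rawDocs: RDD[String], algo: LDATrainer): Unit = {
    val numTopics = 100
    val level = StorageLevel.MEMORY_AND_DISK

    // Build the bipartite doc-term edge RDD, partitioned per the configured strategy.
    val edges = LDA.initializeCorpusEdges(rawDocs, "raw", numTopics, algo, level)

    // Run Gibbs sampling; alpha/beta/alphaAS follow the recommendations above.
    val model = LDA.train(edges, totalIter = 150, numTopics = numTopics,
      alpha = 5.0 / numTopics, beta = 0.01, alphaAS = 0.1,
      algo = algo, storageLevel = level)

    println(s"trained ${model.numTopics} topics over ${model.numTerms} terms " +
      s"and ${model.numTokens} tokens")
  }
}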
package org.jetbrains.plugins.scala.lang.transformation.calls

import org.jetbrains.plugins.scala.lang.transformation.TransformerTest

/**
  * @author Pavel Fatin
  */
class ExpandUnaryCallTest extends TransformerTest(ExpandUnaryCall,
  """
    object O {
      def unary_! {}
      def !(p: A) {}
    }
  """) {

  def testImplicit() = check(
    "!O",
    "O.unary_!"
  )

  def testSynthetic() = check(
    "!true",
    "true.unary_!"
  )

  def testExplicit() = check(
    "O.unary_!",
    "O.unary_!"
  )

  def testOtherPrefix() = check(
    "+O",
    "+O"
  )

  def testOtherMethod() = check(
    "O.!(A)",
    "O.!(A)"
  )

  // TODO test renamed method
}
whorbowicz/intellij-scala
test/org/jetbrains/plugins/scala/lang/transformation/calls/ExpandUnaryCallTest.scala
Scala
apache-2.0
670
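A plain-Scala illustration of the rewrite the test above checks: a prefix !x is sugar for the method call x.unary_!, which is exactly what ExpandUnaryCall makes explicit. The Toggle class is a made-up example.

object UnaryDesugarSketch {
  class Toggle(val on: Boolean) {
    def unary_! : Toggle = new Toggle(!on)
  }

  def main(args: Array[String]): Unit = {
    val t = new Toggle(true)
    println((!t).on)        // false: the compiler rewrites !t to t.unary_!
    println(t.unary_!.on)   // false: the same call written explicitly
  }
}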
package org.nkvoll.gpsd.client

import com.fasterxml.jackson.databind.ObjectMapper

package object commands {
  sealed trait GPSCommand {
    def serialize(): Array[Byte]
  }

  object GPSCommand {
    val mapper = new ObjectMapper()
    val watchPrefix = "?WATCH=".getBytes
  }

  case class Watch(enable: Boolean, json: Boolean) extends GPSCommand {
    def serialize(): Array[Byte] = {
      val node = GPSCommand.mapper.createObjectNode()
      node.put("enable", enable)
      if(json) node.put("json", json)

      GPSCommand.watchPrefix ++ GPSCommand.mapper.writeValueAsBytes(node)
    }
  }
}
nkvoll/scala-gpsd
src/main/scala/org/nkvoll/gpsd/client/commands/package.scala
Scala
mit
605
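A quick sketch of the command serialization above (assumes jackson-databind on the classpath, as the ObjectMapper import requires): the WATCH command renders to gpsd's ?WATCH={...} wire format. The wrapper object name is illustrative.

import org.nkvoll.gpsd.client.commands.Watch

object WatchCommandSketch {
  def main(args: Array[String]): Unit = {
    val bytes = Watch(enable = true, json = true).serialize()
    // Prints something like: ?WATCH={"enable":true,"json":true}
    println(new String(bytes, "UTF-8"))
  }
}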
package org.jetbrains.plugins.scala package lang.psi.types import com.intellij.psi.search.LocalSearchScope import com.intellij.psi.search.searches.ClassInheritorsSearch import com.intellij.psi.{PsiClass, PsiTypeParameter} import org.jetbrains.plugins.scala.extensions._ import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScTypeParam import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScModifierListOwner import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScTypeDefinition import org.jetbrains.plugins.scala.lang.psi.types.api.{Contravariant, Covariant, Invariant, ParameterizedType, Variance} import org.jetbrains.plugins.scala.util.ScEquivalenceUtil.areClassesEquivalent /** * Nikolay.Tropin * 2014-04-03 */ object ComparingUtil { //this relation is actually symmetric def isNeverSubClass(clazz1: PsiClass, clazz2: PsiClass): Boolean = { val classes = Seq(clazz1, clazz2) val oneFinal = clazz1.isEffectivelyFinal || clazz2.isEffectivelyFinal val twoNonTraitsOrInterfaces = !classes.exists(_.isInterface) def inheritorsInSameFile(clazz: PsiClass) = { import scala.collection.JavaConversions._ ClassInheritorsSearch.search(clazz, new LocalSearchScope(clazz.getContainingFile), true).findAll().collect { case x: ScTypeDefinition => x } } def sealedAndAllChildrenAreIrreconcilable = { val areSealed = classes.forall{ case modOwner: ScModifierListOwner => modOwner.hasModifierProperty("sealed") case _ => false } def childrenAreIrreconcilable = inheritorsInSameFile(clazz1).forall { c1 => inheritorsInSameFile(clazz2).forall { c2 => isNeverSubClass(c1, c2) } } areSealed && childrenAreIrreconcilable } val areUnrelatedClasses = !areClassesEquivalent(clazz1, clazz2) && !(clazz1.isInheritor(clazz2, true) || clazz2.isInheritor(clazz1, true)) areUnrelatedClasses && (oneFinal || twoNonTraitsOrInterfaces || sealedAndAllChildrenAreIrreconcilable) } def isNeverSubType(tp1: ScType, tp2: ScType, sameType: Boolean = false): Boolean = { if (tp2.weakConforms(tp1) || tp1.weakConforms(tp2)) return false val Seq(clazzOpt1, clazzOpt2) = Seq(tp1, tp2).map(_.tryExtractDesignatorSingleton.extractClass) if (clazzOpt1.isEmpty || clazzOpt2.isEmpty) return false val (clazz1, clazz2) = (clazzOpt1.get, clazzOpt2.get) def isNeverSameType(tp1: ScType, tp2: ScType) = isNeverSubType(tp1, tp2, sameType = true) def isNeverSubArgs(tps1: Seq[ScType], tps2: Seq[ScType], tparams: Seq[PsiTypeParameter]): Boolean = { def isNeverSubArg(t1: ScType, t2: ScType, variance: Variance) = { variance match { case Covariant => isNeverSubType(t2, t1) case Contravariant => isNeverSubType(t1, t2) case Invariant => isNeverSameType(t1, t2) } } def getVariance(tp: PsiTypeParameter) = tp match { case scParam: ScTypeParam => scParam.variance case _ => Invariant } tps1.zip(tps2).zip(tparams.map(getVariance)) exists { case ((t1, t2), vr) => isNeverSubArg(t1, t2, vr) case _ => false } } def neverSubArgs() = { (tp1, tp2) match { case (ParameterizedType(_, args1), ParameterizedType(_, args2)) => isNeverSubArgs(args1, args2, clazz2.getTypeParameters) case _ => false } } isNeverSubClass(clazz1, clazz2) || ((areClassesEquivalent(clazz1, clazz2) || (!sameType) && clazz1.isInheritor(clazz2, true)) && neverSubArgs()) } }
loskutov/intellij-scala
src/org/jetbrains/plugins/scala/lang/psi/types/ComparingUtil.scala
Scala
apache-2.0
3,615
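A plain-Scala aside (illustrative only, not part of the original and not using the IntelliJ PSI types above) showing why `isNeverSubArg` flips the comparison direction for covariant versus contravariant parameters:

object VarianceSketch extends App {
  // List[+A] is covariant: List[Int] conforms to List[AnyVal] because Int <: AnyVal.
  val widened: List[AnyVal] = List[Int](1, 2, 3)
  // Function1[-T, +R] is contravariant in T: (AnyVal => String) conforms to (Int => String).
  val narrowedArg: Int => String = (v: AnyVal) => v.toString
  println(widened)
  println(narrowedArg(1))
}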
package Scalisp

class InterpreterException(s: String) extends Exception(s) { }
class VariableNotFound(s: String) extends InterpreterException(s) { }
class MethodNotFound(s: String) extends InterpreterException(s) { }
class InvalidName(s: String) extends InterpreterException(s) { }
class TypeError(s: String) extends InterpreterException(s) { }

case class Function(parms: List[String], body: Any) {
  def arity = parms.length
}

class FunctionTable() {
  val fs = collection.mutable.Map[Int, Function]()
  def add(f: Function) { fs(f.arity) = f }
  def apply(arity: Int): Option[Function] = fs.get(arity)
  override def toString = "Function table: " + fs.mkString("\\n")
}

object Interpreter {
  def eval(expression: Any, environment: Env): Any = {
    var exp = expression
    var env = environment
    while(true) {
      exp match {
        case l: List[Any] => l.head match {
          // special forms
          case "if" => eval(l(1), env) match {
            case true => exp = l(2)
            case false => exp = l(3)
          }
          case "define" => return l(1) match {
            case name: String => env.define(name, eval(l(2), env))
            case _ => throw new InvalidName("variable name has to be a string")
          }
          case "set!" => return l(1) match {
            case name: String => env.update(name, eval(l(2), env))
            case _ => throw new InvalidName("variable name has to be a string")
          }
          case "begin" =>
            l.tail.init.map(e => eval(e, env))
            exp = l.last
          case "quote" => return l(1)
          case "lambda" => return l(1) match {
            case parms: List[Any] =>
              val p = parms.map {
                case n: String => n
                case _ => "parm names have to be strings"
              }
              Function(p, l(2))
            case _ => throw new InterpreterException("lambda arguments have to be a list")
          }
          case "defun" => return l(1) match {
            case name: String => l(2) match {
              case parms: List[Any] =>
                val p = parms.map {
                  case n: String => n
                  case _ => "parm names have to be strings"
                }
                if(l.length != 4)
                  throw new InterpreterException("function has to have form (defun <name> <parms> <bod>)")
                val f = Function(p, l(3))
                env.define(name, f)
              case _ => throw new InterpreterException("function arguments have to be a list")
            }
            case _ => throw new InvalidName("function name has to be a string")
          }
          case "let" => return l(1) match {
            case names: List[String] => l(2) match {
              case vals: List[Any] =>
                val body = l(3)
                val context = new Env(env)
                val args = vals.map(e => eval(e, env))
                names.zip(args).foreach {
                  case (param: String, value: Any) => context.define(param, value)
                }
                eval(body, context)
              case _ => throw new InterpreterException("let values have to be a list")
            }
            case _ => throw new InterpreterException("let names have to be a list of strings")
          }
          case n: String if Builtins.builtins(l, env).isDefinedAt(n) =>
            return Builtins.builtins(l, env)(n)
          case s: String => env.getFunction(s, l.tail.length) match {
            case None =>
              val b = Builtins.builtins(l, env)
              val fname = env(s) match {
                case Some(s: String) => s
                case _ => throw new MethodNotFound(s)
              }
              if( b.isDefinedAt(fname)) {
                return b(fname)
              } else {
                throw new MethodNotFound(s)
              }
            case Some(Function(parms, body)) =>
              val context = new Env(env)
              val args = l.tail.map(e => eval(e, env))
              parms.zip(args).foreach {
                case (param: String, value: Any) => context.define(param, value)
              }
              exp = body
              env = context
          }
          case _ => throw new TypeError("can't call non-string function")
        }
        case s: String => return env(s) match {
          case None => throw new VariableNotFound(s)
          case Some(v) => v
        }
        // basic values
        case n: Long => return n
        case d: Double => return d
        case Literal(l) => return l
      }
    }
  }
}
quantintel/Scalisp
src/main/scala/interpreter.scala
Scala
mit
4,589
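A self-contained sketch (illustrative only; the simplified expression types are not from the original) of the loop-instead-of-recursion pattern that `Interpreter.eval` above uses for tail positions such as `if` branches and function application:

// Illustrative: mirrors eval's shape — mutate exp and loop rather than recurse in tail position.
sealed trait Expr
case class Num(n: Long) extends Expr
case class If(cond: Expr, thn: Expr, els: Expr) extends Expr

object TrampolineSketch extends App {
  def eval(expression: Expr): Long = {
    var exp = expression
    while (true) {
      exp match {
        case Num(n) => return n
        case If(c, t, e) => exp = if (eval(c) != 0) t else e // tail call becomes a loop iteration
      }
    }
    sys.error("unreachable")
  }

  println(eval(If(Num(1), Num(42), Num(0)))) // prints 42
}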
package at.vizu.s2n.types.result

/**
 * Phil on 06.11.15.
 */
trait ReflectImplementation extends Implementation
viZu/nasca
src/main/scala/at/vizu/s2n/types/result/ReflectImplementation.scala
Scala
apache-2.0
113
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.table.runtime.stream.sql import org.apache.flink.api.scala._ import org.apache.flink.streaming.api.TimeCharacteristic import org.apache.flink.streaming.api.functions.AssignerWithPunctuatedWatermarks import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment import org.apache.flink.streaming.api.watermark.Watermark import org.apache.flink.table.api._ import org.apache.flink.table.api.bridge.scala._ import org.apache.flink.table.runtime.utils.{StreamITCase, StreamTestData, StreamingWithStateTestBase} import org.apache.flink.types.Row import org.junit.Assert.assertEquals import org.junit._ import java.util import scala.collection.mutable class JoinITCase extends StreamingWithStateTestBase { @Before def clear(): Unit = { StreamITCase.clear } // Tests for inner join. /** test proctime inner join **/ @Test def testProcessTimeInnerJoin(): Unit = { val env = StreamExecutionEnvironment.getExecutionEnvironment val settings = EnvironmentSettings.newInstance().useOldPlanner().build() val tEnv = StreamTableEnvironment.create(env, settings) env.setStateBackend(getStateBackend) env.setParallelism(1) val sqlQuery = """ |SELECT t2.a, t2.c, t1.c |FROM T1 as t1 join T2 as t2 ON | t1.a = t2.a AND | t1.proctime BETWEEN t2.proctime - INTERVAL '5' SECOND AND | t2.proctime + INTERVAL '5' SECOND |""".stripMargin val data1 = new mutable.MutableList[(Int, Long, String)] data1.+=((1, 1L, "Hi1")) data1.+=((1, 2L, "Hi2")) data1.+=((1, 5L, "Hi3")) data1.+=((2, 7L, "Hi5")) data1.+=((1, 9L, "Hi6")) data1.+=((1, 8L, "Hi8")) val data2 = new mutable.MutableList[(Int, Long, String)] data2.+=((1, 1L, "HiHi")) data2.+=((2, 2L, "HeHe")) val t1 = env.fromCollection(data1).toTable(tEnv, 'a, 'b, 'c, 'proctime.proctime) .select(('a === 1)?(nullOf(Types.INT), 'a) as 'a, 'b, 'c, 'proctime) // test null values val t2 = env.fromCollection(data2).toTable(tEnv, 'a, 'b, 'c, 'proctime.proctime) .select(('a === 1)?(nullOf(Types.INT), 'a) as 'a, 'b, 'c, 'proctime) // test null values tEnv.registerTable("T1", t1) tEnv.registerTable("T2", t2) val result = tEnv.sqlQuery(sqlQuery).toAppendStream[Row] result.addSink(new StreamITCase.StringSink[Row]) env.execute() } /** test proctime inner join with other condition **/ @Test def testProcessTimeInnerJoinWithOtherConditions(): Unit = { val env = StreamExecutionEnvironment.getExecutionEnvironment val settings = EnvironmentSettings.newInstance().useOldPlanner().build() val tEnv = StreamTableEnvironment.create(env, settings) env.setStateBackend(getStateBackend) env.setParallelism(2) val sqlQuery = """ |SELECT t2.a, t2.c, t1.c |FROM T1 as t1 JOIN T2 as t2 ON | t1.a = t2.a AND | t1.proctime BETWEEN t2.proctime - interval '5' SECOND AND | t2.proctime + interval '5' second AND | t1.b = t2.b 
|""".stripMargin val data1 = new mutable.MutableList[(String, Long, String)] data1.+=(("1", 1L, "Hi1")) data1.+=(("1", 2L, "Hi2")) data1.+=(("1", 5L, "Hi3")) data1.+=(("2", 7L, "Hi5")) data1.+=(("1", 9L, "Hi6")) data1.+=(("1", 8L, "Hi8")) val data2 = new mutable.MutableList[(String, Long, String)] data2.+=(("1", 5L, "HiHi")) data2.+=(("2", 2L, "HeHe")) // For null key test data1.+=((null.asInstanceOf[String], 20L, "leftNull")) data2.+=((null.asInstanceOf[String], 20L, "rightNull")) val t1 = env.fromCollection(data1).toTable(tEnv, 'a, 'b, 'c, 'proctime.proctime) val t2 = env.fromCollection(data2).toTable(tEnv, 'a, 'b, 'c, 'proctime.proctime) tEnv.registerTable("T1", t1) tEnv.registerTable("T2", t2) val result = tEnv.sqlQuery(sqlQuery).toAppendStream[Row] result.addSink(new StreamITCase.StringSink[Row]) env.execute() // Assert there is no result with null keys. Assert.assertFalse(StreamITCase.testResults.toString().contains("null")) } /** test rowtime inner join **/ @Test def testRowTimeInnerJoin(): Unit = { val env = StreamExecutionEnvironment.getExecutionEnvironment val settings = EnvironmentSettings.newInstance().useOldPlanner().build() val tEnv = StreamTableEnvironment.create(env, settings) env.setStateBackend(getStateBackend) env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime) val sqlQuery = """ |SELECT t2.key, t2.id, t1.id |FROM T1 as t1 join T2 as t2 ON | t1.key = t2.key AND | t1.rt BETWEEN t2.rt - INTERVAL '5' SECOND AND | t2.rt + INTERVAL '6' SECOND |""".stripMargin val data1 = new mutable.MutableList[(String, String, Long)] // for boundary test data1.+=(("A", "LEFT0.999", 999L)) data1.+=(("A", "LEFT1", 1000L)) data1.+=(("A", "LEFT2", 2000L)) data1.+=(("A", "LEFT3", 3000L)) data1.+=(("B", "LEFT4", 4000L)) data1.+=(("A", "LEFT5", 5000L)) data1.+=(("A", "LEFT6", 6000L)) // test null key data1.+=((null.asInstanceOf[String], "LEFT8", 8000L)) val data2 = new mutable.MutableList[(String, String, Long)] data2.+=(("A", "RIGHT6", 6000L)) data2.+=(("B", "RIGHT7", 7000L)) // test null key data2.+=((null.asInstanceOf[String], "RIGHT10", 10000L)) val t1 = env.fromCollection(data1) .assignTimestampsAndWatermarks(new Row3WatermarkExtractor2) .toTable(tEnv, 'key, 'id, 'rt.rowtime) val t2 = env.fromCollection(data2) .assignTimestampsAndWatermarks(new Row3WatermarkExtractor2) .toTable(tEnv, 'key, 'id, 'rt.rowtime) tEnv.registerTable("T1", t1) tEnv.registerTable("T2", t2) val result = tEnv.sqlQuery(sqlQuery).toAppendStream[Row] result.addSink(new StreamITCase.StringSink[Row]) env.execute() val expected = new java.util.ArrayList[String] expected.add("A,RIGHT6,LEFT1") expected.add("A,RIGHT6,LEFT2") expected.add("A,RIGHT6,LEFT3") expected.add("A,RIGHT6,LEFT5") expected.add("A,RIGHT6,LEFT6") expected.add("B,RIGHT7,LEFT4") StreamITCase.compareWithList(expected) } /** test rowtime inner join with equi-times **/ @Test def testRowTimeInnerJoinWithEquiTimeAttrs(): Unit = { val env = StreamExecutionEnvironment.getExecutionEnvironment val settings = EnvironmentSettings.newInstance().useOldPlanner().build() val tEnv = StreamTableEnvironment.create(env, settings) env.setStateBackend(getStateBackend) env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime) val sqlQuery = """ |SELECT t2.key, t2.id, t1.id |FROM T1 as t1 join T2 as t2 ON | t1.key = t2.key AND | t2.rt = t1.rt |""".stripMargin val data1 = new mutable.MutableList[(Int, Long, String, Long)] data1.+=((4, 4000L, "A", 4000L)) data1.+=((5, 5000L, "A", 5000L)) data1.+=((6, 6000L, "A", 6000L)) data1.+=((6, 6000L, "B", 6000L)) val data2 = new 
mutable.MutableList[(String, String, Long)] data2.+=(("A", "R-5", 5000L)) data2.+=(("B", "R-6", 6000L)) val t1 = env.fromCollection(data1) .assignTimestampsAndWatermarks(new Row4WatermarkExtractor) .toTable(tEnv, 'id, 'tm, 'key, 'rt.rowtime) val t2 = env.fromCollection(data2) .assignTimestampsAndWatermarks(new Row3WatermarkExtractor2) .toTable(tEnv, 'key, 'id, 'rt.rowtime) tEnv.registerTable("T1", t1) tEnv.registerTable("T2", t2) val result = tEnv.sqlQuery(sqlQuery).toAppendStream[Row] result.addSink(new StreamITCase.StringSink[Row]) env.execute() val expected = new java.util.ArrayList[String] expected.add("A,R-5,5") expected.add("B,R-6,6") StreamITCase.compareWithList(expected) } /** test rowtime inner join with other conditions **/ @Test def testRowTimeInnerJoinWithOtherConditions(): Unit = { val env = StreamExecutionEnvironment.getExecutionEnvironment val settings = EnvironmentSettings.newInstance().useOldPlanner().build() val tEnv = StreamTableEnvironment.create(env, settings) env.setStateBackend(getStateBackend) env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime) val sqlQuery = """ |SELECT t2.a, t1.c, t2.c |FROM T1 as t1 JOIN T2 as t2 ON | t1.a = t2.a AND | t1.rt > t2.rt - INTERVAL '5' SECOND AND | t1.rt < t2.rt - INTERVAL '1' SECOND AND | t1.b < t2.b AND | t1.b > 2 |""".stripMargin val data1 = new mutable.MutableList[(Int, Long, String, Long)] data1.+=((1, 4L, "LEFT1", 1000L)) // for boundary test data1.+=((1, 8L, "LEFT1.1", 1001L)) // predicate (t1.b > 2) push down data1.+=((1, 2L, "LEFT2", 2000L)) data1.+=((1, 7L, "LEFT3", 3000L)) data1.+=((2, 5L, "LEFT4", 4000L)) // for boundary test data1.+=((1, 4L, "LEFT4.9", 4999L)) data1.+=((1, 4L, "LEFT5", 5000L)) data1.+=((1, 10L, "LEFT6", 6000L)) val data2 = new mutable.MutableList[(Int, Long, String, Long)] // just for watermark data2.+=((1, 1L, "RIGHT1", 1000L)) data2.+=((1, 9L, "RIGHT6", 6000L)) data2.+=((2, 14L, "RIGHT7", 7000L)) data2.+=((1, 4L, "RIGHT8", 8000L)) val t1 = env.fromCollection(data1) .assignTimestampsAndWatermarks(new Row4WatermarkExtractor) .toTable(tEnv, 'a, 'b, 'c, 'rt.rowtime) val t2 = env.fromCollection(data2) .assignTimestampsAndWatermarks(new Row4WatermarkExtractor) .toTable(tEnv, 'a, 'b, 'c, 'rt.rowtime) tEnv.registerTable("T1", t1) tEnv.registerTable("T2", t2) val result = tEnv.sqlQuery(sqlQuery).toAppendStream[Row] result.addSink(new StreamITCase.StringSink[Row]) env.execute() // There may be two expected results according to the process order. 
val expected = new util.ArrayList[String] expected.add("1,LEFT3,RIGHT6") expected.add("1,LEFT1.1,RIGHT6") expected.add("2,LEFT4,RIGHT7") expected.add("1,LEFT4.9,RIGHT6") StreamITCase.compareWithList(expected) } /** test rowtime inner join with another time condition **/ @Test def testRowTimeInnerJoinWithOtherTimeCondition(): Unit = { val env = StreamExecutionEnvironment.getExecutionEnvironment val settings = EnvironmentSettings.newInstance().useOldPlanner().build() val tEnv = StreamTableEnvironment.create(env, settings) env.setStateBackend(getStateBackend) env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime) val sqlQuery = """ |SELECT t2.a, t1.c, t2.c |FROM T1 as t1 JOIN T2 as t2 ON | t1.a = t2.a AND | t1.rt > t2.rt - INTERVAL '4' SECOND AND | t1.rt < t2.rt AND | QUARTER(t1.rt) = t2.a |""".stripMargin val data1 = new mutable.MutableList[(Int, Long, String, Long)] data1.+=((1, 4L, "LEFT1", 1000L)) data1.+=((1, 2L, "LEFT2", 2000L)) data1.+=((1, 7L, "LEFT3", 3000L)) data1.+=((2, 5L, "LEFT4", 4000L)) data1.+=((1, 4L, "LEFT5", 5000L)) data1.+=((1, 10L, "LEFT6", 6000L)) val data2 = new mutable.MutableList[(Int, Long, String, Long)] data2.+=((1, 1L, "RIGHT1", 1000L)) data2.+=((1, 9L, "RIGHT6", 6000L)) data2.+=((2, 8, "RIGHT7", 7000L)) data2.+=((1, 4L, "RIGHT8", 8000L)) val t1 = env.fromCollection(data1) .assignTimestampsAndWatermarks(new Row4WatermarkExtractor) .toTable(tEnv, 'a, 'b, 'c, 'rt.rowtime) val t2 = env.fromCollection(data2) .assignTimestampsAndWatermarks(new Row4WatermarkExtractor) .toTable(tEnv, 'a, 'b, 'c, 'rt.rowtime) tEnv.registerTable("T1", t1) tEnv.registerTable("T2", t2) val result = tEnv.sqlQuery(sqlQuery).toAppendStream[Row] result.addSink(new StreamITCase.StringSink[Row]) env.execute() val expected = new java.util.ArrayList[String] expected.add("1,LEFT3,RIGHT6") expected.add("1,LEFT5,RIGHT6") expected.add("1,LEFT5,RIGHT8") expected.add("1,LEFT6,RIGHT8") StreamITCase.compareWithList(expected) } /** test rowtime inner join with window aggregation **/ @Test def testRowTimeInnerJoinWithWindowAggregateOnFirstTime(): Unit = { val env = StreamExecutionEnvironment.getExecutionEnvironment val settings = EnvironmentSettings.newInstance().useOldPlanner().build() val tEnv = StreamTableEnvironment.create(env, settings) env.setStateBackend(getStateBackend) env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime) val sqlQuery = """ |SELECT t1.key, TUMBLE_END(t1.rt, INTERVAL '4' SECOND), COUNT(t2.key) |FROM T1 AS t1 join T2 AS t2 ON | t1.key = t2.key AND | t1.rt BETWEEN t2.rt - INTERVAL '5' SECOND AND | t2.rt + INTERVAL '5' SECOND |GROUP BY TUMBLE(t1.rt, INTERVAL '4' SECOND), t1.key |""".stripMargin val data1 = new mutable.MutableList[(String, String, Long)] data1.+=(("A", "L-1", 1000L)) // no joining record data1.+=(("A", "L-2", 2000L)) // 1 joining record data1.+=(("A", "L-3", 3000L)) // 2 joining records data1.+=(("B", "L-4", 4000L)) // 1 joining record data1.+=(("C", "L-5", 4000L)) // no joining record data1.+=(("A", "L-6", 10000L)) // 2 joining records data1.+=(("A", "L-7", 13000L)) // 1 joining record val data2 = new mutable.MutableList[(String, String, Long)] data2.+=(("A", "R-1", 7000L)) // 3 joining records data2.+=(("B", "R-4", 7000L)) // 1 joining records data2.+=(("A", "R-3", 8000L)) // 3 joining records data2.+=(("D", "R-2", 8000L)) // no joining record val t1 = env.fromCollection(data1) .assignTimestampsAndWatermarks(new Row3WatermarkExtractor2) .toTable(tEnv, 'key, 'id, 'rt.rowtime) val t2 = env.fromCollection(data2) .assignTimestampsAndWatermarks(new 
Row3WatermarkExtractor2) .toTable(tEnv, 'key, 'id, 'rt.rowtime) tEnv.registerTable("T1", t1) tEnv.registerTable("T2", t2) val result = tEnv.sqlQuery(sqlQuery).toAppendStream[Row] result.addSink(new StreamITCase.StringSink[Row]) env.execute() val expected = new java.util.ArrayList[String] expected.add("A,1970-01-01 00:00:04.0,3") expected.add("A,1970-01-01 00:00:12.0,2") expected.add("A,1970-01-01 00:00:16.0,1") expected.add("B,1970-01-01 00:00:08.0,1") StreamITCase.compareWithList(expected) } /** test rowtime inner join with window aggregation **/ @Test def testRowTimeInnerJoinWithWindowAggregateOnSecondTime(): Unit = { val env = StreamExecutionEnvironment.getExecutionEnvironment val settings = EnvironmentSettings.newInstance().useOldPlanner().build() val tEnv = StreamTableEnvironment.create(env, settings) env.setStateBackend(getStateBackend) env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime) val sqlQuery = """ |SELECT t2.key, TUMBLE_END(t2.rt, INTERVAL '4' SECOND), COUNT(t1.key) |FROM T1 AS t1 join T2 AS t2 ON | t1.key = t2.key AND | t1.rt BETWEEN t2.rt - INTERVAL '5' SECOND AND | t2.rt + INTERVAL '5' SECOND |GROUP BY TUMBLE(t2.rt, INTERVAL '4' SECOND), t2.key |""".stripMargin val data1 = new mutable.MutableList[(String, String, Long)] data1.+=(("A", "L-1", 1000L)) // no joining record data1.+=(("A", "L-2", 2000L)) // 1 joining record data1.+=(("A", "L-3", 3000L)) // 2 joining records data1.+=(("B", "L-4", 4000L)) // 1 joining record data1.+=(("C", "L-5", 4000L)) // no joining record data1.+=(("A", "L-6", 10000L)) // 2 joining records data1.+=(("A", "L-7", 13000L)) // 1 joining record val data2 = new mutable.MutableList[(String, String, Long)] data2.+=(("A", "R-1", 7000L)) // 3 joining records data2.+=(("B", "R-4", 7000L)) // 1 joining records data2.+=(("A", "R-3", 8000L)) // 3 joining records data2.+=(("D", "R-2", 8000L)) // no joining record val t1 = env.fromCollection(data1) .assignTimestampsAndWatermarks(new Row3WatermarkExtractor2) .toTable(tEnv, 'key, 'id, 'rt.rowtime) val t2 = env.fromCollection(data2) .assignTimestampsAndWatermarks(new Row3WatermarkExtractor2) .toTable(tEnv, 'key, 'id, 'rt.rowtime) tEnv.registerTable("T1", t1) tEnv.registerTable("T2", t2) val result = tEnv.sqlQuery(sqlQuery).toAppendStream[Row] result.addSink(new StreamITCase.StringSink[Row]) env.execute() val expected = new java.util.ArrayList[String] expected.add("A,1970-01-01 00:00:08.0,3") expected.add("A,1970-01-01 00:00:12.0,3") expected.add("B,1970-01-01 00:00:08.0,1") StreamITCase.compareWithList(expected) } // Tests for left outer join @Test def testProcTimeLeftOuterJoin(): Unit = { val env = StreamExecutionEnvironment.getExecutionEnvironment val settings = EnvironmentSettings.newInstance().useOldPlanner().build() val tEnv = StreamTableEnvironment.create(env, settings) env.setStateBackend(getStateBackend) env.setParallelism(1) val sqlQuery = """ |SELECT t2.a, t2.c, t1.c |FROM T1 as t1 LEFT OUTER JOIN T2 as t2 ON | t1.a = t2.a AND | t1.proctime BETWEEN t2.proctime - INTERVAL '5' SECOND AND | t2.proctime + INTERVAL '3' SECOND |""".stripMargin val data1 = new mutable.MutableList[(Int, Long, String)] data1.+=((1, 1L, "Hi1")) data1.+=((1, 2L, "Hi2")) data1.+=((1, 5L, "Hi3")) data1.+=((2, 7L, "Hi5")) val data2 = new mutable.MutableList[(Int, Long, String)] data2.+=((1, 1L, "HiHi")) data2.+=((2, 2L, "HeHe")) val t1 = env.fromCollection(data1).toTable(tEnv, 'a, 'b, 'c, 'proctime.proctime) .select('a, 'b, 'c, 'proctime) val t2 = env.fromCollection(data2).toTable(tEnv, 'a, 'b, 'c, 'proctime.proctime) 
.select('a, 'b, 'c, 'proctime) tEnv.registerTable("T1", t1) tEnv.registerTable("T2", t2) val result = tEnv.sqlQuery(sqlQuery).toAppendStream[Row] result.addSink(new StreamITCase.StringSink[Row]) env.execute() } @Test def testRowTimeLeftOuterJoin(): Unit = { val env = StreamExecutionEnvironment.getExecutionEnvironment val settings = EnvironmentSettings.newInstance().useOldPlanner().build() val tEnv = StreamTableEnvironment.create(env, settings) env.setStateBackend(getStateBackend) env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime) val sqlQuery = """ |SELECT t1.key, t2.id, t1.id |FROM T1 AS t1 LEFT OUTER JOIN T2 AS t2 ON | t1.key = t2.key AND | t1.rt BETWEEN t2.rt - INTERVAL '5' SECOND AND | t2.rt + INTERVAL '6' SECOND AND | t1.id <> 'L-5' |""".stripMargin val data1 = new mutable.MutableList[(String, String, Long)] // for boundary test data1.+=(("A", "L-1", 1000L)) data1.+=(("A", "L-2", 2000L)) data1.+=(("B", "L-4", 4000L)) data1.+=(("B", "L-5", 5000L)) data1.+=(("A", "L-6", 6000L)) data1.+=(("C", "L-7", 7000L)) data1.+=(("A", "L-10", 10000L)) data1.+=(("A", "L-12", 12000L)) data1.+=(("A", "L-20", 20000L)) val data2 = new mutable.MutableList[(String, String, Long)] data2.+=(("A", "R-6", 6000L)) data2.+=(("B", "R-7", 7000L)) data2.+=(("D", "R-8", 8000L)) data2.+=(("A", "R-11", 11000L)) val t1 = env.fromCollection(data1) .assignTimestampsAndWatermarks(new Row3WatermarkExtractor2) .toTable(tEnv, 'key, 'id, 'rt.rowtime) val t2 = env.fromCollection(data2) .assignTimestampsAndWatermarks(new Row3WatermarkExtractor2) .toTable(tEnv, 'key, 'id, 'rt.rowtime) tEnv.registerTable("T1", t1) tEnv.registerTable("T2", t2) val result = tEnv.sqlQuery(sqlQuery).toAppendStream[Row] result.addSink(new StreamITCase.StringSink[Row]) env.execute() val expected = new java.util.ArrayList[String] expected.add("A,R-6,L-1") expected.add("A,R-6,L-2") expected.add("A,R-6,L-6") expected.add("A,R-6,L-10") expected.add("A,R-6,L-12") expected.add("B,R-7,L-4") expected.add("A,R-11,L-6") expected.add("A,R-11,L-10") expected.add("A,R-11,L-12") expected.add("B,null,L-5") expected.add("C,null,L-7") expected.add("A,null,L-20") StreamITCase.compareWithList(expected) } @Test def testRowTimeLeftOuterJoinNegativeWindowSize(): Unit = { val env = StreamExecutionEnvironment.getExecutionEnvironment val settings = EnvironmentSettings.newInstance().useOldPlanner().build() val tEnv = StreamTableEnvironment.create(env, settings) env.setStateBackend(getStateBackend) env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime) val sqlQuery = """ |SELECT t2.key, t2.id, t1.id |FROM T1 AS t1 LEFT OUTER JOIN T2 AS t2 ON | t1.key = t2.key AND | t1.rt BETWEEN t2.rt + INTERVAL '3' SECOND AND | t2.rt + INTERVAL '1' SECOND |""".stripMargin val data1 = new mutable.MutableList[(String, String, Long)] // for boundary test data1.+=(("A", "L-1", 1000L)) data1.+=(("B", "L-4", 4000L)) data1.+=(("C", "L-7", 7000L)) val data2 = new mutable.MutableList[(String, String, Long)] data2.+=(("A", "R-6", 6000L)) data2.+=(("B", "R-7", 7000L)) data2.+=(("D", "R-8", 8000L)) val t1 = env.fromCollection(data1) .assignTimestampsAndWatermarks(new Row3WatermarkExtractor2) .toTable(tEnv, 'key, 'id, 'rt.rowtime) val t2 = env.fromCollection(data2) .assignTimestampsAndWatermarks(new Row3WatermarkExtractor2) .toTable(tEnv, 'key, 'id, 'rt.rowtime) tEnv.registerTable("T1", t1) tEnv.registerTable("T2", t2) val result = tEnv.sqlQuery(sqlQuery).toAppendStream[Row] result.addSink(new StreamITCase.StringSink[Row]) env.execute() val expected = new java.util.ArrayList[String] 
expected.add("null,null,L-1") expected.add("null,null,L-4") expected.add("null,null,L-7") StreamITCase.compareWithList(expected) } // Tests for right outer join @Test def testProcTimeRightOuterJoin(): Unit = { val env = StreamExecutionEnvironment.getExecutionEnvironment val settings = EnvironmentSettings.newInstance().useOldPlanner().build() val tEnv = StreamTableEnvironment.create(env, settings) env.setStateBackend(getStateBackend) env.setParallelism(1) val sqlQuery = """ |SELECT t2.a, t2.c, t1.c |FROM T1 as t1 RIGHT OUTER JOIN T2 as t2 ON | t1.a = t2.a AND | t1.proctime BETWEEN t2.proctime - INTERVAL '5' SECOND AND | t2.proctime + INTERVAL '3' SECOND |""".stripMargin val data1 = new mutable.MutableList[(Int, Long, String)] data1.+=((1, 1L, "Hi1")) data1.+=((1, 2L, "Hi2")) data1.+=((1, 5L, "Hi3")) data1.+=((2, 7L, "Hi5")) val data2 = new mutable.MutableList[(Int, Long, String)] data2.+=((1, 1L, "HiHi")) data2.+=((2, 2L, "HeHe")) val t1 = env.fromCollection(data1).toTable(tEnv, 'a, 'b, 'c, 'proctime.proctime) .select('a, 'b, 'c, 'proctime) val t2 = env.fromCollection(data2).toTable(tEnv, 'a, 'b, 'c, 'proctime.proctime) .select('a, 'b, 'c, 'proctime) tEnv.registerTable("T1", t1) tEnv.registerTable("T2", t2) val result = tEnv.sqlQuery(sqlQuery).toAppendStream[Row] result.addSink(new StreamITCase.StringSink[Row]) env.execute() } @Test def testRowTimeRightOuterJoin(): Unit = { val env = StreamExecutionEnvironment.getExecutionEnvironment val settings = EnvironmentSettings.newInstance().useOldPlanner().build() val tEnv = StreamTableEnvironment.create(env, settings) env.setStateBackend(getStateBackend) env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime) val sqlQuery = """ |SELECT t2.key, t2.id, t1.id |FROM T1 AS t1 RIGHT OUTER JOIN T2 AS t2 ON | t1.key = t2.key AND | t1.rt BETWEEN t2.rt - INTERVAL '5' SECOND AND | t2.rt + INTERVAL '6' SECOND AND | t2.id <> 'R-5' |""".stripMargin val data1 = new mutable.MutableList[(String, String, Long)] // for boundary test data1.+=(("A", "L-1", 1000L)) data1.+=(("A", "L-2", 2000L)) data1.+=(("B", "L-4", 4000L)) data1.+=(("A", "L-6", 6000L)) data1.+=(("C", "L-7", 7000L)) data1.+=(("A", "L-10", 10000L)) data1.+=(("A", "L-12", 12000L)) val data2 = new mutable.MutableList[(String, String, Long)] data2.+=(("A", "R-5", 5000L)) data2.+=(("A", "R-6", 6000L)) data2.+=(("B", "R-7", 7000L)) data2.+=(("D", "R-8", 8000L)) data2.+=(("A", "R-20", 20000L)) val t1 = env.fromCollection(data1) .assignTimestampsAndWatermarks(new Row3WatermarkExtractor2) .toTable(tEnv, 'key, 'id, 'rt.rowtime) val t2 = env.fromCollection(data2) .assignTimestampsAndWatermarks(new Row3WatermarkExtractor2) .toTable(tEnv, 'key, 'id, 'rt.rowtime) tEnv.registerTable("T1", t1) tEnv.registerTable("T2", t2) val result = tEnv.sqlQuery(sqlQuery).toAppendStream[Row] result.addSink(new StreamITCase.StringSink[Row]) env.execute() val expected = new java.util.ArrayList[String] expected.add("A,R-5,null") expected.add("A,R-6,L-1") expected.add("A,R-6,L-2") expected.add("A,R-6,L-6") expected.add("A,R-6,L-10") expected.add("A,R-6,L-12") expected.add("A,R-20,null") expected.add("B,R-7,L-4") expected.add("D,R-8,null") StreamITCase.compareWithList(expected) } @Test def testRowTimeRightOuterJoinNegativeWindowSize(): Unit = { val env = StreamExecutionEnvironment.getExecutionEnvironment val settings = EnvironmentSettings.newInstance().useOldPlanner().build() val tEnv = StreamTableEnvironment.create(env, settings) env.setStateBackend(getStateBackend) env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime) val 
sqlQuery = """ |SELECT t2.key, t2.id, t1.id |FROM T1 AS t1 RIGHT OUTER JOIN T2 AS t2 ON | t1.key = t2.key AND | t1.rt BETWEEN t2.rt + INTERVAL '5' SECOND AND | t2.rt + INTERVAL '1' SECOND |""".stripMargin val data1 = new mutable.MutableList[(String, String, Long)] // for boundary test data1.+=(("A", "L-1", 1000L)) data1.+=(("B", "L-4", 4000L)) data1.+=(("C", "L-7", 7000L)) val data2 = new mutable.MutableList[(String, String, Long)] data2.+=(("A", "R-6", 6000L)) data2.+=(("B", "R-7", 7000L)) data2.+=(("D", "R-8", 8000L)) val t1 = env.fromCollection(data1) .assignTimestampsAndWatermarks(new Row3WatermarkExtractor2) .toTable(tEnv, 'key, 'id, 'rt.rowtime) val t2 = env.fromCollection(data2) .assignTimestampsAndWatermarks(new Row3WatermarkExtractor2) .toTable(tEnv, 'key, 'id, 'rt.rowtime) tEnv.registerTable("T1", t1) tEnv.registerTable("T2", t2) val result = tEnv.sqlQuery(sqlQuery).toAppendStream[Row] result.addSink(new StreamITCase.StringSink[Row]) env.execute() val expected = new java.util.ArrayList[String] expected.add("A,R-6,null") expected.add("B,R-7,null") expected.add("D,R-8,null") StreamITCase.compareWithList(expected) } // Tests for full outer join @Test def testProcTimeFullOuterJoin(): Unit = { val env = StreamExecutionEnvironment.getExecutionEnvironment val settings = EnvironmentSettings.newInstance().useOldPlanner().build() val tEnv = StreamTableEnvironment.create(env, settings) env.setStateBackend(getStateBackend) env.setParallelism(1) val sqlQuery = """ |SELECT t2.a, t2.c, t1.c |FROM T1 as t1 Full OUTER JOIN T2 as t2 ON | t1.a = t2.a AND | t1.proctime BETWEEN t2.proctime - INTERVAL '5' SECOND AND | t2.proctime |""".stripMargin val data1 = new mutable.MutableList[(Int, Long, String)] data1.+=((1, 1L, "Hi1")) data1.+=((1, 2L, "Hi2")) data1.+=((1, 5L, "Hi3")) data1.+=((2, 7L, "Hi5")) val data2 = new mutable.MutableList[(Int, Long, String)] data2.+=((1, 1L, "HiHi")) data2.+=((2, 2L, "HeHe")) val t1 = env.fromCollection(data1).toTable(tEnv, 'a, 'b, 'c, 'proctime.proctime) .select('a, 'b, 'c, 'proctime) val t2 = env.fromCollection(data2).toTable(tEnv, 'a, 'b, 'c, 'proctime.proctime) .select('a, 'b, 'c, 'proctime) tEnv.registerTable("T1", t1) tEnv.registerTable("T2", t2) val result = tEnv.sqlQuery(sqlQuery).toAppendStream[Row] result.addSink(new StreamITCase.StringSink[Row]) env.execute() } @Test def testRowTimeFullOuterJoin(): Unit = { val env = StreamExecutionEnvironment.getExecutionEnvironment val settings = EnvironmentSettings.newInstance().useOldPlanner().build() val tEnv = StreamTableEnvironment.create(env, settings) env.setStateBackend(getStateBackend) env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime) val sqlQuery = """ |SELECT t2.key, t2.id, t1.id |FROM T1 AS t1 FULL OUTER JOIN T2 AS t2 ON | t1.key = t2.key AND | t1.rt BETWEEN t2.rt - INTERVAL '5' SECOND AND | t2.rt + INTERVAL '6' SECOND AND | NOT (t1.id = 'L-5' OR t2.id = 'R-5') |""".stripMargin val data1 = new mutable.MutableList[(String, String, Long)] // for boundary test data1.+=(("A", "L-1", 1000L)) data1.+=(("A", "L-2", 2000L)) data1.+=(("B", "L-4", 4000L)) data1.+=(("B", "L-5", 5000L)) data1.+=(("A", "L-6", 6000L)) data1.+=(("C", "L-7", 7000L)) data1.+=(("A", "L-10", 10000L)) data1.+=(("A", "L-12", 12000L)) data1.+=(("A", "L-20", 20000L)) val data2 = new mutable.MutableList[(String, String, Long)] data2.+=(("A", "R-5", 5000L)) data2.+=(("A", "R-6", 6000L)) data2.+=(("B", "R-7", 7000L)) data2.+=(("D", "R-8", 8000L)) val t1 = env.fromCollection(data1) .assignTimestampsAndWatermarks(new Row3WatermarkExtractor2) 
.toTable(tEnv, 'key, 'id, 'rt.rowtime) val t2 = env.fromCollection(data2) .assignTimestampsAndWatermarks(new Row3WatermarkExtractor2) .toTable(tEnv, 'key, 'id, 'rt.rowtime) tEnv.registerTable("T1", t1) tEnv.registerTable("T2", t2) val result = tEnv.sqlQuery(sqlQuery).toAppendStream[Row] result.addSink(new StreamITCase.StringSink[Row]) env.execute() val expected = new java.util.ArrayList[String] expected.add("A,R-6,L-1") expected.add("A,R-6,L-2") expected.add("A,R-6,L-6") expected.add("A,R-6,L-10") expected.add("A,R-6,L-12") expected.add("B,R-7,L-4") expected.add("A,R-5,null") expected.add("D,R-8,null") expected.add("null,null,L-5") expected.add("null,null,L-7") expected.add("null,null,L-20") StreamITCase.compareWithList(expected) } @Test def testRowTimeFullOuterJoinNegativeWindowSize(): Unit = { val env = StreamExecutionEnvironment.getExecutionEnvironment val settings = EnvironmentSettings.newInstance().useOldPlanner().build() val tEnv = StreamTableEnvironment.create(env, settings) env.setStateBackend(getStateBackend) env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime) val sqlQuery = """ |SELECT t2.key, t2.id, t1.id |FROM T1 AS t1 FULL OUTER JOIN T2 AS t2 ON | t1.key = t2.key AND | t1.rt BETWEEN t2.rt + INTERVAL '5' SECOND AND | t2.rt + INTERVAL '4' SECOND |""".stripMargin val data1 = new mutable.MutableList[(String, String, Long)] // for boundary test data1.+=(("A", "L-1", 1000L)) data1.+=(("B", "L-4", 4000L)) data1.+=(("C", "L-7", 7000L)) val data2 = new mutable.MutableList[(String, String, Long)] data2.+=(("A", "R-6", 6000L)) data2.+=(("B", "R-7", 7000L)) data2.+=(("D", "R-8", 8000L)) val t1 = env.fromCollection(data1) .assignTimestampsAndWatermarks(new Row3WatermarkExtractor2) .toTable(tEnv, 'key, 'id, 'rt.rowtime) val t2 = env.fromCollection(data2) .assignTimestampsAndWatermarks(new Row3WatermarkExtractor2) .toTable(tEnv, 'key, 'id, 'rt.rowtime) tEnv.registerTable("T1", t1) tEnv.registerTable("T2", t2) val result = tEnv.sqlQuery(sqlQuery).toAppendStream[Row] result.addSink(new StreamITCase.StringSink[Row]) env.execute() val expected = new java.util.ArrayList[String] expected.add("null,null,L-1") expected.add("null,null,L-4") expected.add("null,null,L-7") expected.add("A,R-6,null") expected.add("B,R-7,null") expected.add("D,R-8,null") StreamITCase.compareWithList(expected) } /** test non-window inner join **/ @Test def testNonWindowInnerJoin(): Unit = { val env = StreamExecutionEnvironment.getExecutionEnvironment val settings = EnvironmentSettings.newInstance().useOldPlanner().build() val tEnv = StreamTableEnvironment.create(env, settings) env.setStateBackend(getStateBackend) val data1 = new mutable.MutableList[(Int, Long, String)] data1.+=((1, 1L, "Hi1")) data1.+=((1, 2L, "Hi2")) data1.+=((1, 2L, "Hi2")) data1.+=((1, 5L, "Hi3")) data1.+=((2, 7L, "Hi5")) data1.+=((1, 9L, "Hi6")) data1.+=((1, 8L, "Hi8")) data1.+=((3, 8L, "Hi9")) val data2 = new mutable.MutableList[(Int, Long, String)] data2.+=((1, 1L, "HiHi")) data2.+=((2, 2L, "HeHe")) data2.+=((3, 2L, "HeHe")) val t1 = env.fromCollection(data1).toTable(tEnv, 'a, 'b, 'c) .select(('a === 3) ? (nullOf(Types.INT), 'a) as 'a, 'b, 'c) val t2 = env.fromCollection(data2).toTable(tEnv, 'a, 'b, 'c) .select(('a === 3) ? 
(nullOf(Types.INT), 'a) as 'a, 'b, 'c) tEnv.registerTable("T1", t1) tEnv.registerTable("T2", t2) val sqlQuery = """ |SELECT t2.a, t2.c, t1.c |FROM T1 as t1 JOIN T2 as t2 ON | t1.a = t2.a AND | t1.b > t2.b |""".stripMargin val result = tEnv.sqlQuery(sqlQuery).toAppendStream[Row] result.addSink(new StreamITCase.StringSink[Row]) env.execute() val expected = mutable.MutableList( "1,HiHi,Hi2", "1,HiHi,Hi2", "1,HiHi,Hi3", "1,HiHi,Hi6", "1,HiHi,Hi8", "2,HeHe,Hi5", "null,HeHe,Hi9") assertEquals(expected.sorted, StreamITCase.testResults.sorted) } @Test def testInnerJoin(): Unit = { val env = StreamExecutionEnvironment.getExecutionEnvironment val settings = EnvironmentSettings.newInstance().useOldPlanner().build() val tEnv = StreamTableEnvironment.create(env, settings) env.setStateBackend(getStateBackend) val sqlQuery = "SELECT c, g FROM Table3, Table5 WHERE b = e" val ds1 = StreamTestData.getSmall3TupleDataStream(env).toTable(tEnv, 'a, 'b, 'c) val ds2 = StreamTestData.get5TupleDataStream(env).toTable(tEnv, 'd, 'e, 'f, 'g, 'h) tEnv.registerTable("Table3", ds1) tEnv.registerTable("Table5", ds2) val result = tEnv.sqlQuery(sqlQuery) val expected = Seq("Hi,Hallo", "Hello,Hallo Welt", "Hello world,Hallo Welt") val results = result.toRetractStream[Row] results.addSink(new StreamITCase.RetractingSink) env.execute() assertEquals(expected.sorted, StreamITCase.retractedResults.sorted) } @Test def testInnerJoinWithFilter(): Unit = { val env = StreamExecutionEnvironment.getExecutionEnvironment val settings = EnvironmentSettings.newInstance().useOldPlanner().build() val tEnv = StreamTableEnvironment.create(env, settings) env.setStateBackend(getStateBackend) val sqlQuery = "SELECT c, g FROM Table3, Table5 WHERE b = e AND b < 2" val ds1 = StreamTestData.getSmall3TupleDataStream(env).toTable(tEnv, 'a, 'b, 'c) val ds2 = StreamTestData.get5TupleDataStream(env).toTable(tEnv, 'd, 'e, 'f, 'g, 'h) tEnv.registerTable("Table3", ds1) tEnv.registerTable("Table5", ds2) val result = tEnv.sqlQuery(sqlQuery) val expected = Seq("Hi,Hallo") val results = result.toRetractStream[Row] results.addSink(new StreamITCase.RetractingSink) env.execute() assertEquals(expected.sorted, StreamITCase.retractedResults.sorted) } @Test def testInnerJoinWithNonEquiJoinPredicate(): Unit = { val env = StreamExecutionEnvironment.getExecutionEnvironment val settings = EnvironmentSettings.newInstance().useOldPlanner().build() val tEnv = StreamTableEnvironment.create(env, settings) env.setStateBackend(getStateBackend) val sqlQuery = "SELECT c, g FROM Table3, Table5 WHERE b = e AND a < 6 AND h < b" val ds1 = StreamTestData.get3TupleDataStream(env).toTable(tEnv, 'a, 'b, 'c) val ds2 = StreamTestData.get5TupleDataStream(env).toTable(tEnv, 'd, 'e, 'f, 'g, 'h) tEnv.registerTable("Table3", ds1) tEnv.registerTable("Table5", ds2) val result = tEnv.sqlQuery(sqlQuery) val expected = Seq("Hello world, how are you?,Hallo Welt wie", "I am fine.,Hallo Welt wie") val results = result.toRetractStream[Row] results.addSink(new StreamITCase.RetractingSink) env.execute() assertEquals(expected.sorted, StreamITCase.retractedResults.sorted) } @Test def testInnerJoinWithMultipleKeys(): Unit = { val env = StreamExecutionEnvironment.getExecutionEnvironment val settings = EnvironmentSettings.newInstance().useOldPlanner().build() val tEnv = StreamTableEnvironment.create(env, settings) env.setStateBackend(getStateBackend) val sqlQuery = "SELECT c, g FROM Table3, Table5 WHERE a = d AND b = h" val ds1 = StreamTestData.get3TupleDataStream(env).toTable(tEnv, 'a, 'b, 'c) val ds2 = 
StreamTestData.get5TupleDataStream(env).toTable(tEnv, 'd, 'e, 'f, 'g, 'h) tEnv.registerTable("Table3", ds1) tEnv.registerTable("Table5", ds2) val result = tEnv.sqlQuery(sqlQuery) val expected = Seq( "Hi,Hallo", "Hello,Hallo Welt", "Hello world,Hallo Welt wie gehts?", "Hello world,ABC", "I am fine.,HIJ", "I am fine.,IJK") val results = result.toRetractStream[Row] results.addSink(new StreamITCase.RetractingSink) env.execute() assertEquals(expected.sorted, StreamITCase.retractedResults.sorted) } @Test def testInnerJoinWithAlias(): Unit = { val env = StreamExecutionEnvironment.getExecutionEnvironment val settings = EnvironmentSettings.newInstance().useOldPlanner().build() val tEnv = StreamTableEnvironment.create(env, settings) env.setStateBackend(getStateBackend) val sqlQuery = "SELECT Table5.c, T.`1-_./Ü` FROM (SELECT a, b, c AS `1-_./Ü` FROM Table3) AS T, Table5 " + "WHERE a = d AND a < 4" val ds1 = StreamTestData.get3TupleDataStream(env).toTable(tEnv, 'a, 'b, 'c) val ds2 = StreamTestData.get5TupleDataStream(env).toTable(tEnv, 'd, 'e, 'f, 'g, 'c) tEnv.registerTable("Table3", ds1) tEnv.registerTable("Table5", ds2) val result = tEnv.sqlQuery(sqlQuery) val expected = Seq("1,Hi", "2,Hello", "1,Hello", "2,Hello world", "2,Hello world", "3,Hello world") val results = result.toRetractStream[Row] results.addSink(new StreamITCase.RetractingSink) env.execute() assertEquals(expected.sorted, StreamITCase.retractedResults.sorted) } @Test def testInnerJoinWithAggregation(): Unit = { val env = StreamExecutionEnvironment.getExecutionEnvironment val settings = EnvironmentSettings.newInstance().useOldPlanner().build() val tEnv = StreamTableEnvironment.create(env, settings) env.setParallelism(1) env.setStateBackend(getStateBackend) val sqlQuery = "SELECT COUNT(g), COUNT(b) FROM Table3, Table5 WHERE a = d" val ds1 = StreamTestData.getSmall3TupleDataStream(env).toTable(tEnv, 'a, 'b, 'c) val ds2 = StreamTestData.get5TupleDataStream(env).toTable(tEnv, 'd, 'e, 'f, 'g, 'h) tEnv.registerTable("Table3", ds1) tEnv.registerTable("Table5", ds2) val result = tEnv.sqlQuery(sqlQuery) val expected = Seq("6,6") val results = result.toRetractStream[Row] results.addSink(new StreamITCase.RetractingSink) env.execute() assertEquals(expected.sorted, StreamITCase.retractedResults.sorted) } @Test def testLeftJoin(): Unit = { val env = StreamExecutionEnvironment.getExecutionEnvironment val settings = EnvironmentSettings.newInstance().useOldPlanner().build() val tEnv = StreamTableEnvironment.create(env, settings) StreamITCase.clear env.setStateBackend(getStateBackend) val sqlQuery = "SELECT c, g FROM Table5 LEFT OUTER JOIN Table3 ON b = e" val ds1 = StreamTestData.getSmall3TupleDataStream(env).toTable(tEnv, 'a, 'b, 'c) val ds2 = StreamTestData.get5TupleDataStream(env).toTable(tEnv, 'd, 'e, 'f, 'g, 'h) tEnv.registerTable("Table3", ds1) tEnv.registerTable("Table5", ds2) val result = tEnv.sqlQuery(sqlQuery) val expected = Seq("Hi,Hallo", "Hello,Hallo Welt", "Hello world,Hallo Welt", "null,Hallo Welt wie", "null,Hallo Welt wie gehts?", "null,ABC", "null,BCD", "null,CDE", "null,DEF", "null,EFG", "null,FGH", "null,GHI", "null,HIJ", "null,IJK", "null,JKL", "null,KLM") val results = result.toRetractStream[Row] results.addSink(new StreamITCase.RetractingSink) env.execute() assertEquals(expected.sorted, StreamITCase.retractedResults.sorted) } @Test def testRightJoin(): Unit = { val env = StreamExecutionEnvironment.getExecutionEnvironment val settings = EnvironmentSettings.newInstance().useOldPlanner().build() val tEnv = 
StreamTableEnvironment.create(env, settings) env.setStateBackend(getStateBackend) val sqlQuery = "SELECT c, g FROM Table3 RIGHT OUTER JOIN Table5 ON b = e" val ds1 = StreamTestData.getSmall3TupleDataStream(env).toTable(tEnv, 'a, 'b, 'c) val ds2 = StreamTestData.get5TupleDataStream(env).toTable(tEnv, 'd, 'e, 'f, 'g, 'h) tEnv.registerTable("Table3", ds1) tEnv.registerTable("Table5", ds2) val result = tEnv.sqlQuery(sqlQuery) val expected = Seq("Hi,Hallo", "Hello,Hallo Welt", "Hello world,Hallo Welt", "null,Hallo Welt wie", "null,Hallo Welt wie gehts?", "null,ABC", "null,BCD", "null,CDE", "null,DEF", "null,EFG", "null,FGH", "null,GHI", "null,HIJ", "null,IJK", "null,JKL", "null,KLM") val results = result.toRetractStream[Row] results.addSink(new StreamITCase.RetractingSink) env.execute() assertEquals(expected.sorted, StreamITCase.retractedResults.sorted) } @Test def testLeftSingleRightJoinEqualPredicate(): Unit = { val env = StreamExecutionEnvironment.getExecutionEnvironment val settings = EnvironmentSettings.newInstance().useOldPlanner().build() val tEnv = StreamTableEnvironment.create(env, settings) env.setStateBackend(getStateBackend) val sqlQuery = "SELECT a, cnt FROM (SELECT COUNT(*) AS cnt FROM B) RIGHT JOIN A ON cnt = a" val ds1 = StreamTestData.get5TupleDataStream(env).toTable(tEnv, 'a, 'b, 'c, 'd, 'e) val ds2 = StreamTestData.getSmall3TupleDataStream(env).toTable(tEnv, 'f, 'g, 'h) tEnv.registerTable("A", ds1) tEnv.registerTable("B", ds2) val result = tEnv.sqlQuery(sqlQuery) val expected = Seq( "1,null", "2,null", "2,null", "3,3", "3,3", "3,3", "4,null", "4,null", "4," + "null", "4,null", "5,null", "5,null", "5,null", "5,null", "5,null") val results = result.toRetractStream[Row] results.addSink(new StreamITCase.RetractingSink) env.execute() assertEquals(expected.sorted, StreamITCase.retractedResults.sorted) } @Test def testFullOuterJoin(): Unit = { val env = StreamExecutionEnvironment.getExecutionEnvironment val settings = EnvironmentSettings.newInstance().useOldPlanner().build() val tEnv = StreamTableEnvironment.create(env, settings) env.setStateBackend(getStateBackend) val sqlQuery = "SELECT c, g FROM Table3 FULL OUTER JOIN Table5 ON b = e" val ds1 = StreamTestData.getSmall3TupleDataStream(env).toTable(tEnv, 'a, 'b, 'c) val ds2 = StreamTestData.get5TupleDataStream(env).toTable(tEnv, 'd, 'e, 'f, 'g, 'h) tEnv.registerTable("Table3", ds1) tEnv.registerTable("Table5", ds2) val result = tEnv.sqlQuery(sqlQuery) val expected = Seq("Hi,Hallo", "Hello,Hallo Welt", "Hello world,Hallo Welt", "null,Hallo Welt wie", "null,Hallo Welt wie gehts?", "null,ABC", "null,BCD", "null,CDE", "null,DEF", "null,EFG", "null,FGH", "null,GHI", "null,HIJ", "null,IJK", "null,JKL", "null,KLM") val results = result.toRetractStream[Row] results.addSink(new StreamITCase.RetractingSink) env.execute() assertEquals(expected.sorted, StreamITCase.retractedResults.sorted) } } private class Row4WatermarkExtractor extends AssignerWithPunctuatedWatermarks[(Int, Long, String, Long)] { override def checkAndGetNextWatermark( lastElement: (Int, Long, String, Long), extractedTimestamp: Long): Watermark = { new Watermark(extractedTimestamp - 1) } override def extractTimestamp( element: (Int, Long, String, Long), previousElementTimestamp: Long): Long = { element._4 } } private class Row3WatermarkExtractor2 extends AssignerWithPunctuatedWatermarks[(String, String, Long)] { override def checkAndGetNextWatermark( lastElement: (String, String, Long), extractedTimestamp: Long): Watermark = { new Watermark(extractedTimestamp - 1) } override 
def extractTimestamp( element: (String, String, Long), previousElementTimestamp: Long): Long = { element._3 } }
tzulitai/flink
flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/runtime/stream/sql/JoinITCase.scala
Scala
apache-2.0
46,186
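For readability, the core query shape that the interval-join tests above repeatedly exercise (table and column names taken from the tests themselves):

// The bounded (interval) join pattern under test: an equi-join plus a time range on the rowtime columns.
val intervalJoinSql =
  """
    |SELECT t2.key, t2.id, t1.id
    |FROM T1 AS t1 JOIN T2 AS t2 ON
    |  t1.key = t2.key AND
    |  t1.rt BETWEEN t2.rt - INTERVAL '5' SECOND AND t2.rt + INTERVAL '6' SECOND
    |""".stripMargin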
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.hive.execution import scala.collection.JavaConverters._ import org.apache.hadoop.conf.Configuration import org.apache.hadoop.hive.ql.metadata.{Partition => HivePartition} import org.apache.hadoop.hive.ql.plan.TableDesc import org.apache.hadoop.hive.serde.serdeConstants import org.apache.hadoop.hive.serde2.objectinspector._ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils import org.apache.spark.rdd.RDD import org.apache.spark.sql.SparkSession import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.analysis.CastSupport import org.apache.spark.sql.catalyst.catalog.HiveTableRelation import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.plans.QueryPlan import org.apache.spark.sql.execution._ import org.apache.spark.sql.execution.metric.SQLMetrics import org.apache.spark.sql.hive._ import org.apache.spark.sql.hive.client.HiveClientImpl import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.types.{BooleanType, DataType} import org.apache.spark.util.Utils /** * The Hive table scan operator. Column and partition pruning are both handled. * * @param requestedAttributes Attributes to be fetched from the Hive table. * @param relation The Hive table be scanned. * @param partitionPruningPred An optional partition pruning predicate for partitioned table. */ private[hive] case class HiveTableScanExec( requestedAttributes: Seq[Attribute], relation: HiveTableRelation, partitionPruningPred: Seq[Expression])( @transient private val sparkSession: SparkSession) extends LeafExecNode with CastSupport { require(partitionPruningPred.isEmpty || relation.isPartitioned, "Partition pruning predicates only supported for partitioned tables.") override def conf: SQLConf = sparkSession.sessionState.conf override def nodeName: String = s"Scan hive ${relation.tableMeta.qualifiedName}" override lazy val metrics = Map( "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows")) override def producedAttributes: AttributeSet = outputSet ++ AttributeSet(partitionPruningPred.flatMap(_.references)) private val originalAttributes = AttributeMap(relation.output.map(a => a -> a)) override val output: Seq[Attribute] = { // Retrieve the original attributes based on expression ID so that capitalization matches. requestedAttributes.map(originalAttributes) } // Bind all partition key attribute references in the partition pruning predicate for later // evaluation. 
private lazy val boundPruningPred = partitionPruningPred.reduceLeftOption(And).map { pred => require(pred.dataType == BooleanType, s"Data type of predicate $pred must be ${BooleanType.catalogString} rather than " + s"${pred.dataType.catalogString}.") BindReferences.bindReference(pred, relation.partitionCols) } @transient private lazy val hiveQlTable = HiveClientImpl.toHiveTable(relation.tableMeta) @transient private lazy val tableDesc = new TableDesc( hiveQlTable.getInputFormatClass, hiveQlTable.getOutputFormatClass, hiveQlTable.getMetadata) // Create a local copy of hadoopConf,so that scan specific modifications should not impact // other queries @transient private lazy val hadoopConf = { val c = sparkSession.sessionState.newHadoopConf() // append columns ids and names before broadcast addColumnMetadataToConf(c) c } @transient private lazy val hadoopReader = new HadoopTableReader( output, relation.partitionCols, tableDesc, sparkSession, hadoopConf) private def castFromString(value: String, dataType: DataType) = { cast(Literal(value), dataType).eval(null) } private def addColumnMetadataToConf(hiveConf: Configuration): Unit = { // Specifies needed column IDs for those non-partitioning columns. val columnOrdinals = AttributeMap(relation.dataCols.zipWithIndex) val neededColumnIDs = output.flatMap(columnOrdinals.get).map(o => o: Integer) HiveShim.appendReadColumns(hiveConf, neededColumnIDs, output.map(_.name)) val deserializer = tableDesc.getDeserializerClass.getConstructor().newInstance() deserializer.initialize(hiveConf, tableDesc.getProperties) // Specifies types and object inspectors of columns to be scanned. val structOI = ObjectInspectorUtils .getStandardObjectInspector( deserializer.getObjectInspector, ObjectInspectorCopyOption.JAVA) .asInstanceOf[StructObjectInspector] val columnTypeNames = structOI .getAllStructFieldRefs.asScala .map(_.getFieldObjectInspector) .map(TypeInfoUtils.getTypeInfoFromObjectInspector(_).getTypeName) .mkString(",") hiveConf.set(serdeConstants.LIST_COLUMN_TYPES, columnTypeNames) hiveConf.set(serdeConstants.LIST_COLUMNS, relation.dataCols.map(_.name).mkString(",")) } /** * Prunes partitions not involve the query plan. * * @param partitions All partitions of the relation. * @return Partitions that are involved in the query plan. */ private[hive] def prunePartitions(partitions: Seq[HivePartition]): Seq[HivePartition] = { boundPruningPred match { case None => partitions case Some(shouldKeep) => partitions.filter { part => val dataTypes = relation.partitionCols.map(_.dataType) val castedValues = part.getValues.asScala.zip(dataTypes) .map { case (value, dataType) => castFromString(value, dataType) } // Only partitioned values are needed here, since the predicate has already been bound to // partition key attribute references. 
val row = InternalRow.fromSeq(castedValues) shouldKeep.eval(row).asInstanceOf[Boolean] } } } @transient lazy val prunedPartitions: Seq[HivePartition] = { if (relation.prunedPartitions.nonEmpty) { val hivePartitions = relation.prunedPartitions.get.map(HiveClientImpl.toHivePartition(_, hiveQlTable)) if (partitionPruningPred.forall(!ExecSubqueryExpression.hasSubquery(_))) { hivePartitions } else { prunePartitions(hivePartitions) } } else { if (sparkSession.sessionState.conf.metastorePartitionPruning && partitionPruningPred.nonEmpty) { rawPartitions } else { prunePartitions(rawPartitions) } } } // exposed for tests @transient lazy val rawPartitions: Seq[HivePartition] = { val prunedPartitions = if (sparkSession.sessionState.conf.metastorePartitionPruning && partitionPruningPred.nonEmpty) { // Retrieve the original attributes based on expression ID so that capitalization matches. val normalizedFilters = partitionPruningPred.map(_.transform { case a: AttributeReference => originalAttributes(a) }) sparkSession.sessionState.catalog .listPartitionsByFilter(relation.tableMeta.identifier, normalizedFilters) } else { sparkSession.sessionState.catalog.listPartitions(relation.tableMeta.identifier) } prunedPartitions.map(HiveClientImpl.toHivePartition(_, hiveQlTable)) } protected override def doExecute(): RDD[InternalRow] = { // Using dummyCallSite, as getCallSite can turn out to be expensive with // multiple partitions. val rdd = if (!relation.isPartitioned) { Utils.withDummyCallSite(sqlContext.sparkContext) { hadoopReader.makeRDDForTable(hiveQlTable) } } else { Utils.withDummyCallSite(sqlContext.sparkContext) { hadoopReader.makeRDDForPartitionedTable(prunedPartitions) } } val numOutputRows = longMetric("numOutputRows") // Avoid to serialize MetastoreRelation because schema is lazy. (see SPARK-15649) val outputSchema = schema rdd.mapPartitionsWithIndexInternal { (index, iter) => val proj = UnsafeProjection.create(outputSchema) proj.initialize(index) iter.map { r => numOutputRows += 1 proj(r) } } } override def doCanonicalize(): HiveTableScanExec = { val input: AttributeSeq = relation.output HiveTableScanExec( requestedAttributes.map(QueryPlan.normalizeExpressions(_, input)), relation.canonicalized.asInstanceOf[HiveTableRelation], QueryPlan.normalizePredicates(partitionPruningPred, input))(sparkSession) } override def otherCopyArgs: Seq[AnyRef] = Seq(sparkSession) }
goldmedal/spark
sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveTableScanExec.scala
Scala
apache-2.0
9,354
package com.twitter.finagle.netty4.ssl.server

import com.twitter.finagle.{Address, FailureFlags, SslException, SslVerificationFailedException}
import com.twitter.finagle.ssl.server.{SslServerConfiguration, SslServerSessionVerifier}
import com.twitter.logging.{HasLogLevel, Level}
import com.twitter.util.{Promise, Return, Throw}
import io.netty.channel.{Channel, ChannelHandlerContext, ChannelInboundHandlerAdapter}
import io.netty.handler.ssl.SslHandler
import io.netty.util.concurrent.{GenericFutureListener, Future => NettyFuture}
import javax.net.ssl.SSLSession
import scala.util.control.NonFatal

/**
 * Delays `channelActive` event until the TLS handshake is successfully finished
 * and verified.
 */
private[netty4] class SslServerVerificationHandler(
  sslHandler: SslHandler,
  remoteAddress: Address,
  config: SslServerConfiguration,
  sessionVerifier: SslServerSessionVerifier)
    extends ChannelInboundHandlerAdapter { self =>

  private[this] val onHandshakeComplete = Promise[Unit]()

  private[this] def verifySession(session: SSLSession, ctx: ChannelHandlerContext): Unit = {
    try {
      if (sessionVerifier(remoteAddress, config, session)) {
        ctx.pipeline.remove(self)
        onHandshakeComplete.setDone()
      } else {
        val addr = Option(ctx.channel.remoteAddress)
        ctx.close()
        onHandshakeComplete.updateIfEmpty(Throw(new SslVerificationFailedException(None, addr)))
      }
    } catch {
      case NonFatal(e) =>
        ctx.close()
        val addr = Option(ctx.channel.remoteAddress)
        onHandshakeComplete.updateIfEmpty(Throw(new SslVerificationFailedException(Some(e), addr)))
    }
  }

  override def channelActive(ctx: ChannelHandlerContext): Unit = {
    onHandshakeComplete.respond {
      case Return(_) => ctx.fireChannelActive()
      case _ =>
    }
    if (!ctx.channel().config().isAutoRead) {
      ctx.read()
    }
  }

  override def handlerAdded(ctx: ChannelHandlerContext): Unit = {
    sslHandler
      .handshakeFuture()
      .addListener(new GenericFutureListener[NettyFuture[Channel]] {
        def operationComplete(f: NettyFuture[Channel]): Unit = {
          if (f.isSuccess) {
            val session = sslHandler.engine().getSession
            verifySession(session, ctx)
          } else if (f.isCancelled) {
            ctx.close()
            onHandshakeComplete.updateIfEmpty(Throw(new InterruptedSslException()))
          } else {
            ctx.close()
            onHandshakeComplete.updateIfEmpty(Throw(new HandshakeFailureException(f.cause)))
          }
        }
      })
    super.handlerAdded(ctx)
  }
}

/**
 * Indicates that the SslHandler was interrupted while it was trying to complete the TLS handshake.
 */
private[netty4] class InterruptedSslException(val flags: Long = FailureFlags.Empty)
    extends SslException(None, None)
    with FailureFlags[InterruptedSslException]
    with HasLogLevel {
  override def exceptionMessage(): String =
    "The SslHandler was interrupted while it was trying to complete the TLS handshake."
  override def logLevel: Level = Level.WARNING
  protected def copyWithFlags(flags: Long): InterruptedSslException = new InterruptedSslException(flags)
}

private[netty4] class HandshakeFailureException(
  exn: Throwable,
  val flags: Long = FailureFlags.Empty)
    extends Exception("Failed to complete the TLS handshake.", exn)
    with FailureFlags[HandshakeFailureException]
    with HasLogLevel {
  def logLevel: Level = Level.WARNING
  protected def copyWithFlags(flags: Long): HandshakeFailureException =
    new HandshakeFailureException(exn, flags)
}
luciferous/finagle
finagle-netty4/src/main/scala/com/twitter/finagle/netty4/ssl/server/SslServerVerificationHandler.scala
Scala
apache-2.0
3,622
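A minimal wiring sketch, not taken from the Finagle sources, showing where such a verification handler would sit in a Netty pipeline: directly behind the SslHandler it observes. The initializer class, the factory parameter, and the package declaration (needed only because the handler is private[netty4]) are assumptions for illustration.

package com.twitter.finagle.netty4.ssl.server // only so the private[netty4] constructor is reachable

import com.twitter.finagle.Address
import com.twitter.finagle.ssl.server.{SslServerConfiguration, SslServerSessionVerifier}
import io.netty.channel.{Channel, ChannelInitializer}
import io.netty.handler.ssl.SslHandler

// Hypothetical initializer: newSslHandler, remote, config and verifier are supplied by the caller.
class TlsServerInitializer(
  newSslHandler: () => SslHandler,
  remote: Address,
  config: SslServerConfiguration,
  verifier: SslServerSessionVerifier)
    extends ChannelInitializer[Channel] {

  override protected def initChannel(ch: Channel): Unit = {
    val ssl = newSslHandler()
    // The verification handler is installed right after the SslHandler so that
    // channelActive is withheld until the handshake has been verified.
    ch.pipeline.addLast("ssl", ssl)
    ch.pipeline.addLast("sslVerify", new SslServerVerificationHandler(ssl, remote, config, verifier))
  }
}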
package com.datawizards.dmg.examples

import com.datawizards.dmg.examples.TestModel.Person
import com.datawizards.dmg.service.AvroSchemaRegistryServiceImpl

object CopyAvroSchemaToHDFS extends App {
  val service = new AvroSchemaRegistryServiceImpl("http://localhost:8081")
  service.copyAvroSchemaToHdfs[Person]("/metadata/schemas/person")
}
mateuszboryn/data-model-generator
src/main/scala/com/datawizards/dmg/examples/CopyAvroSchemaToHDFS.scala
Scala
apache-2.0
342
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.columnar.compression import java.nio.ByteBuffer import java.nio.ByteOrder import scala.collection.mutable import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions.SpecificInternalRow import org.apache.spark.sql.execution.columnar._ import org.apache.spark.sql.execution.vectorized.WritableColumnVector import org.apache.spark.sql.types._ private[columnar] case object PassThrough extends CompressionScheme { override val typeId = 0 override def supports(columnType: ColumnType[_]): Boolean = true override def encoder[T <: AtomicType](columnType: NativeColumnType[T]): Encoder[T] = { new this.Encoder[T](columnType) } override def decoder[T <: AtomicType]( buffer: ByteBuffer, columnType: NativeColumnType[T]): Decoder[T] = { new this.Decoder(buffer, columnType) } class Encoder[T <: AtomicType](columnType: NativeColumnType[T]) extends compression.Encoder[T] { override def uncompressedSize: Int = 0 override def compressedSize: Int = 0 override def compress(from: ByteBuffer, to: ByteBuffer): ByteBuffer = { // Writes compression type ID and copies raw contents to.putInt(PassThrough.typeId).put(from).rewind() to } } class Decoder[T <: AtomicType](buffer: ByteBuffer, columnType: NativeColumnType[T]) extends compression.Decoder[T] { override def next(row: InternalRow, ordinal: Int): Unit = { columnType.extract(buffer, row, ordinal) } override def hasNext: Boolean = buffer.hasRemaining private def putBooleans( columnVector: WritableColumnVector, pos: Int, bufferPos: Int, len: Int): Unit = { for (i <- 0 until len) { columnVector.putBoolean(pos + i, (buffer.get(bufferPos + i) != 0)) } } private def putBytes( columnVector: WritableColumnVector, pos: Int, bufferPos: Int, len: Int): Unit = { columnVector.putBytes(pos, len, buffer.array, bufferPos) } private def putShorts( columnVector: WritableColumnVector, pos: Int, bufferPos: Int, len: Int): Unit = { columnVector.putShorts(pos, len, buffer.array, bufferPos) } private def putInts( columnVector: WritableColumnVector, pos: Int, bufferPos: Int, len: Int): Unit = { columnVector.putInts(pos, len, buffer.array, bufferPos) } private def putLongs( columnVector: WritableColumnVector, pos: Int, bufferPos: Int, len: Int): Unit = { columnVector.putLongs(pos, len, buffer.array, bufferPos) } private def putFloats( columnVector: WritableColumnVector, pos: Int, bufferPos: Int, len: Int): Unit = { columnVector.putFloats(pos, len, buffer.array, bufferPos) } private def putDoubles( columnVector: WritableColumnVector, pos: Int, bufferPos: Int, len: Int): Unit = { columnVector.putDoubles(pos, len, buffer.array, bufferPos) } private def decompress0( columnVector: WritableColumnVector, capacity: Int, unitSize: Int, putFunction: 
(WritableColumnVector, Int, Int, Int) => Unit): Unit = { val nullsBuffer = buffer.duplicate().order(ByteOrder.nativeOrder()) nullsBuffer.rewind() val nullCount = ByteBufferHelper.getInt(nullsBuffer) var nextNullIndex = if (nullCount > 0) ByteBufferHelper.getInt(nullsBuffer) else capacity var pos = 0 var seenNulls = 0 var bufferPos = buffer.position() while (pos < capacity) { if (pos != nextNullIndex) { val len = nextNullIndex - pos assert(len * unitSize.toLong < Int.MaxValue) putFunction(columnVector, pos, bufferPos, len) bufferPos += len * unitSize pos += len } else { seenNulls += 1 nextNullIndex = if (seenNulls < nullCount) { ByteBufferHelper.getInt(nullsBuffer) } else { capacity } columnVector.putNull(pos) pos += 1 } } } override def decompress(columnVector: WritableColumnVector, capacity: Int): Unit = { columnType.dataType match { case _: BooleanType => val unitSize = 1 decompress0(columnVector, capacity, unitSize, putBooleans) case _: ByteType => val unitSize = 1 decompress0(columnVector, capacity, unitSize, putBytes) case _: ShortType => val unitSize = 2 decompress0(columnVector, capacity, unitSize, putShorts) case _: IntegerType => val unitSize = 4 decompress0(columnVector, capacity, unitSize, putInts) case _: LongType => val unitSize = 8 decompress0(columnVector, capacity, unitSize, putLongs) case _: FloatType => val unitSize = 4 decompress0(columnVector, capacity, unitSize, putFloats) case _: DoubleType => val unitSize = 8 decompress0(columnVector, capacity, unitSize, putDoubles) } } } } private[columnar] case object RunLengthEncoding extends CompressionScheme { override val typeId = 1 override def encoder[T <: AtomicType](columnType: NativeColumnType[T]): Encoder[T] = { new this.Encoder[T](columnType) } override def decoder[T <: AtomicType]( buffer: ByteBuffer, columnType: NativeColumnType[T]): Decoder[T] = { new this.Decoder(buffer, columnType) } override def supports(columnType: ColumnType[_]): Boolean = columnType match { case INT | LONG | SHORT | BYTE | STRING | BOOLEAN => true case _ => false } class Encoder[T <: AtomicType](columnType: NativeColumnType[T]) extends compression.Encoder[T] { private var _uncompressedSize = 0 private var _compressedSize = 0 // Using `MutableRow` to store the last value to avoid boxing/unboxing cost. 
private val lastValue = new SpecificInternalRow(Seq(columnType.dataType)) private var lastRun = 0 override def uncompressedSize: Int = _uncompressedSize override def compressedSize: Int = _compressedSize override def gatherCompressibilityStats(row: InternalRow, ordinal: Int): Unit = { val value = columnType.getField(row, ordinal) val actualSize = columnType.actualSize(row, ordinal) _uncompressedSize += actualSize if (lastValue.isNullAt(0)) { columnType.copyField(row, ordinal, lastValue, 0) lastRun = 1 _compressedSize += actualSize + 4 } else { if (columnType.getField(lastValue, 0) == value) { lastRun += 1 } else { _compressedSize += actualSize + 4 columnType.copyField(row, ordinal, lastValue, 0) lastRun = 1 } } } override def compress(from: ByteBuffer, to: ByteBuffer): ByteBuffer = { to.putInt(RunLengthEncoding.typeId) if (from.hasRemaining) { val currentValue = new SpecificInternalRow(Seq(columnType.dataType)) var currentRun = 1 val value = new SpecificInternalRow(Seq(columnType.dataType)) columnType.extract(from, currentValue, 0) while (from.hasRemaining) { columnType.extract(from, value, 0) if (value.get(0, columnType.dataType) == currentValue.get(0, columnType.dataType)) { currentRun += 1 } else { // Writes current run columnType.append(currentValue, 0, to) to.putInt(currentRun) // Resets current run columnType.copyField(value, 0, currentValue, 0) currentRun = 1 } } // Writes the last run columnType.append(currentValue, 0, to) to.putInt(currentRun) } to.rewind() to } } class Decoder[T <: AtomicType](buffer: ByteBuffer, columnType: NativeColumnType[T]) extends compression.Decoder[T] { private var run = 0 private var valueCount = 0 private var currentValue: T#InternalType = _ override def next(row: InternalRow, ordinal: Int): Unit = { if (valueCount == run) { currentValue = columnType.extract(buffer) run = ByteBufferHelper.getInt(buffer) valueCount = 1 } else { valueCount += 1 } columnType.setField(row, ordinal, currentValue) } override def hasNext: Boolean = valueCount < run || buffer.hasRemaining private def putBoolean(columnVector: WritableColumnVector, pos: Int, value: Long): Unit = { columnVector.putBoolean(pos, value == 1) } private def getByte(buffer: ByteBuffer): Long = { buffer.get().toLong } private def putByte(columnVector: WritableColumnVector, pos: Int, value: Long): Unit = { columnVector.putByte(pos, value.toByte) } private def getShort(buffer: ByteBuffer): Long = { buffer.getShort().toLong } private def putShort(columnVector: WritableColumnVector, pos: Int, value: Long): Unit = { columnVector.putShort(pos, value.toShort) } private def getInt(buffer: ByteBuffer): Long = { buffer.getInt().toLong } private def putInt(columnVector: WritableColumnVector, pos: Int, value: Long): Unit = { columnVector.putInt(pos, value.toInt) } private def getLong(buffer: ByteBuffer): Long = { buffer.getLong() } private def putLong(columnVector: WritableColumnVector, pos: Int, value: Long): Unit = { columnVector.putLong(pos, value) } private def decompress0( columnVector: WritableColumnVector, capacity: Int, getFunction: (ByteBuffer) => Long, putFunction: (WritableColumnVector, Int, Long) => Unit): Unit = { val nullsBuffer = buffer.duplicate().order(ByteOrder.nativeOrder()) nullsBuffer.rewind() val nullCount = ByteBufferHelper.getInt(nullsBuffer) var nextNullIndex = if (nullCount > 0) ByteBufferHelper.getInt(nullsBuffer) else -1 var pos = 0 var seenNulls = 0 var runLocal = 0 var valueCountLocal = 0 var currentValueLocal: Long = 0 while (valueCountLocal < runLocal || (pos < capacity)) { if (pos != 
nextNullIndex) { if (valueCountLocal == runLocal) { currentValueLocal = getFunction(buffer) runLocal = ByteBufferHelper.getInt(buffer) valueCountLocal = 1 } else { valueCountLocal += 1 } putFunction(columnVector, pos, currentValueLocal) } else { seenNulls += 1 if (seenNulls < nullCount) { nextNullIndex = ByteBufferHelper.getInt(nullsBuffer) } columnVector.putNull(pos) } pos += 1 } } override def decompress(columnVector: WritableColumnVector, capacity: Int): Unit = { columnType.dataType match { case _: BooleanType => decompress0(columnVector, capacity, getByte, putBoolean) case _: ByteType => decompress0(columnVector, capacity, getByte, putByte) case _: ShortType => decompress0(columnVector, capacity, getShort, putShort) case _: IntegerType => decompress0(columnVector, capacity, getInt, putInt) case _: LongType => decompress0(columnVector, capacity, getLong, putLong) case _ => throw new IllegalStateException("Not supported type in RunLengthEncoding.") } } } } private[columnar] case object DictionaryEncoding extends CompressionScheme { override val typeId = 2 // 32K unique values allowed val MAX_DICT_SIZE = Short.MaxValue override def decoder[T <: AtomicType](buffer: ByteBuffer, columnType: NativeColumnType[T]) : Decoder[T] = { new this.Decoder(buffer, columnType) } override def encoder[T <: AtomicType](columnType: NativeColumnType[T]): Encoder[T] = { new this.Encoder[T](columnType) } override def supports(columnType: ColumnType[_]): Boolean = columnType match { case INT | LONG | STRING => true case _ => false } class Encoder[T <: AtomicType](columnType: NativeColumnType[T]) extends compression.Encoder[T] { // Size of the input, uncompressed, in bytes. Note that we only count until the dictionary // overflows. private var _uncompressedSize = 0 // If the number of distinct elements is too large, we discard the use of dictionary encoding // and set the overflow flag to true. private var overflow = false // Total number of elements. private var count = 0 // The reverse mapping of _dictionary, i.e. mapping encoded integer to the value itself. private var values = new mutable.ArrayBuffer[T#InternalType](1024) // The dictionary that maps a value to the encoded short integer. private val dictionary = mutable.HashMap.empty[Any, Short] // Size of the serialized dictionary in bytes. Initialized to 4 since we need at least an `Int` // to store dictionary element count. 
private var dictionarySize = 4 override def gatherCompressibilityStats(row: InternalRow, ordinal: Int): Unit = { val value = columnType.getField(row, ordinal) if (!overflow) { val actualSize = columnType.actualSize(row, ordinal) count += 1 _uncompressedSize += actualSize if (!dictionary.contains(value)) { if (dictionary.size < MAX_DICT_SIZE) { val clone = columnType.clone(value) values += clone dictionarySize += actualSize dictionary(clone) = dictionary.size.toShort } else { overflow = true values.clear() dictionary.clear() } } } } override def compress(from: ByteBuffer, to: ByteBuffer): ByteBuffer = { if (overflow) { throw new IllegalStateException( "Dictionary encoding should not be used because of dictionary overflow.") } to.putInt(DictionaryEncoding.typeId) .putInt(dictionary.size) var i = 0 while (i < values.length) { columnType.append(values(i), to) i += 1 } while (from.hasRemaining) { to.putShort(dictionary(columnType.extract(from))) } to.rewind() to } override def uncompressedSize: Int = _uncompressedSize override def compressedSize: Int = if (overflow) Int.MaxValue else dictionarySize + count * 2 } class Decoder[T <: AtomicType](buffer: ByteBuffer, columnType: NativeColumnType[T]) extends compression.Decoder[T] { val elementNum = ByteBufferHelper.getInt(buffer) private val dictionary: Array[Any] = new Array[Any](elementNum) private var intDictionary: Array[Int] = null private var longDictionary: Array[Long] = null columnType.dataType match { case _: IntegerType => intDictionary = new Array[Int](elementNum) for (i <- 0 until elementNum) { val v = columnType.extract(buffer).asInstanceOf[Int] intDictionary(i) = v dictionary(i) = v } case _: LongType => longDictionary = new Array[Long](elementNum) for (i <- 0 until elementNum) { val v = columnType.extract(buffer).asInstanceOf[Long] longDictionary(i) = v dictionary(i) = v } case _: StringType => for (i <- 0 until elementNum) { val v = columnType.extract(buffer).asInstanceOf[Any] dictionary(i) = v } } override def next(row: InternalRow, ordinal: Int): Unit = { columnType.setField(row, ordinal, dictionary(buffer.getShort()).asInstanceOf[T#InternalType]) } override def hasNext: Boolean = buffer.hasRemaining override def decompress(columnVector: WritableColumnVector, capacity: Int): Unit = { val nullsBuffer = buffer.duplicate().order(ByteOrder.nativeOrder()) nullsBuffer.rewind() val nullCount = ByteBufferHelper.getInt(nullsBuffer) var nextNullIndex = if (nullCount > 0) ByteBufferHelper.getInt(nullsBuffer) else -1 var pos = 0 var seenNulls = 0 columnType.dataType match { case _: IntegerType => val dictionaryIds = columnVector.reserveDictionaryIds(capacity) columnVector.setDictionary(new ColumnDictionary(intDictionary)) while (pos < capacity) { if (pos != nextNullIndex) { dictionaryIds.putInt(pos, buffer.getShort()) } else { seenNulls += 1 if (seenNulls < nullCount) nextNullIndex = ByteBufferHelper.getInt(nullsBuffer) columnVector.putNull(pos) } pos += 1 } case _: LongType => val dictionaryIds = columnVector.reserveDictionaryIds(capacity) columnVector.setDictionary(new ColumnDictionary(longDictionary)) while (pos < capacity) { if (pos != nextNullIndex) { dictionaryIds.putInt(pos, buffer.getShort()) } else { seenNulls += 1 if (seenNulls < nullCount) { nextNullIndex = ByteBufferHelper.getInt(nullsBuffer) } columnVector.putNull(pos) } pos += 1 } case _ => throw new IllegalStateException("Not supported type in DictionaryEncoding.") } } } } private[columnar] case object BooleanBitSet extends CompressionScheme { override val typeId = 3 val 
BITS_PER_LONG = 64 override def decoder[T <: AtomicType](buffer: ByteBuffer, columnType: NativeColumnType[T]) : compression.Decoder[T] = { new this.Decoder(buffer).asInstanceOf[compression.Decoder[T]] } override def encoder[T <: AtomicType](columnType: NativeColumnType[T]): compression.Encoder[T] = { (new this.Encoder).asInstanceOf[compression.Encoder[T]] } override def supports(columnType: ColumnType[_]): Boolean = columnType == BOOLEAN class Encoder extends compression.Encoder[BooleanType.type] { private var _uncompressedSize = 0 override def gatherCompressibilityStats(row: InternalRow, ordinal: Int): Unit = { _uncompressedSize += BOOLEAN.defaultSize } override def compress(from: ByteBuffer, to: ByteBuffer): ByteBuffer = { to.putInt(BooleanBitSet.typeId) // Total element count (1 byte per Boolean value) .putInt(from.remaining) while (from.remaining >= BITS_PER_LONG) { var word = 0: Long var i = 0 while (i < BITS_PER_LONG) { if (BOOLEAN.extract(from)) { word |= (1: Long) << i } i += 1 } to.putLong(word) } if (from.hasRemaining) { var word = 0: Long var i = 0 while (from.hasRemaining) { if (BOOLEAN.extract(from)) { word |= (1: Long) << i } i += 1 } to.putLong(word) } to.rewind() to } override def uncompressedSize: Int = _uncompressedSize override def compressedSize: Int = { val extra = if (_uncompressedSize % BITS_PER_LONG == 0) 0 else 1 (_uncompressedSize / BITS_PER_LONG + extra) * 8 + 4 } } class Decoder(buffer: ByteBuffer) extends compression.Decoder[BooleanType.type] { private val count = ByteBufferHelper.getInt(buffer) private var currentWord = 0: Long private var visited: Int = 0 override def next(row: InternalRow, ordinal: Int): Unit = { val bit = visited % BITS_PER_LONG visited += 1 if (bit == 0) { currentWord = ByteBufferHelper.getLong(buffer) } row.setBoolean(ordinal, ((currentWord >> bit) & 1) != 0) } override def hasNext: Boolean = visited < count override def decompress(columnVector: WritableColumnVector, capacity: Int): Unit = { val countLocal = count var currentWordLocal: Long = 0 var visitedLocal: Int = 0 val nullsBuffer = buffer.duplicate().order(ByteOrder.nativeOrder()) nullsBuffer.rewind() val nullCount = ByteBufferHelper.getInt(nullsBuffer) var nextNullIndex = if (nullCount > 0) ByteBufferHelper.getInt(nullsBuffer) else -1 var pos = 0 var seenNulls = 0 while (visitedLocal < countLocal) { if (pos != nextNullIndex) { val bit = visitedLocal % BITS_PER_LONG visitedLocal += 1 if (bit == 0) { currentWordLocal = ByteBufferHelper.getLong(buffer) } columnVector.putBoolean(pos, ((currentWordLocal >> bit) & 1) != 0) } else { seenNulls += 1 if (seenNulls < nullCount) { nextNullIndex = ByteBufferHelper.getInt(nullsBuffer) } columnVector.putNull(pos) } pos += 1 } } } } private[columnar] case object IntDelta extends CompressionScheme { override def typeId: Int = 4 override def decoder[T <: AtomicType](buffer: ByteBuffer, columnType: NativeColumnType[T]) : compression.Decoder[T] = { new Decoder(buffer, INT).asInstanceOf[compression.Decoder[T]] } override def encoder[T <: AtomicType](columnType: NativeColumnType[T]): compression.Encoder[T] = { (new Encoder).asInstanceOf[compression.Encoder[T]] } override def supports(columnType: ColumnType[_]): Boolean = columnType == INT class Encoder extends compression.Encoder[IntegerType.type] { protected var _compressedSize: Int = 0 protected var _uncompressedSize: Int = 0 override def compressedSize: Int = _compressedSize override def uncompressedSize: Int = _uncompressedSize private var prevValue: Int = _ override def 
gatherCompressibilityStats(row: InternalRow, ordinal: Int): Unit = { val value = row.getInt(ordinal) val delta = value - prevValue _compressedSize += 1 // If this is the first integer to be compressed, or the delta is out of byte range, then give // up compressing this integer. if (_uncompressedSize == 0 || delta <= Byte.MinValue || delta > Byte.MaxValue) { _compressedSize += INT.defaultSize } _uncompressedSize += INT.defaultSize prevValue = value } override def compress(from: ByteBuffer, to: ByteBuffer): ByteBuffer = { to.putInt(typeId) if (from.hasRemaining) { var prev = from.getInt() to.put(Byte.MinValue) to.putInt(prev) while (from.hasRemaining) { val current = from.getInt() val delta = current - prev prev = current if (Byte.MinValue < delta && delta <= Byte.MaxValue) { to.put(delta.toByte) } else { to.put(Byte.MinValue) to.putInt(current) } } } to.rewind().asInstanceOf[ByteBuffer] } } class Decoder(buffer: ByteBuffer, columnType: NativeColumnType[IntegerType.type]) extends compression.Decoder[IntegerType.type] { private var prev: Int = _ override def hasNext: Boolean = buffer.hasRemaining override def next(row: InternalRow, ordinal: Int): Unit = { val delta = buffer.get() prev = if (delta > Byte.MinValue) prev + delta else ByteBufferHelper.getInt(buffer) row.setInt(ordinal, prev) } override def decompress(columnVector: WritableColumnVector, capacity: Int): Unit = { var prevLocal: Int = 0 val nullsBuffer = buffer.duplicate().order(ByteOrder.nativeOrder()) nullsBuffer.rewind() val nullCount = ByteBufferHelper.getInt(nullsBuffer) var nextNullIndex = if (nullCount > 0) ByteBufferHelper.getInt(nullsBuffer) else -1 var pos = 0 var seenNulls = 0 while (pos < capacity) { if (pos != nextNullIndex) { val delta = buffer.get prevLocal = if (delta > Byte.MinValue) { prevLocal + delta } else { ByteBufferHelper.getInt(buffer) } columnVector.putInt(pos, prevLocal) } else { seenNulls += 1 if (seenNulls < nullCount) { nextNullIndex = ByteBufferHelper.getInt(nullsBuffer) } columnVector.putNull(pos) } pos += 1 } } } } private[columnar] case object LongDelta extends CompressionScheme { override def typeId: Int = 5 override def decoder[T <: AtomicType](buffer: ByteBuffer, columnType: NativeColumnType[T]) : compression.Decoder[T] = { new Decoder(buffer, LONG).asInstanceOf[compression.Decoder[T]] } override def encoder[T <: AtomicType](columnType: NativeColumnType[T]): compression.Encoder[T] = { (new Encoder).asInstanceOf[compression.Encoder[T]] } override def supports(columnType: ColumnType[_]): Boolean = columnType == LONG class Encoder extends compression.Encoder[LongType.type] { protected var _compressedSize: Int = 0 protected var _uncompressedSize: Int = 0 override def compressedSize: Int = _compressedSize override def uncompressedSize: Int = _uncompressedSize private var prevValue: Long = _ override def gatherCompressibilityStats(row: InternalRow, ordinal: Int): Unit = { val value = row.getLong(ordinal) val delta = value - prevValue _compressedSize += 1 // If this is the first long integer to be compressed, or the delta is out of byte range, then // give up compressing this long integer. 
if (_uncompressedSize == 0 || delta <= Byte.MinValue || delta > Byte.MaxValue) { _compressedSize += LONG.defaultSize } _uncompressedSize += LONG.defaultSize prevValue = value } override def compress(from: ByteBuffer, to: ByteBuffer): ByteBuffer = { to.putInt(typeId) if (from.hasRemaining) { var prev = from.getLong() to.put(Byte.MinValue) to.putLong(prev) while (from.hasRemaining) { val current = from.getLong() val delta = current - prev prev = current if (Byte.MinValue < delta && delta <= Byte.MaxValue) { to.put(delta.toByte) } else { to.put(Byte.MinValue) to.putLong(current) } } } to.rewind().asInstanceOf[ByteBuffer] } } class Decoder(buffer: ByteBuffer, columnType: NativeColumnType[LongType.type]) extends compression.Decoder[LongType.type] { private var prev: Long = _ override def hasNext: Boolean = buffer.hasRemaining override def next(row: InternalRow, ordinal: Int): Unit = { val delta = buffer.get() prev = if (delta > Byte.MinValue) prev + delta else ByteBufferHelper.getLong(buffer) row.setLong(ordinal, prev) } override def decompress(columnVector: WritableColumnVector, capacity: Int): Unit = { var prevLocal: Long = 0 val nullsBuffer = buffer.duplicate().order(ByteOrder.nativeOrder()) nullsBuffer.rewind val nullCount = ByteBufferHelper.getInt(nullsBuffer) var nextNullIndex = if (nullCount > 0) ByteBufferHelper.getInt(nullsBuffer) else -1 var pos = 0 var seenNulls = 0 while (pos < capacity) { if (pos != nextNullIndex) { val delta = buffer.get() prevLocal = if (delta > Byte.MinValue) { prevLocal + delta } else { ByteBufferHelper.getLong(buffer) } columnVector.putLong(pos, prevLocal) } else { seenNulls += 1 if (seenNulls < nullCount) { nextNullIndex = ByteBufferHelper.getInt(nullsBuffer) } columnVector.putNull(pos) } pos += 1 } } } }
bravo-zhang/spark
sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/compression/compressionSchemes.scala
Scala
apache-2.0
28,337
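The IntDelta scheme above stores each value either as a one-byte delta in (Byte.MinValue, Byte.MaxValue], or as the escape byte Byte.MinValue followed by the literal four-byte integer. The following standalone sketch mirrors that byte layout without the Spark column types; the object and method names are invented for illustration.

import java.nio.ByteBuffer

object IntDeltaSketch {
  def compress(values: Array[Int]): ByteBuffer = {
    val out = ByteBuffer.allocate(values.length * 5) // worst case: escape byte + full int per value
    var prev = 0
    var first = true
    for (v <- values) {
      val delta = v - prev
      if (!first && delta > Byte.MinValue && delta <= Byte.MaxValue) {
        out.put(delta.toByte)              // small delta fits in one byte
      } else {
        out.put(Byte.MinValue).putInt(v)   // escape marker followed by the literal value
      }
      prev = v
      first = false
    }
    out.flip()
    out
  }

  def decompress(in: ByteBuffer): Seq[Int] = {
    val result = Seq.newBuilder[Int]
    var prev = 0
    while (in.hasRemaining) {
      val b = in.get()
      prev = if (b > Byte.MinValue) prev + b else in.getInt()
      result += prev
    }
    result.result()
  }

  def main(args: Array[String]): Unit = {
    val xs = Array(100, 101, 103, 1000, 1001)
    assert(decompress(compress(xs)) == xs.toSeq) // round-trips losslessly
  }
}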
package scalapoi

import shapeless._
import cats._
import cats.data._
import cats.implicits._
import org.apache.poi.ss.usermodel.{Cell, Row}
import org.apache.poi.xssf.usermodel._

// the following code is designed to derive a PoiEncoder instance for flat case classes
object instances {

  implicit val stringEncoder: PoiEncoder[String] =
    PoiEncoder.pure(1)(s => List(StrCell(s)))

  implicit val booleanEncoder: PoiEncoder[Boolean] =
    PoiEncoder.pure(1)(b => List(BoolCell(b)))

  implicit val intEncoder: PoiEncoder[Int] =
    PoiEncoder.pure(1)(i => List(NumCell(i)))

  implicit val doubleEncoder: PoiEncoder[Double] =
    PoiEncoder.pure(1)(d => List(NumCell(d)))

  implicit val longEncoder: PoiEncoder[Long] =
    PoiEncoder.pure(1)(l => List(NumCell(l)))

  implicit def optionEncoder[A](implicit E: PoiEncoder[A]): PoiEncoder[Option[A]] =
    PoiEncoder.pure(E.width)(o => o.map(E.encode).getOrElse(List.fill(E.width)(BlankCell)))

  implicit val hnilEncoder: PoiEncoder[HNil] =
    PoiEncoder.pure(0)(hnil => Nil)

  implicit def hlistEncoder[H, T <: HList](
    implicit
    hEncoder: Lazy[PoiEncoder[H]],
    tEncoder: PoiEncoder[T]
  ): PoiEncoder[H :: T] =
    PoiEncoder.pure(hEncoder.value.width + tEncoder.width) {
      case h :: t => hEncoder.value.encode(h) ++ tEncoder.encode(t)
    }

  implicit val cnilEncoder: PoiEncoder[CNil] =
    PoiEncoder.pure(0)(cnil => throw new Exception("unreachable"))

  implicit def coproductEncoder[H, T <: shapeless.Coproduct](
    implicit
    hEncoder: Lazy[PoiEncoder[H]],
    tEncoder: PoiEncoder[T]
  ): PoiEncoder[H :+: T] =
    PoiEncoder.pure(hEncoder.value.width + tEncoder.width) {
      case Inl(h) => hEncoder.value.encode(h) ++ List.fill(tEncoder.width)(BlankCell)
      case Inr(t) => List.fill(hEncoder.value.width)(BlankCell) ++ tEncoder.encode(t)
    }

  implicit def genericEncoder[A, R](
    implicit
    gen: Generic.Aux[A, R],
    enc: Lazy[PoiEncoder[R]]
  ): PoiEncoder[A] =
    PoiEncoder.pure(enc.value.width)(a => enc.value.encode(gen.to(a)))
}
hamishdickson/scalapoi
src/main/scala/scalapoi/instances.scala
Scala
mit
1,999
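A hypothetical usage sketch of the derivation above: with these instances in scope, a PoiEncoder for a flat case class falls out of the generic machinery. The Employee class is invented here, and it is assumed that PoiEncoder lives in the same scalapoi package; width and encode are the members this file already relies on.

import scalapoi.PoiEncoder
import scalapoi.instances._

case class Employee(name: String, age: Int, active: Boolean, nickname: Option[String])

object EmployeeEncoderDemo extends App {
  // Derived via genericEncoder + hlistEncoder: one cell per field, a blank cell for None.
  val encoder = implicitly[PoiEncoder[Employee]]
  val cells = encoder.encode(Employee("Ada", 36, active = true, nickname = None))
  println(s"width=${encoder.width}, cells=$cells") // width is expected to be 4
}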
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.datasources.orc import org.apache.hadoop.io._ import org.apache.orc.TypeDescription import org.apache.orc.mapred.{OrcList, OrcMap, OrcStruct, OrcTimestamp} import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions.SpecializedGetters import org.apache.spark.sql.catalyst.util._ import org.apache.spark.sql.errors.QueryExecutionErrors import org.apache.spark.sql.types._ /** * A serializer to serialize Spark rows to ORC structs. */ class OrcSerializer(dataSchema: StructType) { private val result = createOrcValue(dataSchema).asInstanceOf[OrcStruct] private val converters = dataSchema.map(_.dataType).map(newConverter(_)).toArray def serialize(row: InternalRow): OrcStruct = { var i = 0 while (i < converters.length) { if (row.isNullAt(i)) { result.setFieldValue(i, null) } else { result.setFieldValue(i, converters(i)(row, i)) } i += 1 } result } private type Converter = (SpecializedGetters, Int) => WritableComparable[_] /** * Creates a converter to convert Catalyst data at the given ordinal to ORC values. 
*/ private def newConverter( dataType: DataType, reuseObj: Boolean = true): Converter = dataType match { case NullType => (getter, ordinal) => null case BooleanType => if (reuseObj) { val result = new BooleanWritable() (getter, ordinal) => result.set(getter.getBoolean(ordinal)) result } else { (getter, ordinal) => new BooleanWritable(getter.getBoolean(ordinal)) } case ByteType => if (reuseObj) { val result = new ByteWritable() (getter, ordinal) => result.set(getter.getByte(ordinal)) result } else { (getter, ordinal) => new ByteWritable(getter.getByte(ordinal)) } case ShortType => if (reuseObj) { val result = new ShortWritable() (getter, ordinal) => result.set(getter.getShort(ordinal)) result } else { (getter, ordinal) => new ShortWritable(getter.getShort(ordinal)) } case IntegerType => if (reuseObj) { val result = new IntWritable() (getter, ordinal) => result.set(getter.getInt(ordinal)) result } else { (getter, ordinal) => new IntWritable(getter.getInt(ordinal)) } case LongType => if (reuseObj) { val result = new LongWritable() (getter, ordinal) => result.set(getter.getLong(ordinal)) result } else { (getter, ordinal) => new LongWritable(getter.getLong(ordinal)) } case FloatType => if (reuseObj) { val result = new FloatWritable() (getter, ordinal) => result.set(getter.getFloat(ordinal)) result } else { (getter, ordinal) => new FloatWritable(getter.getFloat(ordinal)) } case DoubleType => if (reuseObj) { val result = new DoubleWritable() (getter, ordinal) => result.set(getter.getDouble(ordinal)) result } else { (getter, ordinal) => new DoubleWritable(getter.getDouble(ordinal)) } // Don't reuse the result object for string and binary as it would cause extra data copy. case StringType => (getter, ordinal) => new Text(getter.getUTF8String(ordinal).getBytes) case BinaryType => (getter, ordinal) => new BytesWritable(getter.getBinary(ordinal)) case DateType => OrcShimUtils.getDateWritable(reuseObj) // The following cases are already expensive, reusing object or not doesn't matter. case TimestampType => (getter, ordinal) => val ts = DateTimeUtils.toJavaTimestamp(getter.getLong(ordinal)) val result = new OrcTimestamp(ts.getTime) result.setNanos(ts.getNanos) result case DecimalType.Fixed(precision, scale) => OrcShimUtils.getHiveDecimalWritable(precision, scale) case st: StructType => (getter, ordinal) => val result = createOrcValue(st).asInstanceOf[OrcStruct] val fieldConverters = st.map(_.dataType).map(newConverter(_)) val numFields = st.length val struct = getter.getStruct(ordinal, numFields) var i = 0 while (i < numFields) { if (struct.isNullAt(i)) { result.setFieldValue(i, null) } else { result.setFieldValue(i, fieldConverters(i)(struct, i)) } i += 1 } result case ArrayType(elementType, _) => (getter, ordinal) => val result = createOrcValue(dataType).asInstanceOf[OrcList[WritableComparable[_]]] // Need to put all converted values to a list, can't reuse object. val elementConverter = newConverter(elementType, reuseObj = false) val array = getter.getArray(ordinal) var i = 0 while (i < array.numElements()) { if (array.isNullAt(i)) { result.add(null) } else { result.add(elementConverter(array, i)) } i += 1 } result case MapType(keyType, valueType, _) => (getter, ordinal) => val result = createOrcValue(dataType) .asInstanceOf[OrcMap[WritableComparable[_], WritableComparable[_]]] // Need to put all converted values to a list, can't reuse object. 
val keyConverter = newConverter(keyType, reuseObj = false) val valueConverter = newConverter(valueType, reuseObj = false) val map = getter.getMap(ordinal) val keyArray = map.keyArray() val valueArray = map.valueArray() var i = 0 while (i < map.numElements()) { val key = keyConverter(keyArray, i) if (valueArray.isNullAt(i)) { result.put(key, null) } else { result.put(key, valueConverter(valueArray, i)) } i += 1 } result case udt: UserDefinedType[_] => newConverter(udt.sqlType) case _ => throw QueryExecutionErrors.dataTypeUnsupportedYetError(dataType) } /** * Return a Orc value object for the given Spark schema. */ private def createOrcValue(dataType: DataType) = { OrcStruct.createValue(TypeDescription.fromString(OrcFileFormat.getQuotedSchemaString(dataType))) } }
wangmiao1981/spark
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcSerializer.scala
Scala
apache-2.0
7,097
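A short usage sketch, assuming the calling code can see these Spark-internal classes: build a serializer for a two-column schema and turn one Catalyst InternalRow into an ORC struct. The column names and values are made up.

import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.datasources.orc.OrcSerializer
import org.apache.spark.sql.types.{LongType, StringType, StructField, StructType}
import org.apache.spark.unsafe.types.UTF8String

object OrcSerializerExample {
  def main(args: Array[String]): Unit = {
    val schema = StructType(Seq(
      StructField("id", LongType),
      StructField("name", StringType)))

    val serializer = new OrcSerializer(schema)
    // Catalyst represents strings as UTF8String inside an InternalRow.
    val row = InternalRow.fromSeq(Seq(1L, UTF8String.fromString("spark")))
    val orcStruct = serializer.serialize(row) // the reused OrcStruct with both field values set
    println(orcStruct)
  }
}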
package dao

import javax.inject.{Inject, Named}
import model.{Airport, Country, Runaway}
import play.api.cache._

import scala.concurrent.Future

class CachingDAO @Inject()(
  @Named("appDAO") dao: DAO,
  cache: CacheApi
) extends DAO {

  override def allAirports(): Future[List[Airport]] = {
    val data = cache.getOrElse("allAirports")(dao.allAirports())
    data
  }

  override def allCountries(): Future[List[Country]] = {
    val data = cache.getOrElse("allCountries")(dao.allCountries())
    data
  }

  override def allRunaways(): Future[List[Runaway]] = {
    val data = cache.getOrElse("allRunaways")(dao.allRunaways())
    data
  }

  override def findAirportsByName(countryCode: String): Future[List[(Airport, List[Runaway])]] = {
    val data = cache.getOrElse(countryCode)(dao.findAirportsByName(countryCode))
    data
  }

  override def allCountriesSortedByNumberOfAirports(): Future[List[(Country, Int)]] = {
    val data = cache.getOrElse("allCountriesSortedByNumberOfAirports")(dao.allCountriesSortedByNumberOfAirports())
    data
  }

  override def typeOfSurfacesPerCountry(): Future[List[(Country, List[String])]] = {
    val data = cache.getOrElse("typeOfSurfacesPerCountry")(dao.typeOfSurfacesPerCountry())
    data
  }

  override def topIdentifications(): Future[List[(String, Int)]] = {
    val data = cache.getOrElse("topIdentifications")(dao.topIdentifications())
    data
  }
}
MysterionRise/airport-dangerzone
app/dao/CachingDAO.scala
Scala
mit
1,489
// This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. package ducttape.hyperdag.walker import grizzled.slf4j.Logging /** A generalization of the iterator concept to allow parallel traversal * of structured collections (e.g. DAGs). Callers can use a producer-consumer * pattern to accomplish parallel traversal by calling take/complete. * Implementations must be threadsafe and might take the form of an * agenda-based traversal algorithm. */ trait Walker[A] extends Iterable[A] with Logging { // TODO: Should this be a TraversableOnce? private val self = this /** Get the next traversable item. Returns None when there are no more elements */ private[hyperdag] def take(): Option[A] /** Callers must use this method to notify walker that caller is done with each * item so that walker can traverse its dependends * continue: False indicates that no dependents of the specified item should be traversed */ private[hyperdag] def complete(item: A, continue: Boolean = true) /** Get a synchronous iterator (not appropriate for multi-threaded consumers) */ def iterator() = new Iterator[A] { var nextItem: Option[A] = None override def hasNext: Boolean = { if(nextItem == None) nextItem = self.take nextItem != None } override def next: A = { val hazNext = hasNext require(hazNext, "No more items. Call hasNext() first.") val result: A = nextItem.get nextItem = None self.complete(result) result } } // TODO: Add a .par(j) method that returns a parallel walker // j = numCores (as in make -j) def foreach[U](j: Int, f: A => U) { import java.util.concurrent._ import collection.JavaConversions._ // TODO: Write as tail recursion so that breaking out of the loop isn't as complicated val pool = Executors.newFixedThreadPool(j) // kill the entire thread pool if there's an internal failure within the walker def catchAndKillPool[T](func: => T):T = { try { func } catch { case ie: InterruptedException => throw ie case t: Throwable => { pool.shutdownNow() // may hang forever otherwise throw t } } } val tasks: Seq[Callable[Unit]] = (0 until j).map(i => new Callable[Unit] { override def call { var running = true while (running) { catchAndKillPool { take() } match { case Some(a) => { var success = true try { debug("Executing callback for %s".format(a)) f(a) } catch { // catch exceptions happening within the callback case t: Throwable => { success = false // this won't kill off the main thread until we // call get() on the Future object below throw t } } finally { // mark as complete, but don't run any dependencies // TODO: Keep a list of tasks that failed? debug("UNSUCCESSFUL, NOT CONTINUING: " + a) catchAndKillPool { complete(a, continue=success) } } } case None => { running = false } } } trace("Worker thread %d of %d joined".format(i+1, j)) } }) // start running tasks in thread pool // wait a few years or until all tasks complete val futures = pool.invokeAll(tasks, Long.MaxValue, TimeUnit.MILLISECONDS) pool.shutdown() // call get on each future so that we propagate any exceptions futures.foreach(_.get) } }
jhclark/ducttape
src/main/scala/ducttape/hyperdag/walker/Walker.scala
Scala
mpl-2.0
3,881
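An illustrative Walker with no dependency structure, to show the take()/complete() contract and the parallel foreach(j, f) entry point described above. FlatWalker and the demo object are invented; the package declaration is only there because take and complete are private[hyperdag].

package ducttape.hyperdag.walker // so the private[hyperdag] members can be implemented

import java.util.concurrent.atomic.AtomicInteger

/** Hands out each element of a flat list exactly once; complete() is a no-op
 *  because there are no dependents to unlock. */
class FlatWalker[A](items: IndexedSeq[A]) extends Walker[A] {
  private val next = new AtomicInteger(0)

  private[hyperdag] def take(): Option[A] = {
    val i = next.getAndIncrement()
    if (i < items.size) Some(items(i)) else None
  }

  private[hyperdag] def complete(item: A, continue: Boolean): Unit = ()
}

object FlatWalkerDemo extends App {
  // Four worker threads drain ten items in parallel via foreach(j, f).
  new FlatWalker((1 to 10).toIndexedSeq).foreach(4, (i: Int) => println("processed " + i))
}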
package com.datastax.killrweather

import scala.concurrent.duration._

import com.datastax.killrweather.GitHubEvents.MonthlyCommits
import kafka.producer.{KeyedMessage, Producer}
import kafka.serializer.StringDecoder
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkContext, SparkConf}
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.kafka.KafkaUtils
import org.json4s._
import org.json4s.native.JsonParser
import com.datastax.spark.connector.cql.CassandraConnector
import com.datastax.spark.connector.embedded.{Assertions, EmbeddedKafka}

/**
 * Uses json4s for json work from the kafka stream.
 */
object KafkaStreamingJson2 extends App with Assertions {
  import com.datastax.spark.connector.streaming._

  println("-->KafkaStreamingJson2 TOP...")

  implicit val formats = DefaultFormats

  println("---> <1>")

  /* Small sample data */
  val data = Seq(
    """{"user":"helena","commits":98, "month":3, "year":2015}""",
    """{"user":"jacek-lewandowski", "commits":72, "month":3, "year":2015}""",
    """{"user":"pkolaczk", "commits":42, "month":3, "year":2015}""")

  println("---> <2>")

  /* Kafka (embedded) setup */
  val kafka = new EmbeddedKafka

  println("---> <3>")

  kafka.createTopic("github")

  println("---> About to call producer new, send, and close...")

  // simulate another process streaming data to Kafka
  val producer = new Producer[String, String](kafka.producerConfig)
  data.foreach(m => producer.send(new KeyedMessage[String, String]("github", "githubstats", m)))
  producer.close()

  /* Spark initialization */
  val conf = new SparkConf().setAppName(getClass.getSimpleName)
    .setMaster("local[*]")
    .set("spark.cassandra.connection.host", "127.0.0.1")
    .set("spark.cleaner.ttl", "5000")

  val ssc = new StreamingContext(new SparkContext(conf), Seconds(1))

  /* Cassandra setup */
  CassandraConnector(conf).withSessionDo { session =>
    session.execute("DROP KEYSPACE IF EXISTS githubstats")
    session.execute("CREATE KEYSPACE githubstats WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1 }")
    session.execute("""CREATE TABLE githubstats.monthly_commits (user VARCHAR PRIMARY KEY, commits INT, month INT, year INT)""")
  }

  val stream = KafkaUtils.createStream[String, String, StringDecoder, StringDecoder](
      ssc, kafka.kafkaParams, Map("github" -> 5), StorageLevel.MEMORY_ONLY)
    .map { case (_, v) => JsonParser.parse(v).extract[MonthlyCommits] }
    .saveToCassandra("githubstats", "monthly_commits")

  println("--->About to call ssc.start...")
  ssc.start()
  println("--->After ssc.start...")

  /* validate */
  val table = ssc.cassandraTable[MonthlyCommits]("githubstats", "monthly_commits")
  awaitCond(table.collect.size > 1, 5.seconds)
  table.toLocalIterator foreach println

  ssc.awaitTermination()
  println("--->KafkaStreamingJson2 BOTTOM")
}
GitOutATown/killrweather
killrweather-examples/src/main/scala/com/datastax/killrweather/KafkaStreamingJson2.scala
Scala
apache-2.0
3,058
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.mxnet.infer import org.apache.mxnet.io.NDArrayIter import org.apache.mxnet.{Context, DataDesc, NDArray, Shape} import org.apache.mxnet.module.Module import scala.collection.mutable.ListBuffer import org.slf4j.LoggerFactory /** * Base Trait for MXNet Predictor classes. */ private[infer] trait PredictBase { /** * Converts indexed sequences of 1-D array to NDArrays. * <p> * This method will take input as IndexedSeq one dimensional arrays and creates the * NDArray needed for inference. The array will be reshaped based on the input descriptors. * @param input: An IndexedSequence of a one-dimensional array. An IndexedSequence is needed when the model has more than one input. * @return Indexed sequence array of outputs */ def predict(input: IndexedSeq[Array[Float]]): IndexedSeq[Array[Float]] /** * Predict using NDArray as input. * <p> * This method is useful when the input is a batch of data * or when multiple operations on the input have to performed. * Note: User is responsible for managing allocation/deallocation of NDArrays. * @param input IndexedSequence NDArrays. * @return Output of predictions as NDArrays. */ def predictWithNDArray(input: IndexedSeq[NDArray]): IndexedSeq[NDArray] } /** * Implementation of prediction routines. * * @param modelPathPrefix Path prefix from where to load the model artifacts. * These include the symbol, parameters, and synset.txt * Example: file://model-dir/resnet-152 (containing * resnet-152-symbol.json, resnet-152-0000.params, and synset.txt). * @param inputDescriptors Descriptors defining the input node names, shape, * layout and type parameters * <p>Note: If the input Descriptors is missing batchSize * ('N' in layout), a batchSize of 1 is assumed for the model. 
* @param contexts Device contexts on which you want to run inference; defaults to CPU * @param epoch Model epoch to load; defaults to 0 */ class Predictor(modelPathPrefix: String, protected val inputDescriptors: IndexedSeq[DataDesc], protected val contexts: Array[Context] = Context.cpu(), protected val epoch: Option[Int] = Some(0)) extends PredictBase { private val logger = LoggerFactory.getLogger(classOf[Predictor]) require(inputDescriptors.head.layout.size != 0, "layout size should not be zero") protected[infer] var batchIndex = inputDescriptors(0).layout.indexOf('N') protected[infer] var batchSize = if (batchIndex != -1) inputDescriptors(0).shape(batchIndex) else 1 protected[infer] var iDescriptors = inputDescriptors inputDescriptors.foreach((f: DataDesc) => require(f.layout.indexOf('N') == batchIndex, "batch size should be in the same index for all inputs")) if (batchIndex != -1) { inputDescriptors.foreach((f: DataDesc) => require(f.shape(batchIndex) == batchSize, "batch size should be same for all inputs")) } else { // Note: this is assuming that the input needs a batch logger.warn("InputDescriptor does not have batchSize, using 1 as the default batchSize") iDescriptors = inputDescriptors.map((f: DataDesc) => new DataDesc(f.name, Shape(1 +: f.shape.toVector), f.dtype, 'N' +: f.layout)) batchIndex = 1 } protected[infer] val mxNetHandler = MXNetHandler() protected[infer] val mod = loadModule() /** * Takes input as IndexedSeq one dimensional arrays and creates the NDArray needed for inference * The array will be reshaped based on the input descriptors. * * @param input: An IndexedSequence of a one-dimensional array. An IndexedSequence is needed when the model has more than one input. * @return Indexed sequence array of outputs */ override def predict(input: IndexedSeq[Array[Float]]) : IndexedSeq[Array[Float]] = { require(input.length == inputDescriptors.length, "number of inputs provided: %d" + " does not match number of inputs in inputDescriptors: %d".format(input.length, inputDescriptors.length)) for((i, d) <- input.zip(inputDescriptors)) { require (i.length == d.shape.product/batchSize, "number of elements:" + " %d in the input does not match the shape:%s".format( i.length, d.shape.toString())) } var inputND: ListBuffer[NDArray] = ListBuffer.empty[NDArray] for((i, d) <- input.zip(inputDescriptors)) { val shape = d.shape.toVector.patch(from = batchIndex, patch = Vector(1), replaced = 1) inputND += mxNetHandler.execute(NDArray.array(i, Shape(shape))) } // rebind with batchsize 1 if (batchSize != 1) { val desc = iDescriptors.map((f : DataDesc) => new DataDesc(f.name, Shape(f.shape.toVector.patch(batchIndex, Vector(1), 1)), f.dtype, f.layout) ) mxNetHandler.execute(mod.bind(desc, forceRebind = true, forTraining = false)) } val resultND = mxNetHandler.execute(mod.predict(new NDArrayIter( inputND.toIndexedSeq, dataBatchSize = 1))) val result = resultND.map((f : NDArray) => f.toArray) mxNetHandler.execute(inputND.foreach(_.dispose)) mxNetHandler.execute(resultND.foreach(_.dispose)) // rebind to batchSize if (batchSize != 1) { mxNetHandler.execute(mod.bind(inputDescriptors, forTraining = false, forceRebind = true)) } result } /** * Predict using NDArray as input * This method is useful when the input is a batch of data * Note: User is responsible for managing allocation/deallocation of input/output NDArrays. 
* * @param inputBatch IndexedSequence NDArrays * @return Output of predictions as NDArrays */ override def predictWithNDArray(inputBatch: IndexedSeq[NDArray]): IndexedSeq[NDArray] = { require(inputBatch.length == inputDescriptors.length, "number of inputs provided: %d" + " do not match number of inputs in inputDescriptors: %d".format(inputBatch.length, inputDescriptors.length)) // Shape validation, remove this when backend throws better error messages. for((i, d) <- inputBatch.zip(iDescriptors)) { require(inputBatch(0).shape(batchIndex) == i.shape(batchIndex), "All inputs should be of same batch size") require(i.shape.drop(batchIndex + 1) == d.shape.drop(batchIndex + 1), "Input Data Shape: %s should match the inputDescriptor shape: %s except batchSize".format( i.shape.toString, d.shape.toString)) } val inputBatchSize = inputBatch(0).shape(batchIndex) // rebind with the new batchSize if (batchSize != inputBatchSize) { val desc = iDescriptors.map((f : DataDesc) => new DataDesc(f.name, Shape(f.shape.toVector.patch(batchIndex, Vector(inputBatchSize), 1)), f.dtype, f.layout) ) mxNetHandler.execute(mod.bind(desc, forceRebind = true, forTraining = false)) } val resultND = mxNetHandler.execute(mod.predict(new NDArrayIter( inputBatch, dataBatchSize = inputBatchSize))) if (batchSize != inputBatchSize) { mxNetHandler.execute(mod.bind(iDescriptors, forceRebind = true, forTraining = false)) } resultND } private[infer] def loadModule(): Module = { val mod = mxNetHandler.execute(Module.loadCheckpoint(modelPathPrefix, epoch.get, contexts = contexts)) mxNetHandler.execute(mod.bind(inputDescriptors, forTraining = false)) mod } }
indhub/mxnet
scala-package/infer/src/main/scala/org/apache/mxnet/infer/Predictor.scala
Scala
apache-2.0
8,514
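A hypothetical usage sketch of the Predictor above: the model prefix, input name, and shape are placeholders, and the input layout includes the leading 'N' batch dimension so that batchSize is read from the descriptor rather than defaulted.

import org.apache.mxnet.{Context, DataDesc, DType, Shape}
import org.apache.mxnet.infer.Predictor

object PredictorExample {
  def main(args: Array[String]): Unit = {
    // One input named "data" with batch size 1 and a 3x224x224 image layout.
    val inputDesc = IndexedSeq(
      new DataDesc("data", Shape(1, 3, 224, 224), DType.Float32, "NCHW"))

    val predictor = new Predictor("file://models/resnet-152", inputDesc, Context.cpu())

    // One flattened image worth of floats for batch size 1: 3 * 224 * 224 values.
    val input = IndexedSeq(Array.fill(3 * 224 * 224)(0.5f))
    val output: IndexedSeq[Array[Float]] = predictor.predict(input)
    println(s"first output vector has ${output.head.length} values")
  }
}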
package cd

final class Vector3D(val x: Double, val y: Double, val z: Double) {

  def plus(other: Vector3D) =
    new Vector3D(x + other.x, y + other.y, z + other.z)

  def minus(other: Vector3D) =
    new Vector3D(x - other.x, y - other.y, z - other.z)

  def dot(other: Vector3D): Double =
    x * other.x + y * other.y + z * other.z

  def squaredMagnitude(): Double = this.dot(this)

  def magnitude(): Double = Math.sqrt(squaredMagnitude())

  def times(amount: Double): Vector3D =
    new Vector3D(x * amount, y * amount, z * amount)
}
cedricviaccoz/scala-native
benchmarks/src/main/scala/cd/Vector3D.scala
Scala
bsd-3-clause
550
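A quick usage example of the immutable vector API above; the demo object and values are invented for illustration.

import cd.Vector3D

object Vector3DExample extends App {
  val a = new Vector3D(1.0, 2.0, 3.0)
  val b = new Vector3D(4.0, 5.0, 6.0)
  val sum  = a.plus(b)                      // (5.0, 7.0, 9.0)
  val dot  = a.dot(b)                       // 1*4 + 2*5 + 3*6 = 32.0
  val unit = a.times(1.0 / a.magnitude())   // a scaled to unit length
  println(s"dot=$dot magnitude=${a.magnitude()} unit.x=${unit.x}")
}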
package org.jetbrains.plugins.scala
package lang
package psi
package stubs
package elements

import _root_.org.jetbrains.plugins.scala.lang.psi.impl.statements.ScPatternDefinitionImpl
import com.intellij.lang.ASTNode
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScValue

/**
 * User: Alexander Podkhalyuzin
 * Date: 17.10.2008
 */
class ScValueDefinitionElementType extends ScValueElementType[ScValue]("value definition") {
  def createElement(node: ASTNode): PsiElement = new ScPatternDefinitionImpl(node)

  def createPsi(stub: ScValueStub) = new ScPatternDefinitionImpl(stub)
}
LPTK/intellij-scala
src/org/jetbrains/plugins/scala/lang/psi/stubs/elements/ScValueDefinitionElementType.scala
Scala
apache-2.0
629
package services.crunch import drt.shared.CrunchApi._ import drt.shared.{Forecast, Queues, SDateLike} import org.specs2.mutable.Specification import services.{CSVData, SDate} class ForecastPlanningToCSVDataTest extends Specification { "Forecast Planning export" >> { "Given a ForecastPeriod with 1 day when we export to CSV, we should see the same data as a CSV" >> { val day1Midnight = SDate("2017-10-25T00:00:00Z") val t0000Millis = day1Midnight.millisSinceEpoch val t0015Millis = day1Midnight.addMinutes(15).millisSinceEpoch val t0030Millis = day1Midnight.addMinutes(30).millisSinceEpoch val t0045Millis = day1Midnight.addMinutes(45).millisSinceEpoch val t0100Millis = day1Midnight.addMinutes(60).millisSinceEpoch val forecast = ForecastPeriod(Map( t0000Millis -> Seq( ForecastTimeSlot(t0000Millis, 1, 2), ForecastTimeSlot(t0015Millis, 3, 4), ForecastTimeSlot(t0030Millis, 5, 6), ForecastTimeSlot(t0045Millis, 7, 8), ForecastTimeSlot(t0100Millis, 9, 10) ) )) val result = CSVData.forecastPeriodToCsv(forecast) val expected = s"""|,${day1Midnight.getDate()}/${day1Midnight.getMonth()} - available,25/10 - required,25/10 - difference |01:00,1,2,-1 |01:15,3,4,-1 |01:30,5,6,-1 |01:45,7,8,-1 |02:00,9,10,-1""".stripMargin result === expected } "Given ForecastPeriod with no data, we should get an empty list of timeslots" >> { val forecast = ForecastPeriod(Map()) val result = Forecast.timeSlotStartTimes(forecast, CSVData.millisToHoursAndMinutesString) val expected = List() result === expected } "Given a ForecastPeriod with 3 days when we export to CSV, we should see the same data as a CSV" >> { val day1Midnight = SDate("2017-10-25T00:00:00Z") val d1t0000Millis = day1Midnight.millisSinceEpoch val d1t0015Millis = day1Midnight.addMinutes(15).millisSinceEpoch val d1t0030Millis = day1Midnight.addMinutes(30).millisSinceEpoch val d1t0045Millis = day1Midnight.addMinutes(45).millisSinceEpoch val d1t0100Millis = day1Midnight.addMinutes(60).millisSinceEpoch val day2Midnight = day1Midnight.addDays(1) val d2t0000Millis = day2Midnight.millisSinceEpoch val d2t0015Millis = day2Midnight.addMinutes(15).millisSinceEpoch val d2t0030Millis = day2Midnight.addMinutes(30).millisSinceEpoch val d2t0045Millis = day2Midnight.addMinutes(45).millisSinceEpoch val d2t0100Millis = day2Midnight.addMinutes(60).millisSinceEpoch val day3Midnight = day2Midnight.addDays(1) val d3t0000Millis = day3Midnight.millisSinceEpoch val d3t0015Millis = day3Midnight.addMinutes(15).millisSinceEpoch val d3t0030Millis = day3Midnight.addMinutes(30).millisSinceEpoch val d3t0045Millis = day3Midnight.addMinutes(45).millisSinceEpoch val d3t0100Millis = day3Midnight.addMinutes(60).millisSinceEpoch val forecast = ForecastPeriod(Map( d1t0000Millis -> Seq( ForecastTimeSlot(d1t0000Millis, 1, 2), ForecastTimeSlot(d1t0015Millis, 3, 4), ForecastTimeSlot(d1t0030Millis, 5, 6), ForecastTimeSlot(d1t0045Millis, 7, 8), ForecastTimeSlot(d1t0100Millis, 9, 10) ), d2t0000Millis -> Seq( ForecastTimeSlot(d2t0000Millis, 1, 2), ForecastTimeSlot(d2t0015Millis, 3, 4), ForecastTimeSlot(d2t0030Millis, 5, 6), ForecastTimeSlot(d2t0045Millis, 7, 8), ForecastTimeSlot(d2t0100Millis, 9, 10) ), d3t0000Millis -> Seq( ForecastTimeSlot(d3t0000Millis, 1, 2), ForecastTimeSlot(d3t0015Millis, 3, 4), ForecastTimeSlot(d3t0030Millis, 5, 6), ForecastTimeSlot(d3t0045Millis, 7, 8), ForecastTimeSlot(d3t0100Millis, 9, 10) ) )) val result = CSVData.forecastPeriodToCsv(forecast) val dt1 = s"""${day1Midnight.getDate()}/${day1Midnight.getMonth()}""" val dt2 = s"""${day2Midnight.getDate()}/${day2Midnight.getMonth()}""" 
val dt3 = s"""${day3Midnight.getDate()}/${day3Midnight.getMonth()}""" val expected = s"""|,$dt1 - available,$dt1 - required,$dt1 - difference,$dt2 - available,$dt2 - required,$dt2 - difference,$dt3 - available,$dt3 - required,$dt3 - difference |01:00,1,2,-1,1,2,-1,1,2,-1 |01:15,3,4,-1,3,4,-1,3,4,-1 |01:30,5,6,-1,5,6,-1,5,6,-1 |01:45,7,8,-1,7,8,-1,7,8,-1 |02:00,9,10,-1,9,10,-1,9,10,-1""".stripMargin result === expected } } "Given a ForecastPeriod which includes a period that spans a timezone change from BST to UTC " + "Then I should get back a rectangular List of Lists of ForecastTimeSlot Options containing 100 slots each" >> { val forecastPeriodDays: Map[MillisSinceEpoch, Seq[ForecastTimeSlot]] = forecastForPeriodStartingOnDay(2, SDate("2018-10-28T00:00:00Z")).days val result = Forecast.handleBSTToUTC(forecastPeriodDays).toList.map(_._2.length) val expected = 100 :: 100 :: Nil result === expected } "Given a ForecastPeriod which includes a period that spans a timezone change from BST to UTC " + "Then row headings should include 2 entries for 1am" >> { val forecastPeriod = forecastForPeriodStartingOnDay(2, SDate("2018-10-28T00:00:00Z")) val result = Forecast.timeSlotStartTimes(forecastPeriod, CSVData.millisToHoursAndMinutesString).take(12) val expected = List( "00:00", "00:15", "00:30", "00:45", "01:00", "01:15", "01:30", "01:45", "01:00", "01:15", "01:30", "01:45" ) result === expected } "Given a ForecastPeriod which includes a period that spans a timezone change from BST to UTC " + "Then I should get a CSV with two rows for 1am with empty values for all days other than the timezone change day" >> { val forecastPeriodDays: ForecastPeriod = forecastForPeriodStartingOnDay(2, SDate("2018-10-28T00:00:00Z")) val result = CSVData.forecastPeriodToCsv(forecastPeriodDays) val expected = s"""|,28/10 - available,28/10 - required,28/10 - difference,29/10 - available,29/10 - required,29/10 - difference |00:00,1,1,0,1,1,0 |00:15,1,1,0,1,1,0 |00:30,1,1,0,1,1,0 |00:45,1,1,0,1,1,0 |01:00,1,1,0,1,1,0 |01:15,1,1,0,1,1,0 |01:30,1,1,0,1,1,0 |01:45,1,1,0,1,1,0 |01:00,1,1,0,,, |01:15,1,1,0,,, |01:30,1,1,0,,, |01:45,1,1,0,,, |02:00,1,1,0,1,1,0 |02:15,1,1,0,1,1,0 |02:30,1,1,0,1,1,0 |02:45,1,1,0,1,1,0 |03:00,1,1,0,1,1,0 |03:15,1,1,0,1,1,0 |03:30,1,1,0,1,1,0 |03:45,1,1,0,1,1,0 |04:00,1,1,0,1,1,0 |04:15,1,1,0,1,1,0 |04:30,1,1,0,1,1,0 |04:45,1,1,0,1,1,0 |05:00,1,1,0,1,1,0 |05:15,1,1,0,1,1,0 |05:30,1,1,0,1,1,0 |05:45,1,1,0,1,1,0 |06:00,1,1,0,1,1,0 |06:15,1,1,0,1,1,0 |06:30,1,1,0,1,1,0 |06:45,1,1,0,1,1,0 |07:00,1,1,0,1,1,0 |07:15,1,1,0,1,1,0 |07:30,1,1,0,1,1,0 |07:45,1,1,0,1,1,0 |08:00,1,1,0,1,1,0 |08:15,1,1,0,1,1,0 |08:30,1,1,0,1,1,0 |08:45,1,1,0,1,1,0 |09:00,1,1,0,1,1,0 |09:15,1,1,0,1,1,0 |09:30,1,1,0,1,1,0 |09:45,1,1,0,1,1,0 |10:00,1,1,0,1,1,0 |10:15,1,1,0,1,1,0 |10:30,1,1,0,1,1,0 |10:45,1,1,0,1,1,0 |11:00,1,1,0,1,1,0 |11:15,1,1,0,1,1,0 |11:30,1,1,0,1,1,0 |11:45,1,1,0,1,1,0 |12:00,1,1,0,1,1,0 |12:15,1,1,0,1,1,0 |12:30,1,1,0,1,1,0 |12:45,1,1,0,1,1,0 |13:00,1,1,0,1,1,0 |13:15,1,1,0,1,1,0 |13:30,1,1,0,1,1,0 |13:45,1,1,0,1,1,0 |14:00,1,1,0,1,1,0 |14:15,1,1,0,1,1,0 |14:30,1,1,0,1,1,0 |14:45,1,1,0,1,1,0 |15:00,1,1,0,1,1,0 |15:15,1,1,0,1,1,0 |15:30,1,1,0,1,1,0 |15:45,1,1,0,1,1,0 |16:00,1,1,0,1,1,0 |16:15,1,1,0,1,1,0 |16:30,1,1,0,1,1,0 |16:45,1,1,0,1,1,0 |17:00,1,1,0,1,1,0 |17:15,1,1,0,1,1,0 |17:30,1,1,0,1,1,0 |17:45,1,1,0,1,1,0 |18:00,1,1,0,1,1,0 |18:15,1,1,0,1,1,0 |18:30,1,1,0,1,1,0 |18:45,1,1,0,1,1,0 |19:00,1,1,0,1,1,0 |19:15,1,1,0,1,1,0 |19:30,1,1,0,1,1,0 |19:45,1,1,0,1,1,0 |20:00,1,1,0,1,1,0 |20:15,1,1,0,1,1,0 
|20:30,1,1,0,1,1,0 |20:45,1,1,0,1,1,0 |21:00,1,1,0,1,1,0 |21:15,1,1,0,1,1,0 |21:30,1,1,0,1,1,0 |21:45,1,1,0,1,1,0 |22:00,1,1,0,1,1,0 |22:15,1,1,0,1,1,0 |22:30,1,1,0,1,1,0 |22:45,1,1,0,1,1,0 |23:00,1,1,0,1,1,0 |23:15,1,1,0,1,1,0 |23:30,1,1,0,1,1,0 |23:45,1,1,0,1,1,0""".stripMargin result === expected } "Given a ForecastPeriod which includes a period that spans a timezone change from BST to UTC starting before the switch date " + "Then I should get a CSV with two rows for 1am with empty values for all days other than the timezone change day" >> { val forecastPeriodDays: ForecastPeriod = forecastForPeriodStartingOnDay(3, SDate("2018-10-27T00:00:00Z")) val result = CSVData.forecastPeriodToCsv(forecastPeriodDays) val expected = s"""|,27/10 - available,27/10 - required,27/10 - difference,28/10 - available,28/10 - required,28/10 - difference,29/10 - available,29/10 - required,29/10 - difference |00:00,1,1,0,1,1,0,1,1,0 |00:15,1,1,0,1,1,0,1,1,0 |00:30,1,1,0,1,1,0,1,1,0 |00:45,1,1,0,1,1,0,1,1,0 |01:00,1,1,0,1,1,0,1,1,0 |01:15,1,1,0,1,1,0,1,1,0 |01:30,1,1,0,1,1,0,1,1,0 |01:45,1,1,0,1,1,0,1,1,0 |01:00,,,,1,1,0,,, |01:15,,,,1,1,0,,, |01:30,,,,1,1,0,,, |01:45,,,,1,1,0,,, |02:00,1,1,0,1,1,0,1,1,0 |02:15,1,1,0,1,1,0,1,1,0 |02:30,1,1,0,1,1,0,1,1,0 |02:45,1,1,0,1,1,0,1,1,0 |03:00,1,1,0,1,1,0,1,1,0 |03:15,1,1,0,1,1,0,1,1,0 |03:30,1,1,0,1,1,0,1,1,0 |03:45,1,1,0,1,1,0,1,1,0 |04:00,1,1,0,1,1,0,1,1,0 |04:15,1,1,0,1,1,0,1,1,0 |04:30,1,1,0,1,1,0,1,1,0 |04:45,1,1,0,1,1,0,1,1,0 |05:00,1,1,0,1,1,0,1,1,0 |05:15,1,1,0,1,1,0,1,1,0 |05:30,1,1,0,1,1,0,1,1,0 |05:45,1,1,0,1,1,0,1,1,0 |06:00,1,1,0,1,1,0,1,1,0 |06:15,1,1,0,1,1,0,1,1,0 |06:30,1,1,0,1,1,0,1,1,0 |06:45,1,1,0,1,1,0,1,1,0 |07:00,1,1,0,1,1,0,1,1,0 |07:15,1,1,0,1,1,0,1,1,0 |07:30,1,1,0,1,1,0,1,1,0 |07:45,1,1,0,1,1,0,1,1,0 |08:00,1,1,0,1,1,0,1,1,0 |08:15,1,1,0,1,1,0,1,1,0 |08:30,1,1,0,1,1,0,1,1,0 |08:45,1,1,0,1,1,0,1,1,0 |09:00,1,1,0,1,1,0,1,1,0 |09:15,1,1,0,1,1,0,1,1,0 |09:30,1,1,0,1,1,0,1,1,0 |09:45,1,1,0,1,1,0,1,1,0 |10:00,1,1,0,1,1,0,1,1,0 |10:15,1,1,0,1,1,0,1,1,0 |10:30,1,1,0,1,1,0,1,1,0 |10:45,1,1,0,1,1,0,1,1,0 |11:00,1,1,0,1,1,0,1,1,0 |11:15,1,1,0,1,1,0,1,1,0 |11:30,1,1,0,1,1,0,1,1,0 |11:45,1,1,0,1,1,0,1,1,0 |12:00,1,1,0,1,1,0,1,1,0 |12:15,1,1,0,1,1,0,1,1,0 |12:30,1,1,0,1,1,0,1,1,0 |12:45,1,1,0,1,1,0,1,1,0 |13:00,1,1,0,1,1,0,1,1,0 |13:15,1,1,0,1,1,0,1,1,0 |13:30,1,1,0,1,1,0,1,1,0 |13:45,1,1,0,1,1,0,1,1,0 |14:00,1,1,0,1,1,0,1,1,0 |14:15,1,1,0,1,1,0,1,1,0 |14:30,1,1,0,1,1,0,1,1,0 |14:45,1,1,0,1,1,0,1,1,0 |15:00,1,1,0,1,1,0,1,1,0 |15:15,1,1,0,1,1,0,1,1,0 |15:30,1,1,0,1,1,0,1,1,0 |15:45,1,1,0,1,1,0,1,1,0 |16:00,1,1,0,1,1,0,1,1,0 |16:15,1,1,0,1,1,0,1,1,0 |16:30,1,1,0,1,1,0,1,1,0 |16:45,1,1,0,1,1,0,1,1,0 |17:00,1,1,0,1,1,0,1,1,0 |17:15,1,1,0,1,1,0,1,1,0 |17:30,1,1,0,1,1,0,1,1,0 |17:45,1,1,0,1,1,0,1,1,0 |18:00,1,1,0,1,1,0,1,1,0 |18:15,1,1,0,1,1,0,1,1,0 |18:30,1,1,0,1,1,0,1,1,0 |18:45,1,1,0,1,1,0,1,1,0 |19:00,1,1,0,1,1,0,1,1,0 |19:15,1,1,0,1,1,0,1,1,0 |19:30,1,1,0,1,1,0,1,1,0 |19:45,1,1,0,1,1,0,1,1,0 |20:00,1,1,0,1,1,0,1,1,0 |20:15,1,1,0,1,1,0,1,1,0 |20:30,1,1,0,1,1,0,1,1,0 |20:45,1,1,0,1,1,0,1,1,0 |21:00,1,1,0,1,1,0,1,1,0 |21:15,1,1,0,1,1,0,1,1,0 |21:30,1,1,0,1,1,0,1,1,0 |21:45,1,1,0,1,1,0,1,1,0 |22:00,1,1,0,1,1,0,1,1,0 |22:15,1,1,0,1,1,0,1,1,0 |22:30,1,1,0,1,1,0,1,1,0 |22:45,1,1,0,1,1,0,1,1,0 |23:00,1,1,0,1,1,0,1,1,0 |23:15,1,1,0,1,1,0,1,1,0 |23:30,1,1,0,1,1,0,1,1,0 |23:45,1,1,0,1,1,0,1,1,0""".stripMargin result === expected } "Given a ForecastPeriod which includes a period that spans a timezone change from UTC to BST " + "Then then the switch over day should have 
blank entries for the period between 1am and 2am" >> { val forecastPeriodDays: ForecastPeriod = forecastForPeriodStartingOnDay(3, SDate("2019-03-30T00:00:00Z")) val result = CSVData.forecastPeriodToCsv(forecastPeriodDays) val expected = s"""|,30/03 - available,30/03 - required,30/03 - difference,31/03 - available,31/03 - required,31/03 - difference,01/04 - available,01/04 - required,01/04 - difference |00:00,1,1,0,1,1,0,1,1,0 |00:15,1,1,0,1,1,0,1,1,0 |00:30,1,1,0,1,1,0,1,1,0 |00:45,1,1,0,1,1,0,1,1,0 |01:00,1,1,0,,,,1,1,0 |01:15,1,1,0,,,,1,1,0 |01:30,1,1,0,,,,1,1,0 |01:45,1,1,0,,,,1,1,0 |02:00,1,1,0,1,1,0,1,1,0 |02:15,1,1,0,1,1,0,1,1,0 |02:30,1,1,0,1,1,0,1,1,0 |02:45,1,1,0,1,1,0,1,1,0 |03:00,1,1,0,1,1,0,1,1,0 |03:15,1,1,0,1,1,0,1,1,0 |03:30,1,1,0,1,1,0,1,1,0 |03:45,1,1,0,1,1,0,1,1,0 |04:00,1,1,0,1,1,0,1,1,0 |04:15,1,1,0,1,1,0,1,1,0 |04:30,1,1,0,1,1,0,1,1,0 |04:45,1,1,0,1,1,0,1,1,0 |05:00,1,1,0,1,1,0,1,1,0 |05:15,1,1,0,1,1,0,1,1,0 |05:30,1,1,0,1,1,0,1,1,0 |05:45,1,1,0,1,1,0,1,1,0 |06:00,1,1,0,1,1,0,1,1,0 |06:15,1,1,0,1,1,0,1,1,0 |06:30,1,1,0,1,1,0,1,1,0 |06:45,1,1,0,1,1,0,1,1,0 |07:00,1,1,0,1,1,0,1,1,0 |07:15,1,1,0,1,1,0,1,1,0 |07:30,1,1,0,1,1,0,1,1,0 |07:45,1,1,0,1,1,0,1,1,0 |08:00,1,1,0,1,1,0,1,1,0 |08:15,1,1,0,1,1,0,1,1,0 |08:30,1,1,0,1,1,0,1,1,0 |08:45,1,1,0,1,1,0,1,1,0 |09:00,1,1,0,1,1,0,1,1,0 |09:15,1,1,0,1,1,0,1,1,0 |09:30,1,1,0,1,1,0,1,1,0 |09:45,1,1,0,1,1,0,1,1,0 |10:00,1,1,0,1,1,0,1,1,0 |10:15,1,1,0,1,1,0,1,1,0 |10:30,1,1,0,1,1,0,1,1,0 |10:45,1,1,0,1,1,0,1,1,0 |11:00,1,1,0,1,1,0,1,1,0 |11:15,1,1,0,1,1,0,1,1,0 |11:30,1,1,0,1,1,0,1,1,0 |11:45,1,1,0,1,1,0,1,1,0 |12:00,1,1,0,1,1,0,1,1,0 |12:15,1,1,0,1,1,0,1,1,0 |12:30,1,1,0,1,1,0,1,1,0 |12:45,1,1,0,1,1,0,1,1,0 |13:00,1,1,0,1,1,0,1,1,0 |13:15,1,1,0,1,1,0,1,1,0 |13:30,1,1,0,1,1,0,1,1,0 |13:45,1,1,0,1,1,0,1,1,0 |14:00,1,1,0,1,1,0,1,1,0 |14:15,1,1,0,1,1,0,1,1,0 |14:30,1,1,0,1,1,0,1,1,0 |14:45,1,1,0,1,1,0,1,1,0 |15:00,1,1,0,1,1,0,1,1,0 |15:15,1,1,0,1,1,0,1,1,0 |15:30,1,1,0,1,1,0,1,1,0 |15:45,1,1,0,1,1,0,1,1,0 |16:00,1,1,0,1,1,0,1,1,0 |16:15,1,1,0,1,1,0,1,1,0 |16:30,1,1,0,1,1,0,1,1,0 |16:45,1,1,0,1,1,0,1,1,0 |17:00,1,1,0,1,1,0,1,1,0 |17:15,1,1,0,1,1,0,1,1,0 |17:30,1,1,0,1,1,0,1,1,0 |17:45,1,1,0,1,1,0,1,1,0 |18:00,1,1,0,1,1,0,1,1,0 |18:15,1,1,0,1,1,0,1,1,0 |18:30,1,1,0,1,1,0,1,1,0 |18:45,1,1,0,1,1,0,1,1,0 |19:00,1,1,0,1,1,0,1,1,0 |19:15,1,1,0,1,1,0,1,1,0 |19:30,1,1,0,1,1,0,1,1,0 |19:45,1,1,0,1,1,0,1,1,0 |20:00,1,1,0,1,1,0,1,1,0 |20:15,1,1,0,1,1,0,1,1,0 |20:30,1,1,0,1,1,0,1,1,0 |20:45,1,1,0,1,1,0,1,1,0 |21:00,1,1,0,1,1,0,1,1,0 |21:15,1,1,0,1,1,0,1,1,0 |21:30,1,1,0,1,1,0,1,1,0 |21:45,1,1,0,1,1,0,1,1,0 |22:00,1,1,0,1,1,0,1,1,0 |22:15,1,1,0,1,1,0,1,1,0 |22:30,1,1,0,1,1,0,1,1,0 |22:45,1,1,0,1,1,0,1,1,0 |23:00,1,1,0,1,1,0,1,1,0 |23:15,1,1,0,1,1,0,1,1,0 |23:30,1,1,0,1,1,0,1,1,0 |23:45,1,1,0,1,1,0,1,1,0""".stripMargin result === expected } def forecastForPeriodStartingOnDay(daysToAdd: Int, startDate: SDateLike): ForecastPeriod = { val forecastStart = startDate.getLocalLastMidnight val forecastEnd = forecastStart.addDays(daysToAdd) val range = forecastStart.millisSinceEpoch until forecastEnd.millisSinceEpoch by (15 * 60 * 1000) val days = range.toList.groupBy(m => SDate(m).getLocalLastMidnight.millisSinceEpoch) ForecastPeriod(days.mapValues(_.map(ts => ForecastTimeSlot(ts, 1, 1)))) } "Forecast Headline Export" >> { "Given forecast headline figures for 3 days, then I should get those days exported as a CSV" >> { val day1StartMinute = SDate("2017-01-01T00:00Z") val day2StartMinute = SDate("2017-01-02T00:00Z") val day3StartMinute = 
SDate("2017-01-03T00:00Z") val headlines = ForecastHeadlineFigures(Seq( QueueHeadline(day1StartMinute.millisSinceEpoch, Queues.EeaDesk, 1, 2), QueueHeadline(day1StartMinute.millisSinceEpoch, Queues.EGate, 1, 2), QueueHeadline(day2StartMinute.millisSinceEpoch, Queues.EeaDesk, 1, 2), QueueHeadline(day2StartMinute.millisSinceEpoch, Queues.EGate, 1, 2), QueueHeadline(day3StartMinute.millisSinceEpoch, Queues.EeaDesk, 1, 2), QueueHeadline(day3StartMinute.millisSinceEpoch, Queues.EGate, 1, 2) )) val result = CSVData.forecastHeadlineToCSV(headlines, Queues.forecastExportQueueOrderSansFastTrack) val expected = f"""|,${day1StartMinute.getDate()}%02d/${day1StartMinute.getMonth()}%02d,${day2StartMinute.getDate()}%02d/${day2StartMinute.getMonth()}%02d,${day3StartMinute.getDate()}%02d/${day3StartMinute.getMonth()}%02d |Total Pax,2,2,2 |EEA,1,1,1 |e-Gates,1,1,1 |Total Workload,4,4,4""".stripMargin result === expected } } }
UKHomeOffice/drt-scalajs-spa-exploration
server/src/test/scala/services/crunch/ForecastPlanningToCSVDataTest.scala
Scala
apache-2.0
19,132
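The test data above covers the two Europe/London clock changes: on 28/10/2018 the clocks go back, so the 01:00–01:45 quarter-hours appear twice, and on 31/03/2019 they go forward, so those slots are left blank. As a rough, self-contained illustration of that calendar behaviour — using plain java.time rather than the project's SDate helper, so nothing below comes from the codebase — the two affected days are 25 and 23 hours long:

import java.time.{Duration, LocalDate, ZoneId}

object ClockChangeLengths extends App {
  val london = ZoneId.of("Europe/London")

  // Length of a calendar day in a given zone: local midnight to the next local midnight.
  def dayLength(date: LocalDate): Duration =
    Duration.between(date.atStartOfDay(london), date.plusDays(1).atStartOfDay(london))

  println(dayLength(LocalDate.of(2018, 10, 28))) // PT25H - 01:00-01:59 happens twice (BST -> GMT)
  println(dayLength(LocalDate.of(2019, 3, 31)))  // PT23H - 01:00-01:59 is skipped   (GMT -> BST)
}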
package muster
package codec
package json4s

import org.json4s.JsonAST._

import scala.util.Try

package object api {

  implicit class JValueProducingObject[T: Producer](p: T) {
    def asJValue = Json4sCodec.from(p)
  }

  implicit class JValueConsumingObject(jv: JValue) {
    @inline def as[T](implicit consumer: Consumer[T]) = Json4sCodec.as[T](jv)
    @inline def tryAs[T](implicit consumer: Consumer[T]) = Try(as[T])
    @inline def getAs[T](implicit consumer: Consumer[T]) = jv match {
      case JNull | JNothing => None
      case _ => tryAs[T].toOption
    }
  }
}
json4s/muster
codecs/json4s/src/main/scala/muster/codec/json4s/api/package.scala
Scala
mit
574
import scala.language.implicitConversions

class LazyList[A]

object LazyList {
  inline implicit def toDeferred[A](l: LazyList[A]): Deferred[A] = new Deferred(l)

  final class Deferred[A](l: => LazyList[A]) {
    def #:: [B >: A](elem: => B): LazyList[B] = ???
  }
}

import LazyList.*

final class Test {
  lazy val a: LazyList[Int] = 5 #:: b
  lazy val b: LazyList[Int] = 10 #:: a

  val x: LazyList[Int] = 5 #:: y
  val y: LazyList[Int] = 10 #:: x
}
dotty-staging/dotty
tests/init/pos/lazylist1.scala
Scala
apache-2.0
458
import leon.lang._ import leon.annotation._ import leon.collection._ import leon._ object Trees { abstract class Expr case class Plus(lhs: Expr, rhs: Expr) extends Expr case class Minus(lhs: Expr, rhs: Expr) extends Expr case class LessThan(lhs: Expr, rhs: Expr) extends Expr case class And(lhs: Expr, rhs: Expr) extends Expr case class Or(lhs: Expr, rhs: Expr) extends Expr case class Not(e : Expr) extends Expr case class Eq(lhs: Expr, rhs: Expr) extends Expr case class Ite(cond: Expr, thn: Expr, els: Expr) extends Expr case class IntLiteral(v: BigInt) extends Expr case class BoolLiteral(b : Boolean) extends Expr } object Types { abstract class Type case object IntType extends Type case object BoolType extends Type } object TypeChecker { import Trees._ import Types._ def typeOf(e :Expr) : Option[Type] = e match { case Plus(l,r) => (typeOf(l), typeOf(r)) match { case (Some(IntType), Some(IntType)) => Some(IntType) case _ => None() } case Minus(l,r) => (typeOf(l), typeOf(r)) match { case (Some(IntType), Some(IntType)) => Some(IntType) case _ => None() } case LessThan(l,r) => ( typeOf(l), typeOf(r)) match { case (Some(IntType), Some(IntType)) => Some(BoolType) case _ => None() } case And(l,r) => ( typeOf(l), typeOf(r)) match { case (Some(BoolType), Some(BoolType)) => Some(BoolType) case _ => None() } case Or(l,r) => ( typeOf(l), typeOf(r)) match { case (Some(BoolType), Some(BoolType)) => Some(BoolType) case _ => None() } case Not(e) => typeOf(e) match { case Some(BoolType) => Some(BoolType) case _ => None() } case Eq(lhs, rhs) => (typeOf(lhs), typeOf(rhs)) match { case (Some(t1), Some(t2)) if t1 == t2 => Some(BoolType) case _ => None() } case Ite(c, th, el) => (typeOf(c), typeOf(th), typeOf(el)) match { case (Some(BoolType), Some(t1), Some(t2)) if t1 == t2 => Some(t1) case _ => None() } case IntLiteral(_) => Some(IntType) case BoolLiteral(_) => Some(BoolType) } def typeChecks(e : Expr) = typeOf(e).isDefined } object Semantics { import Trees._ import Types._ import TypeChecker._ def semI(t : Expr) : BigInt = { require( typeOf(t) == ( Some(IntType) : Option[Type] )) t match { case Plus(lhs , rhs) => semI(lhs) + semI(rhs) case Minus(lhs , rhs) => semI(lhs) - semI(rhs) case Ite(cond, thn, els) => if (semB(cond)) semI(thn) else semI(els) case IntLiteral(v) => v } } def semB(t : Expr) : Boolean = { require( (Some(BoolType): Option[Type]) == typeOf(t)) t match { case And(lhs, rhs ) => semB(lhs) && semB(rhs) case Or(lhs , rhs ) => semB(lhs) || semB(rhs) case Not(e) => !semB(e) case LessThan(lhs, rhs) => semI(lhs) < semI(rhs) case Ite(cond, thn, els) => if (semB(cond)) semB(thn) else semB(els) case Eq(lhs, rhs) => (typeOf(lhs), typeOf(rhs)) match { case ( Some(IntType), Some(IntType) ) => semI(lhs) == semI(rhs) case ( Some(BoolType), Some(BoolType) ) => semB(lhs) == semB(rhs) } case BoolLiteral(b) => b } } def b2i(b : Boolean): BigInt = if (b) 1 else 0 @induct def semUntyped( t : Expr) : BigInt = { t match { case Plus (lhs, rhs) => semUntyped(lhs) + semUntyped(rhs) case Minus(lhs, rhs) => semUntyped(lhs) - semUntyped(rhs) case And (lhs, rhs) => if (semUntyped(lhs)!=0) semUntyped(rhs) else BigInt(0) case Or(lhs, rhs ) => if (semUntyped(lhs) == 0) semUntyped(rhs) else BigInt(1) case Not(e) => b2i(semUntyped(e) == 0) case LessThan(lhs, rhs) => b2i(semUntyped(lhs) < semUntyped(rhs)) case Eq(lhs, rhs) => b2i(semUntyped(lhs) == semUntyped(rhs)) case Ite(cond, thn, els) => if (semUntyped(cond) == 0) semUntyped(els) else semUntyped(thn) case IntLiteral(v) => v case BoolLiteral(b) => b2i(b) }} ensuring { res => 
typeOf(t) match { case Some(IntType) => res == semI(t) case Some(BoolType) => res == b2i(semB(t)) case None() => true }} } object Desugar { import Types._ import TypeChecker._ import Semantics.b2i abstract class SimpleE case class Plus(lhs : SimpleE, rhs : SimpleE) extends SimpleE case class Neg(arg : SimpleE) extends SimpleE case class Ite(cond : SimpleE, thn : SimpleE, els : SimpleE) extends SimpleE case class Eq(lhs : SimpleE, rhs : SimpleE) extends SimpleE case class LessThan(lhs : SimpleE, rhs : SimpleE) extends SimpleE case class Literal(i : BigInt) extends SimpleE @induct def desugar(e : Trees.Expr) : SimpleE = { e match { case Trees.Plus (lhs, rhs) => Plus(desugar(lhs), desugar(rhs)) case Trees.Minus(lhs, rhs) => Plus(desugar(lhs), Neg(desugar(rhs))) case Trees.LessThan(lhs, rhs) => LessThan(desugar(lhs), desugar(rhs)) case Trees.And (lhs, rhs) => Ite(desugar(lhs), desugar(rhs), Literal(0)) case Trees.Or (lhs, rhs) => Ite(desugar(lhs), Literal(1), desugar(rhs)) case Trees.Not(e) => Ite(desugar(e), Literal(0), Literal(1)) case Trees.Eq(lhs, rhs) => Eq(desugar(lhs), desugar(rhs)) case Trees.Ite(cond, thn, els) => Ite(desugar(cond), desugar(els), desugar(thn)) // FIXME case Trees.IntLiteral(v) => Literal(v) case Trees.BoolLiteral(b) => Literal(b2i(b)) }} ensuring { res => sem(res) == Semantics.semUntyped(e) } def sem(e : SimpleE) : BigInt = e match { case Plus (lhs, rhs) => sem(lhs) + sem(rhs) case Ite(cond, thn, els) => if (sem(cond) != 0) sem(thn) else sem(els) case Neg(arg) => -sem(arg) case Eq(lhs,rhs) => b2i(sem(lhs) == sem(rhs)) case LessThan(lhs, rhs) => b2i(sem(lhs) < sem(rhs)) case Literal(i) => i } } object Evaluator { import Trees._ def bToi(b: Boolean): BigInt = if (b) 1 else 0 def iTob(i: BigInt) = i == 1 def eval(e: Expr): BigInt = { e match { case Plus(lhs, rhs) => eval(lhs) + eval(rhs) case Minus(lhs, rhs) => eval(lhs) + eval(rhs) case LessThan(lhs, rhs) => bToi(eval(lhs) < eval(rhs)) case And(lhs, rhs) => bToi(iTob(eval(lhs)) && iTob(eval(rhs))) case Or(lhs, rhs) => bToi(iTob(eval(lhs)) || iTob(eval(rhs))) case Not(e) => bToi(!iTob(eval(e))) case Eq(lhs, rhs) => bToi(eval(lhs) == eval(rhs)) case Ite(cond, thn, els) => if (iTob(eval(cond))) eval(thn) else eval(els) case IntLiteral(v) => v case BoolLiteral(b) => bToi(b) } } } object Simplifier { import Trees._ import Evaluator._ @induct def simplify(e: Expr): Expr = { e match { case And(BoolLiteral(false), _) => BoolLiteral(false) case Or(BoolLiteral(true), _) => BoolLiteral(true) case Plus(IntLiteral(a), IntLiteral(b)) => IntLiteral(a+b) case Not(Not(Not(a))) => Not(a) case e => e } } ensuring { res => eval(res) == eval(e) } }
regb/leon
testcases/synt2016/repair/Compiler/Compiler3.scala
Scala
gpl-3.0
7,024
package org.jetbrains.plugins.scala.testingSupport.specs2.specs2_2_12_4_0_0

import org.jetbrains.plugins.scala.SlowTests
import org.jetbrains.plugins.scala.testingSupport.specs2.Specs2PackageTest
import org.junit.experimental.categories.Category

/**
 * @author Roman.Shein
 * @since 06.09.2015.
 */
@Category(Array(classOf[SlowTests]))
class Specs2_2_12_4_0_0_PackageTest extends Specs2PackageTest with Specs2_2_12_4_0_0_Base
triplequote/intellij-scala
scala/scala-impl/test/org/jetbrains/plugins/scala/testingSupport/specs2/specs2_2_12_4_0_0/Specs2_2_12_4_0_0_PackageTest.scala
Scala
apache-2.0
431
def map[B](f: A => B): Stream[B] =
  foldRight(empty[B])((h, t) => cons(f(h), t))

def filter[B](f: A => Boolean): Stream[A] =
  foldRight(empty[A])((h, t) => if (f(h)) cons(h, t) else t)

def append[B >: A](s: Stream[B]): Stream[B] =
  foldRight(s)((h, t) => cons(h, t))

def flatMap[B](f: A => Stream[B]): Stream[B] =
  foldRight(empty[B])((h, t) => f(h) append t)
ShokuninSan/fpinscala
answerkey/laziness/6.answer.scala
Scala
mit
372
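The answer-key snippet above defines map, filter, append and flatMap purely in terms of foldRight over a lazy Stream. The sketch below is a simplified, self-contained stand-in — MiniStream, cons and empty are invented names, not the book's actual types — showing how those foldRight-based definitions behave:

// Minimal stand-in for the fpinscala Stream type, just enough to run the
// foldRight-based combinators shown above.
sealed trait MiniStream[+A] {
  def foldRight[B](z: => B)(f: (A, => B) => B): B = this match {
    case MiniCons(h, t) => f(h(), t().foldRight(z)(f)) // second arg is by-name, so the tail is only forced on demand
    case MiniEmpty      => z
  }

  def map[B](f: A => B): MiniStream[B] =
    foldRight(MiniStream.empty[B])((h, t) => MiniStream.cons(f(h), t))

  def filter(p: A => Boolean): MiniStream[A] =
    foldRight(MiniStream.empty[A])((h, t) => if (p(h)) MiniStream.cons(h, t) else t)

  def toList: List[A] = foldRight(List.empty[A])((h, t) => h :: t)
}

case object MiniEmpty extends MiniStream[Nothing]
case class MiniCons[+A](h: () => A, t: () => MiniStream[A]) extends MiniStream[A]

object MiniStream {
  def cons[A](hd: => A, tl: => MiniStream[A]): MiniStream[A] = {
    lazy val head = hd; lazy val tail = tl // memoise head and tail
    MiniCons(() => head, () => tail)
  }
  def empty[A]: MiniStream[A] = MiniEmpty
  def apply[A](as: A*): MiniStream[A] =
    if (as.isEmpty) empty else cons(as.head, apply(as.tail: _*))
}

// e.g. MiniStream(1, 2, 3, 4).map(_ * 10).filter(_ > 15).toList == List(20, 30, 40)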
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.server import java.util import java.util.concurrent.TimeUnit.{MILLISECONDS, NANOSECONDS} import java.util.concurrent.CompletableFuture import kafka.utils.Logging import org.apache.kafka.clients.ClientResponse import org.apache.kafka.common.Uuid import org.apache.kafka.common.message.BrokerRegistrationRequestData.ListenerCollection import org.apache.kafka.common.message.{BrokerHeartbeatRequestData, BrokerRegistrationRequestData} import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.{BrokerHeartbeatRequest, BrokerHeartbeatResponse, BrokerRegistrationRequest, BrokerRegistrationResponse} import org.apache.kafka.metadata.{BrokerState, VersionRange} import org.apache.kafka.queue.EventQueue.DeadlineFunction import org.apache.kafka.common.utils.{ExponentialBackoff, LogContext, Time} import org.apache.kafka.queue.{EventQueue, KafkaEventQueue} import scala.jdk.CollectionConverters._ /** * The broker lifecycle manager owns the broker state. * * Its inputs are messages passed in from other parts of the broker and from the * controller: requests to start up, or shut down, for example. Its output are the broker * state and various futures that can be used to wait for broker state transitions to * occur. * * The lifecycle manager handles registering the broker with the controller, as described * in KIP-631. After registration is complete, it handles sending periodic broker * heartbeats and processing the responses. * * This code uses an event queue paradigm. Modifications get translated into events, which * are placed on the queue to be processed sequentially. As described in the JavaDoc for * each variable, most mutable state can be accessed only from that event queue thread. * In some cases we expose a volatile variable which can be read from any thread, but only * written from the event queue thread. */ class BrokerLifecycleManager(val config: KafkaConfig, val time: Time, val threadNamePrefix: Option[String]) extends Logging { val logContext = new LogContext(s"[BrokerLifecycleManager id=${config.nodeId}] ") this.logIdent = logContext.logPrefix() /** * The broker id. */ private val nodeId = config.nodeId /** * The broker rack, or null if there is no configured rack. */ private val rack = config.rack /** * How long to wait for registration to succeed before failing the startup process. */ private val initialTimeoutNs = MILLISECONDS.toNanos(config.initialRegistrationTimeoutMs.longValue()) /** * The exponential backoff to use for resending communication. */ private val resendExponentialBackoff = new ExponentialBackoff(100, 2, config.brokerSessionTimeoutMs.toLong, 0.02) /** * The number of times we've tried and failed to communicate. 
This variable can only be * read or written from the event queue thread. */ private var failedAttempts = 0L /** * The broker incarnation ID. This ID uniquely identifies each time we start the broker */ val incarnationId = Uuid.randomUuid() /** * A future which is completed just as soon as the broker has caught up with the latest * metadata offset for the first time. */ val initialCatchUpFuture = new CompletableFuture[Void]() /** * A future which is completed when controlled shutdown is done. */ val controlledShutdownFuture = new CompletableFuture[Void]() /** * The broker epoch, or -1 if the broker has not yet registered. This variable can only * be written from the event queue thread. */ @volatile private var _brokerEpoch = -1L /** * The current broker state. This variable can only be written from the event queue * thread. */ @volatile private var _state = BrokerState.NOT_RUNNING /** * A thread-safe callback function which gives this manager the current highest metadata * offset. This variable can only be read or written from the event queue thread. */ private var _highestMetadataOffsetProvider: () => Long = _ /** * True only if we are ready to unfence the broker. This variable can only be read or * written from the event queue thread. */ private var readyToUnfence = false /** * True if we sent a event queue to the active controller requesting controlled * shutdown. This variable can only be read or written from the event queue thread. */ private var gotControlledShutdownResponse = false /** * Whether or not this broker is registered with the controller quorum. * This variable can only be read or written from the event queue thread. */ private var registered = false /** * True if the initial registration succeeded. This variable can only be read or * written from the event queue thread. */ private var initialRegistrationSucceeded = false /** * The cluster ID, or null if this manager has not been started yet. This variable can * only be read or written from the event queue thread. */ private var _clusterId: String = _ /** * The listeners which this broker advertises. This variable can only be read or * written from the event queue thread. */ private var _advertisedListeners: ListenerCollection = _ /** * The features supported by this broker. This variable can only be read or written * from the event queue thread. */ private var _supportedFeatures: util.Map[String, VersionRange] = _ /** * The channel manager, or null if this manager has not been started yet. This variable * can only be read or written from the event queue thread. */ private var _channelManager: BrokerToControllerChannelManager = _ /** * The event queue. */ private[server] val eventQueue = new KafkaEventQueue(time, logContext, threadNamePrefix.getOrElse("")) /** * Start the BrokerLifecycleManager. * * @param highestMetadataOffsetProvider Provides the current highest metadata offset. * @param channelManager The brokerToControllerChannelManager to use. * @param clusterId The cluster ID. 
*/ def start(highestMetadataOffsetProvider: () => Long, channelManager: BrokerToControllerChannelManager, clusterId: String, advertisedListeners: ListenerCollection, supportedFeatures: util.Map[String, VersionRange]): Unit = { eventQueue.append(new StartupEvent(highestMetadataOffsetProvider, channelManager, clusterId, advertisedListeners, supportedFeatures)) } def setReadyToUnfence(): Unit = { eventQueue.append(new SetReadyToUnfenceEvent()) } def brokerEpoch: Long = _brokerEpoch def state: BrokerState = _state private class BeginControlledShutdownEvent extends EventQueue.Event { override def run(): Unit = { _state match { case BrokerState.PENDING_CONTROLLED_SHUTDOWN => info("Attempted to enter pending controlled shutdown state, but we are " + "already in that state.") case BrokerState.RUNNING => info("Beginning controlled shutdown.") _state = BrokerState.PENDING_CONTROLLED_SHUTDOWN // Send the next heartbeat immediately in order to let the controller // begin processing the controlled shutdown as soon as possible. scheduleNextCommunication(0) case _ => info(s"Skipping controlled shutdown because we are in state ${_state}.") beginShutdown() } } } /** * Enter the controlled shutdown state if we are in RUNNING state. * Or, if we're not running, shut down immediately. */ def beginControlledShutdown(): Unit = { eventQueue.append(new BeginControlledShutdownEvent()) } /** * Start shutting down the BrokerLifecycleManager, but do not block. */ def beginShutdown(): Unit = { eventQueue.beginShutdown("beginShutdown", new ShutdownEvent()) } /** * Shut down the BrokerLifecycleManager and block until all threads are joined. */ def close(): Unit = { beginShutdown() eventQueue.close() } private class SetReadyToUnfenceEvent() extends EventQueue.Event { override def run(): Unit = { readyToUnfence = true scheduleNextCommunicationImmediately() } } private class StartupEvent(highestMetadataOffsetProvider: () => Long, channelManager: BrokerToControllerChannelManager, clusterId: String, advertisedListeners: ListenerCollection, supportedFeatures: util.Map[String, VersionRange]) extends EventQueue.Event { override def run(): Unit = { _highestMetadataOffsetProvider = highestMetadataOffsetProvider _channelManager = channelManager _channelManager.start() _state = BrokerState.STARTING _clusterId = clusterId _advertisedListeners = advertisedListeners.duplicate() _supportedFeatures = new util.HashMap[String, VersionRange](supportedFeatures) eventQueue.scheduleDeferred("initialRegistrationTimeout", new DeadlineFunction(time.nanoseconds() + initialTimeoutNs), new RegistrationTimeoutEvent()) sendBrokerRegistration() info(s"Incarnation ${incarnationId} of broker ${nodeId} in cluster ${clusterId} " + "is now STARTING.") } } private def sendBrokerRegistration(): Unit = { val features = new BrokerRegistrationRequestData.FeatureCollection() _supportedFeatures.asScala.foreach { case (name, range) => features.add(new BrokerRegistrationRequestData.Feature(). setName(name). setMinSupportedVersion(range.min()). setMaxSupportedVersion(range.max())) } val data = new BrokerRegistrationRequestData(). setBrokerId(nodeId). setClusterId(_clusterId). setFeatures(features). setIncarnationId(incarnationId). setListeners(_advertisedListeners). 
setRack(rack.orNull) if (isTraceEnabled) { trace(s"Sending broker registration ${data}") } _channelManager.sendRequest(new BrokerRegistrationRequest.Builder(data), new BrokerRegistrationResponseHandler()) } private class BrokerRegistrationResponseHandler extends ControllerRequestCompletionHandler { override def onComplete(response: ClientResponse): Unit = { if (response.authenticationException() != null) { error(s"Unable to register broker ${nodeId} because of an authentication exception.", response.authenticationException()); scheduleNextCommunicationAfterFailure() } else if (response.versionMismatch() != null) { error(s"Unable to register broker ${nodeId} because of an API version problem.", response.versionMismatch()); scheduleNextCommunicationAfterFailure() } else if (response.responseBody() == null) { warn(s"Unable to register broker ${nodeId}.") scheduleNextCommunicationAfterFailure() } else if (!response.responseBody().isInstanceOf[BrokerRegistrationResponse]) { error(s"Unable to register broker ${nodeId} because the controller returned an " + "invalid response type.") scheduleNextCommunicationAfterFailure() } else { val message = response.responseBody().asInstanceOf[BrokerRegistrationResponse] val errorCode = Errors.forCode(message.data().errorCode()) if (errorCode == Errors.NONE) { failedAttempts = 0 _brokerEpoch = message.data().brokerEpoch() registered = true initialRegistrationSucceeded = true info(s"Successfully registered broker ${nodeId} with broker epoch ${_brokerEpoch}") scheduleNextCommunicationImmediately() // Immediately send a heartbeat } else { info(s"Unable to register broker ${nodeId} because the controller returned " + s"error ${errorCode}") scheduleNextCommunicationAfterFailure() } } } override def onTimeout(): Unit = { info(s"Unable to register the broker because the RPC got timed out before it could be sent.") scheduleNextCommunicationAfterFailure() } } private def sendBrokerHeartbeat(): Unit = { val metadataOffset = _highestMetadataOffsetProvider() val data = new BrokerHeartbeatRequestData(). setBrokerEpoch(_brokerEpoch). setBrokerId(nodeId). setCurrentMetadataOffset(metadataOffset). setWantFence(!readyToUnfence). setWantShutDown(_state == BrokerState.PENDING_CONTROLLED_SHUTDOWN) if (isTraceEnabled) { trace(s"Sending broker heartbeat ${data}") } _channelManager.sendRequest(new BrokerHeartbeatRequest.Builder(data), new BrokerHeartbeatResponseHandler()) } private class BrokerHeartbeatResponseHandler extends ControllerRequestCompletionHandler { override def onComplete(response: ClientResponse): Unit = { if (response.authenticationException() != null) { error(s"Unable to send broker heartbeat for ${nodeId} because of an " + "authentication exception.", response.authenticationException()); scheduleNextCommunicationAfterFailure() } else if (response.versionMismatch() != null) { error(s"Unable to send broker heartbeat for ${nodeId} because of an API " + "version problem.", response.versionMismatch()); scheduleNextCommunicationAfterFailure() } else if (response.responseBody() == null) { warn(s"Unable to send broker heartbeat for ${nodeId}. 
Retrying.") scheduleNextCommunicationAfterFailure() } else if (!response.responseBody().isInstanceOf[BrokerHeartbeatResponse]) { error(s"Unable to send broker heartbeat for ${nodeId} because the controller " + "returned an invalid response type.") scheduleNextCommunicationAfterFailure() } else { val message = response.responseBody().asInstanceOf[BrokerHeartbeatResponse] val errorCode = Errors.forCode(message.data().errorCode()) if (errorCode == Errors.NONE) { failedAttempts = 0 _state match { case BrokerState.STARTING => if (message.data().isCaughtUp()) { info(s"The broker has caught up. Transitioning from STARTING to RECOVERY.") _state = BrokerState.RECOVERY initialCatchUpFuture.complete(null) } else { debug(s"The broker is STARTING. Still waiting to catch up with cluster metadata.") } // Schedule the heartbeat after only 10 ms so that in the case where // there is no recovery work to be done, we start up a bit quicker. scheduleNextCommunication(NANOSECONDS.convert(10, MILLISECONDS)) case BrokerState.RECOVERY => if (!message.data().isFenced()) { info(s"The broker has been unfenced. Transitioning from RECOVERY to RUNNING.") _state = BrokerState.RUNNING } else { info(s"The broker is in RECOVERY.") } scheduleNextCommunicationAfterSuccess() case BrokerState.RUNNING => debug(s"The broker is RUNNING. Processing heartbeat response.") scheduleNextCommunicationAfterSuccess() case BrokerState.PENDING_CONTROLLED_SHUTDOWN => if (!message.data().shouldShutDown()) { info(s"The broker is in PENDING_CONTROLLED_SHUTDOWN state, still waiting " + "for the active controller.") if (!gotControlledShutdownResponse) { // If this is the first pending controlled shutdown response we got, // schedule our next heartbeat a little bit sooner than we usually would. // In the case where controlled shutdown completes quickly, this will // speed things up a little bit. scheduleNextCommunication(NANOSECONDS.convert(50, MILLISECONDS)) } else { scheduleNextCommunicationAfterSuccess() } } else { info(s"The controlled has asked us to exit controlled shutdown.") beginShutdown() } gotControlledShutdownResponse = true case BrokerState.SHUTTING_DOWN => info(s"The broker is SHUTTING_DOWN. 
Ignoring heartbeat response.") case _ => error(s"Unexpected broker state ${_state}") scheduleNextCommunicationAfterSuccess() } } else { warn(s"Broker ${nodeId} sent a heartbeat request but received error ${errorCode}.") scheduleNextCommunicationAfterFailure() } } } override def onTimeout(): Unit = { info("Unable to send a heartbeat because the RPC got timed out before it could be sent.") scheduleNextCommunicationAfterFailure() } } private def scheduleNextCommunicationImmediately(): Unit = scheduleNextCommunication(0) private def scheduleNextCommunicationAfterFailure(): Unit = { val delayMs = resendExponentialBackoff.backoff(failedAttempts) failedAttempts = failedAttempts + 1 scheduleNextCommunication(NANOSECONDS.convert(delayMs, MILLISECONDS)) } private def scheduleNextCommunicationAfterSuccess(): Unit = { scheduleNextCommunication(NANOSECONDS.convert( config.brokerHeartbeatIntervalMs.longValue() , MILLISECONDS)) } private def scheduleNextCommunication(intervalNs: Long): Unit = { trace(s"Scheduling next communication at ${MILLISECONDS.convert(intervalNs, NANOSECONDS)} " + "ms from now.") val deadlineNs = time.nanoseconds() + intervalNs eventQueue.scheduleDeferred("communication", new DeadlineFunction(deadlineNs), new CommunicationEvent()) } private class RegistrationTimeoutEvent extends EventQueue.Event { override def run(): Unit = { if (!initialRegistrationSucceeded) { error("Shutting down because we were unable to register with the controller quorum.") eventQueue.beginShutdown("registrationTimeout", new ShutdownEvent()) } } } private class CommunicationEvent extends EventQueue.Event { override def run(): Unit = { if (registered) { sendBrokerHeartbeat() } else { sendBrokerRegistration() } } } private class ShutdownEvent extends EventQueue.Event { override def run(): Unit = { info(s"Transitioning from ${_state} to ${BrokerState.SHUTTING_DOWN}.") _state = BrokerState.SHUTTING_DOWN controlledShutdownFuture.complete(null) initialCatchUpFuture.cancel(false) if (_channelManager != null) { _channelManager.shutdown() _channelManager = null } } } }
guozhangwang/kafka
core/src/main/scala/kafka/server/BrokerLifecycleManager.scala
Scala
apache-2.0
19,675
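The lifecycle manager above spaces out retries with ExponentialBackoff(100, 2, brokerSessionTimeoutMs, 0.02), indexed by failedAttempts. The snippet below is only a sketch of that backoff-with-jitter idea in plain Scala; it is not Kafka's ExponentialBackoff class, and the RetryBackoff name and formula are assumptions made for illustration:

import scala.util.Random

// Illustrative exponential backoff with jitter: delay grows by `multiplier`
// per failed attempt, is capped at `maxMs`, and gets +/- `jitter` noise so
// that many brokers do not retry in lock-step.
final class RetryBackoff(initialMs: Long, multiplier: Double, maxMs: Long, jitter: Double) {
  def backoff(failedAttempts: Long): Long = {
    val exp   = math.min(failedAttempts.toDouble, 64.0)                // avoid overflow for huge attempt counts
    val base  = math.min(initialMs * math.pow(multiplier, exp), maxMs.toDouble)
    val noise = 1.0 + (Random.nextDouble() * 2 - 1) * jitter           // factor in [1 - jitter, 1 + jitter]
    (base * noise).toLong
  }
}

// e.g. new RetryBackoff(100, 2, 30000, 0.02).backoff(3) is roughly 800 ms,
// and repeated failures quickly saturate at the 30 s cap.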
/*******************************************************************************/ /* */ /* Copyright (C) 2017 by Max Lv <[email protected]> */ /* Copyright (C) 2017 by Mygod Studio <[email protected]> */ /* */ /* This program is free software: you can redistribute it and/or modify */ /* it under the terms of the GNU General Public License as published by */ /* the Free Software Foundation, either version 3 of the License, or */ /* (at your option) any later version. */ /* */ /* This program is distributed in the hope that it will be useful, */ /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ /* GNU General Public License for more details. */ /* */ /* You should have received a copy of the GNU General Public License */ /* along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* */ /*******************************************************************************/ package com.github.shadowsocks import android.app.backup.{BackupAgentHelper, FileBackupHelper, SharedPreferencesBackupHelper} import com.github.shadowsocks.acl.Acl import com.github.shadowsocks.database.DBHelper class ShadowsocksBackupAgent extends BackupAgentHelper { // The names of the SharedPreferences groups that the application maintains. These // are the same strings that are passed to getSharedPreferences(String, int). val PREFS_DISPLAY = "com.github.shadowsocks_preferences" // An arbitrary string used within the BackupAgentHelper implementation to // identify the SharedPreferencesBackupHelper's data. val MY_PREFS_BACKUP_KEY = "com.github.shadowsocks" val DATABASE = "com.github.shadowsocks.database.profile" override def onCreate() { val helper = new SharedPreferencesBackupHelper(this, PREFS_DISPLAY) addHelper(MY_PREFS_BACKUP_KEY, helper) addHelper(DATABASE, new FileBackupHelper(this, "../databases/" + DBHelper.PROFILE, Acl.CUSTOM_RULES + ".acl")) } }
hangox/shadowsocks-android
mobile/src/main/scala/com/github/shadowsocks/ShadowsocksBackupAgent.scala
Scala
gpl-3.0
2,574
package com.twitter.finagle.memcached.protocol.text import client.DecodingToResponse import client.{Decoder => ClientDecoder} import server.DecodingToCommand import server.{Decoder => ServerDecoder} import com.twitter.io.Charsets.Utf8 import com.twitter.finagle._ import com.twitter.finagle.memcached.protocol._ import com.twitter.finagle.memcached.util.ChannelBufferUtils._ import com.twitter.finagle.tracing._ import com.twitter.finagle.stats.{NullStatsReceiver, StatsReceiver} import org.jboss.netty.buffer.ChannelBuffer import org.jboss.netty.channel._ import scala.collection.immutable object Memcached { def apply(stats: StatsReceiver = NullStatsReceiver) = new Memcached(stats) def get() = apply() } object MemcachedClientPipelineFactory extends ChannelPipelineFactory { def getPipeline() = { val pipeline = Channels.pipeline() pipeline.addLast("decoder", new ClientDecoder) pipeline.addLast("decoding2response", new DecodingToResponse) pipeline.addLast("encoder", new Encoder) pipeline.addLast("command2encoding", new CommandToEncoding) pipeline } } object MemcachedServerPipelineFactory extends ChannelPipelineFactory { private val storageCommands = collection.Set[ChannelBuffer]( "set", "add", "replace", "append", "prepend") def getPipeline() = { val pipeline = Channels.pipeline() // pipeline.addLast("exceptionHandler", new ExceptionHandler) pipeline.addLast("decoder", new ServerDecoder(storageCommands)) pipeline.addLast("decoding2command", new DecodingToCommand) pipeline.addLast("encoder", new Encoder) pipeline.addLast("response2encoding", new ResponseToEncoding) pipeline } } class Memcached(stats: StatsReceiver) extends CodecFactory[Command, Response] { def this() = this(NullStatsReceiver) def server = Function.const { new Codec[Command, Response] { def pipelineFactory = MemcachedServerPipelineFactory } } def client = Function.const { new Codec[Command, Response] { def pipelineFactory = MemcachedClientPipelineFactory // pass every request through a filter to create trace data override def prepareConnFactory(underlying: ServiceFactory[Command, Response]) = new MemcachedTracingFilter() andThen new MemcachedLoggingFilter(stats) andThen underlying } } } /** * Adds tracing information for each memcached request. * Including command name, when request was sent and when it was received. 
*/ private class MemcachedTracingFilter extends SimpleFilter[Command, Response] { def apply(command: Command, service: Service[Command, Response]) = Trace.unwind { Trace.recordServiceName("memcached") Trace.recordRpc(command.name) Trace.record(Annotation.ClientSend()) val response = service(command) if (Trace.isActivelyTracing) { response onSuccess { case Values(values) => command match { case cmd: RetrievalCommand => val keys = immutable.Set(cmd.keys map { _.toString(Utf8) }: _*) val hits = values.map { case value => Trace.recordBinary(value.key.toString(Utf8), "Hit") value.key.toString(Utf8) } val misses = keys -- hits misses foreach { k => Trace.recordBinary(k.toString(Utf8), "Miss") } case _ => } case _ => } ensure { Trace.record(Annotation.ClientRecv()) } } response } } private class MemcachedLoggingFilter(stats: StatsReceiver) extends SimpleFilter[Command, Response] { private[this] val serviceName = "memcached" private[this] val error = stats.scope("error") private[this] val succ = stats.scope("success") override def apply(command: Command, service: Service[Command, Response]) = { service(command) map { response => response match { case NotFound() | Stored() | NotStored() | Exists() | Deleted() | NoOp() | Info(_, _) | InfoLines(_) | Values(_) | Number(_) => succ.counter(command.name).incr() case Error(_) => error.counter(command.name).incr() case _ => error.counter(command.name).incr() } response } } }
yancl/finagle-6.22.0
finagle-memcached/src/main/scala/com/twitter/finagle/memcached/protocol/text/Memcached.scala
Scala
apache-2.0
4,350
package org.zouzias.rx.kafka.pipes

import com.cj.kafka.rx.RxConsumer
import kafka.serializer.StringDecoder
import org.apache.kafka.clients.producer.ProducerRecord
import org.zouzias.rx.kafka.utils.StringKafkaProducer
import org.zouzias.rx.kafka.utils.RxConsumerImplicits._

/**
 * A simple pipe from sourceTopic to targetTopic.
 *
 * Reads messages from "sourceTopic" and writes them to "targetTopic".
 */
object SimplePipe extends App {
  type StringRecord = ProducerRecord[String, String]

  val producer = StringKafkaProducer("localhost:9092")

  val sourceTopic: String = "words"
  val targetTopic: String = "new-words"

  val consumer = new RxConsumer("localhost:2181", "simple-pipe")

  // pipe sourceTopic -> targetTopic
  consumer.getStringStream(sourceTopic)
    .map(_.value)
    .foreach(x => producer.send(new StringRecord(targetTopic, x)))

  // Block forever
  readLine()
}
zouzias/kafka-rx-example
src/main/scala/org/zouzias/rx/kafka/pipes/SimplePipe.scala
Scala
apache-2.0
887
package com.github.jeanadrien.evrythng.scala.rest import com.github.jeanadrien.evrythng.scala.json.EvtJsonProtocol._ import com.github.jeanadrien.evrythng.scala.json._ import com.typesafe.scalalogging.LazyLogging /** * */ class ApplicationRestApi(override val apiKey : String) extends Environment with AuthorizedEnvironment with LazyLogging { parent => val me = new { def read = get[Application]("/applications/me") } // auth object auth { object evrythng { object users { def create(user : User) = post[User, UserStatus]("/auth/evrythng/users", user) def createAnonymous() = post[User, UserStatus]("/auth/evrythng/users", User()).queryParameter("anonymous" -> "true") def apply(userId : Ref) = new { def validate(activationCode : String) = post[UserStatus, UserStatus](s"/auth/evrythng/users/${userId}/validate", UserStatus(activationCode = Some(activationCode))) } } def login(email : String, password : String) = post[User, UserStatus]("/auth/evrythng", User(email = Some(email), password = Some(password))) } } // products val products = new ResourceContext[Product](this, "/products") with Read[Product] def product(productId : Ref) = new ProductContext(productId, apiKey) // actions def action(actionType : String) = new ActionContext("", actionType, apiKey) // TODO /scan // places val places = new ResourceContext[Place](this, "/places") with Read[Place] { def listAround(position : (Double, Double), maxDist : Double) = list().queryParameter( "lat" -> position._2.toString, "lon" -> position._1.toString, "maxDist" -> maxDist.toString ) } }
jeanadrien/evrythng-scala-sdk
src/main/scala/com/github/jeanadrien/evrythng/scala/rest/ApplicationRestApi.scala
Scala
apache-2.0
1,921
/* ************************************************************************************* * Copyright 2011 Normation SAS ************************************************************************************* * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as * published by the Free Software Foundation, either version 3 of the * License, or (at your option) any later version. * * In accordance with the terms of section 7 (7. Additional Terms.) of * the GNU Affero GPL v3, the copyright holders add the following * Additional permissions: * Notwithstanding to the terms of section 5 (5. Conveying Modified Source * Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3 * licence, when you create a Related Module, this Related Module is * not considered as a part of the work and may be distributed under the * license agreement of your choice. * A "Related Module" means a set of sources files including their * documentation that, without modification of the Source Code, enables * supplementary functions or services in addition to those offered by * the Software. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>. * ************************************************************************************* */ package com.normation.rudder.services.marshalling import scala.xml._ import com.normation.utils.XmlUtils object MarshallingUtil { def createElem(label:String, fileFormat:String)(children:NodeSeq) : Elem = { Elem(null, label, new UnprefixedAttribute("fileFormat", fileFormat, Null) , TopScope, false, children:_*) } def createTrimedElem(label:String, fileFormat:String)(children:NodeSeq) : Elem = { XmlUtils.trim { createElem(label, fileFormat)(children) } } }
Kegeruneku/rudder
rudder-core/src/main/scala/com/normation/rudder/services/marshalling/MarshallingUtil.scala
Scala
agpl-3.0
2,153
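createElem above builds an element carrying a fileFormat attribute via the six-argument Elem constructor from scala-xml. A minimal usage sketch of that same constructor follows; the "directive" label and the fileFormat value "6" are invented for the example:

import scala.xml._

object ElemSketch extends App {
  // Same construction pattern as createElem: no prefix, one unprefixed
  // attribute, top-level scope, empty tags not minimised, one child node.
  val e: Elem = Elem(null, "directive",
    new UnprefixedAttribute("fileFormat", "6", Null), TopScope,
    false, <name>example</name>)

  println(e) // prints something like <directive fileFormat="6"><name>example</name></directive>
}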
package org.jetbrains.plugins.scala package lang package parser package parsing package expressions import com.intellij.lang.PsiBuilder import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes import org.jetbrains.plugins.scala.lang.parser.parsing.builder.ScalaPsiBuilder import org.jetbrains.plugins.scala.lang.parser.parsing.types.TypeArgs /** * @author AlexanderPodkhalyuzin * Date: 03.03.2008 */ /* * InfixExpr ::= PrefixExpr * | InfixExpr id [TypeArgs] [nl] InfixExpr */ object InfixExpr extends InfixExpr { override protected val prefixExpr = PrefixExpr } trait InfixExpr { protected val prefixExpr: PrefixExpr import org.jetbrains.plugins.scala.lang.parser.util.ParserUtils._ def parse(builder: ScalaPsiBuilder): Boolean = { type MStack[X] = _root_.scala.collection.mutable.Stack[X] val markerStack = new MStack[PsiBuilder.Marker] val opStack = new MStack[String] val infixMarker = builder.mark var backupMarker = builder.mark var count = 0 if (!prefixExpr.parse(builder)) { backupMarker.drop() infixMarker.drop() return false } var exitOf = true while (builder.getTokenType == ScalaTokenTypes.tIDENTIFIER && !builder.newlineBeforeCurrentToken && exitOf) { //need to know associativity val s = builder.getTokenText var exit = false while (!exit) { if (opStack.isEmpty) { opStack push s val newMarker = backupMarker.precede markerStack push newMarker exit = true } else if (!compar(s, opStack.top, builder)) { opStack.pop() backupMarker.drop() backupMarker = markerStack.top.precede markerStack.pop().done(ScalaElementTypes.INFIX_EXPR) } else { opStack push s val newMarker = backupMarker.precede markerStack push newMarker exit = true } } val setMarker = builder.mark val opMarker = builder.mark builder.advanceLexer() //Ate id opMarker.done(ScalaElementTypes.REFERENCE_EXPRESSION) TypeArgs.parse(builder, isPattern = false) if (builder.twoNewlinesBeforeCurrentToken) { setMarker.rollbackTo() count = 0 backupMarker.drop() exitOf = false } else { backupMarker.drop() backupMarker = builder.mark if (!prefixExpr.parse(builder)) { setMarker.rollbackTo() count = 0 exitOf = false } else { setMarker.drop() count = count + 1 } } } if (exitOf) backupMarker.drop() if (count > 0) { while (count > 0 && markerStack.nonEmpty) { markerStack.pop().done(ScalaElementTypes.INFIX_EXPR) count -= 1 } } infixMarker.drop() while (markerStack.nonEmpty) { markerStack.pop().drop() } true } //private var assoc: Int = 0 //this mark associativity: left - 1, right - -1 //compares two operators a id2 b id1 c private def compar(id1: String, id2: String, builder: PsiBuilder): Boolean = { if (priority(id1, assignments = true) < priority(id2, assignments = true)) true // a * b + c =((a * b) + c) else if (priority(id1, assignments = true) > priority(id2, assignments = true)) false // a + b * c = (a + (b * c)) else if (associate(id1) == associate(id2)) if (associate(id1) == -1) true else false else { builder error ErrMsg("wrong.type.associativity") false } } //Associations of operator def associate(id: String): Int = { id.charAt(id.length - 1) match { case ':' => -1 // right case _ => +1 // left } } }
whorbowicz/intellij-scala
src/org/jetbrains/plugins/scala/lang/parser/parsing/expressions/InfixExpr.scala
Scala
apache-2.0
3,726
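The associate helper above encodes Scala's rule that an operator is right-associative exactly when its name ends in ':', while compar uses operator priority to decide how expressions group. A tiny stand-alone illustration of both rules at the language level (nothing here comes from the plugin code):

object AssociativityDemo extends App {
  // '::' ends in ':', so it is right-associative: 1 :: 2 :: Nil groups as
  // 1 :: (2 :: Nil), which desugars to method calls on the right-hand operand.
  val sugared   = 1 :: 2 :: Nil
  val desugared = Nil.::(2).::(1)
  println(sugared == desugared) // true

  // '*' has higher priority than '+', so 1 + 2 * 3 groups as 1 + (2 * 3).
  println(1 + 2 * 3) // 7
}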