code: stringlengths 5–1M
repo_name: stringlengths 5–109
path: stringlengths 6–208
language: stringclasses (1 value)
license: stringclasses (15 values)
size: int64 (5–1M)
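Each record that follows lists the code column first (the file contents), then its repo_name, path, language, license, and size values.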
package dhg.ccg.parse.scg.mcmc import org.junit.Test import org.junit.Assert._ import dhg.ccg.parse._ import dhg.ccg.prob._ import dhg.ccg.cat._ import dhg.ccg.parse.pcfg._ import dhg.ccg.parse.scg._ import dhg.ccg.tagdict.StartEndTags import dhg.util._ import dhg.ccg.tagdict._ import dhg.ccg.test.TestUtil.DoubleIteratorRandomGenerator import org.apache.commons.math3.random.MersenneTwister class ScgAcceptanceSamplerTests { val S = cat"S".asInstanceOf[AtomCat] val NP = cat"NP".asInstanceOf[AtomCat] val N = cat"N".asInstanceOf[AtomCat] val PP = cat"PP".asInstanceOf[AtomCat] val STA = cat"<S>" val END = cat"<E>" val A = cat"A".asInstanceOf[AtomCat] val B = cat"B".asInstanceOf[AtomCat] val C = cat"C".asInstanceOf[AtomCat] val D = cat"D".asInstanceOf[AtomCat] val E = cat"E".asInstanceOf[AtomCat] val F = cat"F".asInstanceOf[AtomCat] val G = cat"G".asInstanceOf[AtomCat] val H = cat"H".asInstanceOf[AtomCat] @Test def test_FullScgAcceptanceSampler_accept { type Word = String val mockRootDist = new LogProbabilityDistribution[Cat] { def apply(x: Cat): LogDouble = ??? def sample(): Cat = ??? def defaultProb = ??? } val mockProdDist = new ConditionalLogProbabilityDistribution[Cat, Prod] { def apply(right: Prod, given: Cat): LogDouble = ??? def sample(given: Cat): Prod = ??? } val mockLctxDist = new ConditionalLogProbabilityDistribution[Cat, Cat] { def apply(left: Cat, given: Cat): LogDouble = ??? def sample(given: Cat): Cat = ??? } val mockRctxDist = new ConditionalLogProbabilityDistribution[Cat, Cat] { def apply(right: Cat, given: Cat): LogDouble = ??? def sample(given: Cat): Cat = ??? } val T1: CcgTree = CcgLeaf(A, "a", "FAKEPOS") val T2: CcgTree = CcgLeaf(B, "b", "FAKEPOS") val T3: CcgTree = CcgLeaf(C, "c", "FAKEPOS") val T4: CcgTree = CcgLeaf(D, "d", "FAKEPOS") val T5: CcgTree = CcgLeaf(E, "e", "FAKEPOS") val mockWeighter = new ScgWeighter { def weight(tree: CcgTree, rootDist: LogProbabilityDistribution[Cat], prodDist: ConditionalLogProbabilityDistribution[Cat, Prod], lctxDist: ConditionalLogProbabilityDistribution[Cat, Cat], rctxDist: ConditionalLogProbabilityDistribution[Cat, Cat])(se: StartEndTags[Cat]): LogDouble = { assertSame(mockRootDist, rootDist) assertSame(mockProdDist, prodDist) assertSame(mockLctxDist, lctxDist) assertSame(mockRctxDist, rctxDist) LogDouble(tree match { case T1 => 0.11 case T2 => 0.21 case T3 => 0.31 case T4 => 0.41 case T5 => 0.51 case _ => fail(f"no match for $tree"); ??? }) } def pcfgWeight(tree: CcgTree, rootDist: LogProbabilityDistribution[Cat], prodDist: ConditionalLogProbabilityDistribution[Cat, Prod]): LogDouble = { assertSame(mockRootDist, rootDist) assertSame(mockProdDist, prodDist) LogDouble(tree match { case T1 => 0.33 case T2 => 0.13 case T3 => 0.23 case T4 => 0.53 case T5 => 0.43 case _ => fail(f"no match for $tree"); ??? }) } def ctxWeight(tree: CcgTree, lctxDist: ConditionalLogProbabilityDistribution[Cat, Cat], rctxDist: ConditionalLogProbabilityDistribution[Cat, Cat])(se: StartEndTags[Cat]): LogDouble = ??? 
} val se = SimpleStartEndTags[Cat](STA, END) def r(d: Double) = DoubleIteratorRandomGenerator(Iterator(d)) val cas = new FullScgAcceptanceSampler(mockWeighter) val (a1, r1, ag1) = cas.accept(newTree = T1, curTree = T4, mockRootDist, mockProdDist, mockLctxDist, mockRctxDist, r(0.10))(se) // 0.11/0.41 = 0.2682926829268293 assertTrue(a1) assertEquals(0.2682926829268293, r1, 1e-9) assertTrue(ag1) val (a2, r2, ag2) = cas.accept(newTree = T1, curTree = T4, mockRootDist, mockProdDist, mockLctxDist, mockRctxDist, r(0.25))(se) // 0.11/0.41 = 0.2682926829268293 assertTrue(a2) assertEquals(0.2682926829268293, r2, 1e-9) assertTrue(ag2) val (a3, r3, ag3) = cas.accept(newTree = T1, curTree = T4, mockRootDist, mockProdDist, mockLctxDist, mockRctxDist, r(0.50))(se) // 0.11/0.41 = 0.2682926829268293 assertFalse(a3) assertEquals(0.2682926829268293, r3, 1e-9) assertTrue(ag3) val (a4, r4, ag4) = cas.accept(newTree = T1, curTree = T4, mockRootDist, mockProdDist, mockLctxDist, mockRctxDist, r(0.80))(se) // 0.11/0.41 = 0.2682926829268293 assertFalse(a4) assertEquals(0.2682926829268293, r4, 1e-9) assertTrue(ag4) val n1 = Vector.fill(1000000)(cas.accept(newTree = T1, curTree = T4, mockRootDist, mockProdDist, mockLctxDist, mockRctxDist, new MersenneTwister)(se)._1).count(identity) // 0.11/0.41 = 0.2682926829268293 assertEquals(0.2682926829268293, n1 / 1000000.0, 1e-3) // new is better than current: unconditionally accept val (a5, r5, ag5) = cas.accept(newTree = T2, curTree = T1, mockRootDist, mockProdDist, mockLctxDist, mockRctxDist, DoubleIteratorRandomGenerator(Iterator()))(se) // 0.21/0.11 = 1.909090909090909 assertTrue(a5) assertEquals(1.909090909090909, r5, 1e-9) assertFalse(ag5) } @Test def test_ContextScgAcceptanceSampler_accept { type Word = String val mockRootDist = new LogProbabilityDistribution[Cat] { def apply(x: Cat): LogDouble = ??? def sample(): Cat = ??? def defaultProb = ??? } val mockProdDist = new ConditionalLogProbabilityDistribution[Cat, Prod] { def apply(right: Prod, given: Cat): LogDouble = ??? def sample(given: Cat): Prod = ??? } val mockLctxDist = new ConditionalLogProbabilityDistribution[Cat, Cat] { def apply(left: Cat, given: Cat): LogDouble = ??? def sample(given: Cat): Cat = ??? } val mockRctxDist = new ConditionalLogProbabilityDistribution[Cat, Cat] { def apply(right: Cat, given: Cat): LogDouble = ??? def sample(given: Cat): Cat = ??? } val T1: CcgTree = CcgLeaf(A, "a", "FAKEPOS") val T2: CcgTree = CcgLeaf(B, "b", "FAKEPOS") val T3: CcgTree = CcgLeaf(C, "c", "FAKEPOS") val T4: CcgTree = CcgLeaf(D, "d", "FAKEPOS") val T5: CcgTree = CcgLeaf(E, "e", "FAKEPOS") val mockWeighter = new ScgWeighter { def weight(tree: CcgTree, rootDist: LogProbabilityDistribution[Cat], prodDist: ConditionalLogProbabilityDistribution[Cat, Prod], lctxDist: ConditionalLogProbabilityDistribution[Cat, Cat], rctxDist: ConditionalLogProbabilityDistribution[Cat, Cat])(se: StartEndTags[Cat]): LogDouble = ??? def pcfgWeight(tree: CcgTree, rootDist: LogProbabilityDistribution[Cat], prodDist: ConditionalLogProbabilityDistribution[Cat, Prod]): LogDouble = { assertSame(mockRootDist, rootDist) assertSame(mockProdDist, prodDist) LogDouble(tree match { case T1 => 0.33 case T2 => 0.13 case T3 => 0.23 case T4 => 0.53 case T5 => 0.43 case _ => fail(f"no match for $tree"); ??? 
}) } def ctxWeight(tree: CcgTree, lctxDist: ConditionalLogProbabilityDistribution[Cat, Cat], rctxDist: ConditionalLogProbabilityDistribution[Cat, Cat])(se: StartEndTags[Cat]): LogDouble = { assertSame(mockLctxDist, lctxDist) assertSame(mockRctxDist, rctxDist) LogDouble(tree match { case T1 => 0.11 case T2 => 0.21 case T3 => 0.31 case T4 => 0.41 case T5 => 0.51 case _ => fail(f"no match for $tree"); ??? }) } } val se = SimpleStartEndTags[Cat](STA, END) def r(d: Double) = DoubleIteratorRandomGenerator(Iterator(d)) val cas = new ContextScgAcceptanceSampler(mockWeighter) val (a1, r1, ag1) = cas.accept(newTree = T1, curTree = T4, mockRootDist, mockProdDist, mockLctxDist, mockRctxDist, r(0.10))(se) // 0.11/0.41 = 0.2682926829268293 assertTrue(a1) assertEquals(0.2682926829268293, r1, 1e-9) assertTrue(ag1) val (a2, r2, ag2) = cas.accept(newTree = T1, curTree = T4, mockRootDist, mockProdDist, mockLctxDist, mockRctxDist, r(0.25))(se) // 0.11/0.41 = 0.2682926829268293 assertTrue(a2) assertEquals(0.2682926829268293, r2, 1e-9) assertTrue(ag2) val (a3, r3, ag3) = cas.accept(newTree = T1, curTree = T4, mockRootDist, mockProdDist, mockLctxDist, mockRctxDist, r(0.50))(se) // 0.11/0.41 = 0.2682926829268293 assertFalse(a3) assertEquals(0.2682926829268293, r3, 1e-9) assertTrue(ag3) val (a4, r4, ag4) = cas.accept(newTree = T1, curTree = T4, mockRootDist, mockProdDist, mockLctxDist, mockRctxDist, r(0.80))(se) // 0.11/0.41 = 0.2682926829268293 assertFalse(a4) assertEquals(0.2682926829268293, r4, 1e-9) assertTrue(ag4) val n1 = Vector.fill(1000000)(cas.accept(newTree = T1, curTree = T4, mockRootDist, mockProdDist, mockLctxDist, mockRctxDist, new MersenneTwister)(se)._1).count(identity) // 0.11/0.41 = 0.2682926829268293 assertEquals(0.2682926829268293, n1 / 1000000.0, 1e-3) // new is better than current: unconditionally accept val (a5, r5, ag5) = cas.accept(newTree = T2, curTree = T1, mockRootDist, mockProdDist, mockLctxDist, mockRctxDist, DoubleIteratorRandomGenerator(Iterator()))(se) // 0.21/0.11 = 1.909090909090909 assertTrue(a5) assertEquals(1.909090909090909, r5, 1e-9) assertFalse(ag5) } }
repo_name: dhgarrette/2015-ccg-parsing
path: src/test/scala/dhg/ccg/parse/scg/mcmc/ScgContextAcceptanceSamplerTests.scala
language: Scala
license: apache-2.0
size: 9,490
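The acceptance tests above all exercise the same Metropolis–Hastings-style rule: take the ratio of the proposed tree's weight to the current tree's weight, accept unconditionally when that ratio is at least 1, and otherwise accept when the drawn uniform value falls below it. A minimal sketch of that rule (a hypothetical helper, not part of the repository) that reproduces the expected values noted in the test comments:

def accept(newWeight: Double, curWeight: Double, u: Double): (Boolean, Double) = {
  // ratio of proposal weight to current weight; accept outright when >= 1
  val ratio = newWeight / curWeight
  (ratio >= 1.0 || u < ratio, ratio)
}

accept(0.11, 0.41, 0.10) // (true,  0.2682926829268293) -- the r(0.10) and r(0.25) cases
accept(0.11, 0.41, 0.50) // (false, 0.2682926829268293) -- the r(0.50) and r(0.80) cases
accept(0.21, 0.11, 0.99) // (true,  1.909090909090909)  -- better proposal, accepted without a draw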
/** * (c) Copyright 2013 Robert Chu. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package robert import scala.collection.mutable import java.io.FileInputStream import java.io.PrintWriter import io.backchat.hookup.BinaryMessage import io.backchat.hookup.Connected import io.backchat.hookup.HookupClient import io.backchat.hookup.HookupServer import io.backchat.hookup.HookupServer.HookupServerClient import io.backchat.hookup.TextMessage import japa.parser.JavaParser import japa.parser.ast.CompilationUnit import org.slf4j.Logger import org.slf4j.LoggerFactory import robert.protobuf.AstProtos object ProtoBufServeJava { val logger: Logger = LoggerFactory.getLogger(this.getClass) def main(args: Array[String]) { // Setup the server client. val server = HookupServer(8125) { new AstServerClient() } // Start the server. server.start } } class AstServerClient extends HookupServerClient { import AstServerClient._ def onConnect() { logger.info("Connection opened!") } def onMessage(text: String) { // Parse the text. val request: AstProtos.AstRequest = base64DecodeRequest(text) request.getRequestType match { case "get" => { // Extract the get request. val getRequest: AstProtos.AstGetRequest = { assume( request.getGetRequest != null, "A request of type 'get' should have a populated get_request field") assume( request.getGetRequest.getPath != null, "A get request should have a path specified") logger.info(s"Received get request: ${request.getGetRequest.toString}") request.getGetRequest } val getResponse = get(getRequest) val response = AstProtos.AstResponse .newBuilder() .setGetResponse(getResponse) .build() // Send the response. val base64: String = base64EncodeResponse(response) logger.debug(s"Sending $base64") send(base64) } case "put" => { // Extract the put request. val putRequest: AstProtos.AstPutRequest = { assume( request.getPutRequest != null, "A request of type 'put' should have a populated put_request field") assume( request.getPutRequest.getPath != null, "A put request should have a path specified") logger.info(s"Received put request: ${request.getPutRequest.toString}") request.getPutRequest } val putResponse = put(putRequest) val response = AstProtos.AstResponse .newBuilder() .setPutResponse(putResponse) .build() // Send the result. 
val base64: String = base64EncodeResponse(response) logger.debug(s"Sending $base64") send(base64) } } } override def receive: HookupClient.Receive = { case Connected => onConnect() case TextMessage(text) => onMessage(text) case BinaryMessage(content) => } } //final case class Memoize[-KeyType, +CachedType]( // function: (KeyType => CachedType), // cache: mutable.Map[KeyType, CachedType] = mutable.Map()) // extends (KeyType => CachedType) { // override def apply(key: KeyType): CachedType = { // if (cache.contains(key)) { // cache(key) // } else { // val value = function(key) // cache += (key -> value) // value // } // } //} object AstServerClient { val logger: Logger = LoggerFactory.getLogger(classOf[AstServerClient]) /** * Encodes a protocol buffer into a base64 string. * * @param response message to encode. * @return the base64 string encoded message. */ def base64EncodeResponse(response: AstProtos.AstResponse): String = { new sun.misc.BASE64Encoder().encode(response.toByteArray) } // val base64EncodeResponseCache: AstProtos.AstResponse => String = Memoize(base64EncodeResponse) // // /** // * Encodes a protocol buffer into a base64 string or uses a cached encoding. Caches all encode // * operations for later use. // * // * @param response message to encode. // * @return the base64 string encoded message. // */ // def base64EncodeResponseCached(response: AstProtos.AstResponse): String = // base64EncodeResponseCache(response) /** * Decodes a protocol buffer encoded into a base64 string containing a request message from a * websocket client. * * @param request string to decode. * @return the request message from a websocket client. */ def base64DecodeRequest(request: String): AstProtos.AstRequest = { val requestBytes: Array[Byte] = new sun.misc.BASE64Decoder().decodeBuffer(request) AstProtos.AstRequest.parseFrom(requestBytes) } // val base64DecodeRequestCache: String => AstProtos.AstRequest = Memoize(base64DecodeRequest) // // /** // * Decodes a protocol buffer encoded into a base64 string containing a request message from a // * websocket client or uses a cached decoding. Caches all decode operations for later use. // * // * @param request string to decode. // * @return the request message from a websocket client. // */ // def base64DecodeRequestCached(request: String): AstProtos.AstRequest = // base64DecodeRequestCache(request) /** * Parses java code from a file. * * @param path to a java file to parse. * @return the ast of the java source code. */ def parseJavaFile(path: String): CompilationUnit = { ResourceUtils.doAndClose(new FileInputStream(path)) { JavaParser.parse } } /** * Writes an AST to a file. * * @param path to write to. * @param ast to write to a file. */ def writeJavaFile(path: String, ast: CompilationUnit) { ResourceUtils.doAndClose(new PrintWriter(path)) { _.println(ast.toString) } } /** * Handles 'get' requests from websocket clients. Returns the requested AST section. * * @param request to handle. * @return the response to respond with. */ def get(request: AstProtos.AstGetRequest): AstProtos.AstGetResponse = { // Read an AST from the provided coordinates. val ast: CompilationUnit = { val path: String = request.getPath // val version: Option[Long] = Option(request.getVersion) // Ignored for now. parseJavaFile(path) } // Return the requested AST. 
logger.debug(s"Encoding ${ast.toString}") val node: AstNode = (new JavaAstVisitor).visit(ast, null) val pbNode: AstProtos.AstNode = ProtoBufAstSerDe.serialize(node) AstProtos.AstGetResponse .newBuilder() .setRoot(pbNode) .build() } /** * Handles 'put' requests from websocket clients. Writes the specified AST to the specified * location. Returns an error code if an error occurred. * * @param request to handle. * @return the response to respond with. */ def put(request: AstProtos.AstPutRequest): AstProtos.AstPutResponse = { // Write the specified AST to the provided coordinates. val ast: CompilationUnit = { // TODO: Convert this AstNode to its CompilationUnit counterpart. // val node: AstNode = ProtoBufAstSerDe.deserialize(request.getRoot) null } val path: String = request.getPath writeJavaFile(path, ast) // Return the result. logger.debug(s"Encoding ${ast.toString}") AstProtos.AstPutResponse .newBuilder() .build() } }
repo_name: robotoer/ast-java
path: src/main/scala/robert/ProtoBufServeJava.scala
language: Scala
license: apache-2.0
size: 8,046
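A side note on the encoding used above: AstServerClient relies on the JDK-internal sun.misc.BASE64Encoder and sun.misc.BASE64Decoder classes. A minimal sketch of the same round trip with the public java.util.Base64 API (available since Java 8); the method names here are illustrative, not from the repository:

import java.util.Base64

// Encode a serialized protobuf message to a base64 string, and decode it back.
def base64Encode(bytes: Array[Byte]): String = Base64.getEncoder.encodeToString(bytes)
def base64Decode(text: String): Array[Byte] = Base64.getDecoder.decode(text)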
package org.mbari.smith

import com.google.inject.Injector
import vars.annotation.ui.{StateLookup, ToolBelt}
import vars.annotation.ui.imagepanel.Measurement

import scala.collection.JavaConverters._
import scala.util.{Failure, Success, Try}

/**
 * Created by brian on 8/6/14.
 */
object FindBogusMeasurementAssociations extends App {

  val toolBelt = StateLookup.GUICE_INJECTOR.getInstance(classOf[ToolBelt])
  val dao = toolBelt.getAnnotationDAOFactory.newAssociationDAO()

  val videoArchiveName = args(0)
  val videoFrames = CoverageEstimator.fetchAnnotations(videoArchiveName)

  val associations = for {
    vf <- videoFrames
    obs <- vf.getObservations.asScala
    ass <- obs.getAssociations.asScala
    if ass.getLinkName == Measurement.MEASUREMENT_LINKNAME
  } yield ass

  val t = associations.map { a =>
    Try(Measurement.fromLink(a)) match {
      case Success(m) => Option(m)
      case Failure(e) => {
        println(s"FAIL: ${a.getPrimaryKey}: $a")
        dao.startTransaction()
        val b = dao.find(a)
        b.setLinkValue(a.getLinkValue + " undefined")
        dao.commit()
        dao.endTransaction()
      }
    }
  }
}
repo_name: hohonuuli/vars
path: vars-standalone/src/main/scala/org/mbari/smith/FindBogusMeasurementAssociations.scala
language: Scala
license: lgpl-2.1
size: 1,154
package unfiltered.response.link object Param { /** Predefined parameter types as specified in [[http://tools.ietf.org/html/rfc5988#section-5 section-5]]. Note that `rev` is omitted as it has been deprecated by the specification. */ sealed abstract class Type(val name: String) case object Rel extends Type("rel") case object Anchor extends Type("anchor") case object Hreflang extends Type("hreflang") case object Media extends Type("media") case object Title extends Type("title") case object TitleStar extends Type("title*") case object ContentType extends Type("type") /** The extension type supporting `link-extension` parameters. */ private [link] final case class ExtensionType (override val name: String) extends Type(name) /** Construct an extension parameter. */ def extension(paramType: String): String => Extension = value => Extension(ExtensionType(paramType), value) /** Extractor for parameter types that cannot repeat within a `Ref`. */ object NonRepeatable { def unapply(param: Param) = param.paramType match { case Rel | Media | Title | TitleStar | ContentType => Some(param) case _ => None } } } /** Root type for all implementations of `link-param` as specified in [[https://tools.ietf.org/html/rfc5988#section-5 section-5]]. Predefined parameter values are specified in various documents linked or referred to in the [[https://tools.ietf.org/html/rfc5988 rfc5988]]; see [[Media]] and [[Rel]]. New parameter types can be added to `Link` headers as `link-extension` parameters. Extension parameters can be constructed via [[Param.extension]]. */ sealed abstract class Param(val paramType: Param.Type, val value: String) final case class Anchor(uri: String) extends Param(Param.Anchor, uri) final case class Hreflang(lang: String) extends Param(Param.Hreflang, lang) final case class Title(title: String) extends Param(Param.Title, title) final case class TitleStar(titleStar: String) extends Param(Param.TitleStar, titleStar) final case class MediaType(typeName: String, subTypeName: String) extends Param(Param.ContentType, s"$typeName/$subTypeName") final case class Extension private[link] (override val paramType: Param.ExtensionType, override val value: String) extends Param(paramType, value) /** Target media types as described in [[https://tools.ietf.org/html/rfc5988#section-5.4 section-5.4]] The meaning and set of possible values for this parameter are specified in the [[https://www.w3.org/TR/html401/types.html#h-6.13 HTML 401 Types]] specification. */ sealed abstract class Media(val mediaType: String) extends Param(Param.Media, mediaType) { /** According to [[https://www.w3.org/TR/html401/types.html#h-6.13 HTML 401 Types]], `Media` is a monoid resulting in the accumulated media for a single `media` parameter. */ def :+(that: Media) = CompositeMedia(this, that) final override def toString = s"Media($mediaType)" } private [link] final case class CompositeMedia (a: Media, b: Media) extends Media(a.mediaType + ", " + b.mediaType) case object Screen extends Media("screen") case object Tty extends Media("tty") case object Tv extends Media("tv") case object Projection extends Media("projection") case object Handheld extends Media("handheld") case object Print extends Media("print") case object Braille extends Media("braille") case object Aural extends Media("aural") case object All extends Media("all") /** A link relation type as described in [[https://tools.ietf.org/html/rfc5988#section-5.3 section-5.3]]. 
The relation type is specified as link parameter for which a global set of possible values is catalogued at [[http://www.iana.org/assignments/link-relations/link-relations.xml]]. The specification also permits extension types to be provided as absolute URLs (see the [[ExtensionRel]] type). */ sealed abstract class Rel(val relType: String) extends Param(Param.Rel, relType) { /** According to [[https://tools.ietf.org/html/rfc5988#section-5.5 section-5.5]], `Rel` is a monoid resulting in the accumulated relation types for a single `rel` parameter. */ def :+(that: Rel) = CompositeRel(this, that) final override def toString = s"Rel($relType)" } private [link] final case class CompositeRel (a: Rel, b: Rel) extends Rel(a.relType + " " + b.relType) /** Support for extension relation types as specified in [[https://tools.ietf.org/html/rfc5988#section-4.2 section-4.2]] */ final case class ExtensionRel (uri: String) extends Rel(uri) /* The complete set of catalogued relation types. */ case object About extends Rel("about") case object Alternate extends Rel("alternate") case object Appendix extends Rel("appendix") case object Archives extends Rel("archives") case object Author extends Rel("author") case object Bookmark extends Rel("bookmark") case object Canonical extends Rel("canonical") case object Chapter extends Rel("chapter") case object Collection extends Rel("collection") case object Contents extends Rel("contents") case object Copyright extends Rel("copyright") case object CreateForm extends Rel("create-form") case object Current extends Rel("current") case object Describedby extends Rel("describedby") case object Describes extends Rel("describes") case object Disclosure extends Rel("disclosure") case object Duplicate extends Rel("duplicate") case object Edit extends Rel("edit") case object EditForm extends Rel("edit-form") case object EditMedia extends Rel("edit-media") case object Enclosure extends Rel("enclosure") case object First extends Rel("first") case object Glossary extends Rel("glossary") case object Help extends Rel("help") case object Hosts extends Rel("hosts") case object Hub extends Rel("hub") case object Icon extends Rel("icon") case object Index extends Rel("index") case object Item extends Rel("item") case object Last extends Rel("last") case object LatestVersion extends Rel("latest-version") case object License extends Rel("license") case object Lrdd extends Rel("lrdd") case object Memento extends Rel("memento") case object Monitor extends Rel("monitor") case object MonitorGroup extends Rel("monitor-group") case object Next extends Rel("next") case object NextArchive extends Rel("next-archive") case object Nofollow extends Rel("nofollow") case object Noreferrer extends Rel("noreferrer") case object Original extends Rel("original") case object Payment extends Rel("payment") case object PredecessorVersion extends Rel("predecessor-version") case object Prefetch extends Rel("prefetch") case object Prev extends Rel("prev") case object Preview extends Rel("preview") case object Previous extends Rel("previous") case object PrevArchive extends Rel("prev-archive") case object PrivacyPolicy extends Rel("privacy-policy") case object Profile extends Rel("profile") case object Related extends Rel("related") case object Replies extends Rel("replies") case object Search extends Rel("search") case object Section extends Rel("section") case object Self extends Rel("self") case object Service extends Rel("service") case object Start extends Rel("start") case object Stylesheet extends 
Rel("stylesheet") case object Subsection extends Rel("subsection") case object SuccessorVersion extends Rel("successor-version") case object Tag extends Rel("tag") case object TermsOfService extends Rel("terms-of-service") case object Timegate extends Rel("timegate") case object Timemap extends Rel("timemap") case object Type extends Rel("type") case object Up extends Rel("up") case object VersionHistory extends Rel("version-history") case object Via extends Rel("via") case object WorkingCopy extends Rel("working-copy") case object WorkingCopyOf extends Rel("working-copy-of")
repo_name: omarkilani/unfiltered
path: library/src/main/scala/response/link/Param.scala
language: Scala
license: mit
size: 7,789
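A brief usage sketch of the composition operators defined above; the resulting values follow from CompositeMedia joining with ", " and CompositeRel joining with " ":

import unfiltered.response.link._

// Media values accumulate into a single comma-separated media parameter,
// while Rel values accumulate into a single space-separated rel parameter.
val media: Media = Screen :+ Print   // media.mediaType == "screen, print"
val rel: Rel = Next :+ Last          // rel.relType == "next last"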
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.datasources.parquet import java.io.File import org.apache.hadoop.fs.Path import org.apache.spark.sql._ import org.apache.spark.sql.catalyst.expressions.SpecificMutableRow import org.apache.spark.sql.catalyst.{InternalRow, TableIdentifier} import org.apache.spark.sql.execution.datasources.parquet.TestingUDT.{NestedStruct, NestedStructUDT} import org.apache.spark.sql.test.SharedSQLContext import org.apache.spark.sql.types._ import org.apache.spark.util.Utils /** * A test suite that tests various Parquet queries. */ class ParquetQuerySuite extends QueryTest with ParquetTest with SharedSQLContext { import testImplicits._ test("simple select queries") { withParquetTable((0 until 10).map(i => (i, i.toString)), "t") { checkAnswer(sql("SELECT _1 FROM t where t._1 > 5"), (6 until 10).map(Row.apply(_))) checkAnswer(sql("SELECT _1 FROM t as tmp where tmp._1 < 5"), (0 until 5).map(Row.apply(_))) } } test("appending") { val data = (0 until 10).map(i => (i, i.toString)) sqlContext.createDataFrame(data).toDF("c1", "c2").registerTempTable("tmp") withParquetTable(data, "t") { sql("INSERT INTO TABLE t SELECT * FROM tmp") checkAnswer(sqlContext.table("t"), (data ++ data).map(Row.fromTuple)) } sqlContext.catalog.unregisterTable(TableIdentifier("tmp")) } test("overwriting") { val data = (0 until 10).map(i => (i, i.toString)) sqlContext.createDataFrame(data).toDF("c1", "c2").registerTempTable("tmp") withParquetTable(data, "t") { sql("INSERT OVERWRITE TABLE t SELECT * FROM tmp") checkAnswer(sqlContext.table("t"), data.map(Row.fromTuple)) } sqlContext.catalog.unregisterTable(TableIdentifier("tmp")) } test("self-join") { // 4 rows, cells of column 1 of row 2 and row 4 are null val data = (1 to 4).map { i => val maybeInt = if (i % 2 == 0) None else Some(i) (maybeInt, i.toString) } withParquetTable(data, "t") { val selfJoin = sql("SELECT * FROM t x JOIN t y WHERE x._1 = y._1") val queryOutput = selfJoin.queryExecution.analyzed.output assertResult(4, "Field count mismatches")(queryOutput.size) assertResult(2, "Duplicated expression ID in query plan:\\n $selfJoin") { queryOutput.filter(_.name == "_1").map(_.exprId).size } checkAnswer(selfJoin, List(Row(1, "1", 1, "1"), Row(3, "3", 3, "3"))) } } test("nested data - struct with array field") { val data = (1 to 10).map(i => Tuple1((i, Seq("val_$i")))) withParquetTable(data, "t") { checkAnswer(sql("SELECT _1._2[0] FROM t"), data.map { case Tuple1((_, Seq(string))) => Row(string) }) } } test("nested data - array of struct") { val data = (1 to 10).map(i => Tuple1(Seq(i -> "val_$i"))) withParquetTable(data, "t") { checkAnswer(sql("SELECT _1[0]._2 FROM t"), data.map { case Tuple1(Seq((_, string))) => Row(string) }) } } test("SPARK-1913 regression: columns only referenced 
by pushed down filters should remain") { withParquetTable((1 to 10).map(Tuple1.apply), "t") { checkAnswer(sql("SELECT _1 FROM t WHERE _1 < 10"), (1 to 9).map(Row.apply(_))) } } test("SPARK-5309 strings stored using dictionary compression in parquet") { withParquetTable((0 until 1000).map(i => ("same", "run_" + i /100, 1)), "t") { checkAnswer(sql("SELECT _1, _2, SUM(_3) FROM t GROUP BY _1, _2"), (0 until 10).map(i => Row("same", "run_" + i, 100))) checkAnswer(sql("SELECT _1, _2, SUM(_3) FROM t WHERE _2 = 'run_5' GROUP BY _1, _2"), List(Row("same", "run_5", 100))) } } test("SPARK-6917 DecimalType should work with non-native types") { val data = (1 to 10).map(i => Row(Decimal(i, 18, 0), new java.sql.Timestamp(i))) val schema = StructType(List(StructField("d", DecimalType(18, 0), false), StructField("time", TimestampType, false)).toArray) withTempPath { file => val df = sqlContext.createDataFrame(sparkContext.parallelize(data), schema) df.write.parquet(file.getCanonicalPath) val df2 = sqlContext.read.parquet(file.getCanonicalPath) checkAnswer(df2, df.collect().toSeq) } } test("Enabling/disabling merging partfiles when merging parquet schema") { def testSchemaMerging(expectedColumnNumber: Int): Unit = { withTempDir { dir => val basePath = dir.getCanonicalPath sqlContext.range(0, 10).toDF("a").write.parquet(new Path(basePath, "foo=1").toString) sqlContext.range(0, 10).toDF("b").write.parquet(new Path(basePath, "foo=2").toString) // delete summary files, so if we don't merge part-files, one column will not be included. Utils.deleteRecursively(new File(basePath + "/foo=1/_metadata")) Utils.deleteRecursively(new File(basePath + "/foo=1/_common_metadata")) assert(sqlContext.read.parquet(basePath).columns.length === expectedColumnNumber) } } withSQLConf(SQLConf.PARQUET_SCHEMA_MERGING_ENABLED.key -> "true", SQLConf.PARQUET_SCHEMA_RESPECT_SUMMARIES.key -> "true") { testSchemaMerging(2) } withSQLConf(SQLConf.PARQUET_SCHEMA_MERGING_ENABLED.key -> "true", SQLConf.PARQUET_SCHEMA_RESPECT_SUMMARIES.key -> "false") { testSchemaMerging(3) } } test("Enabling/disabling schema merging") { def testSchemaMerging(expectedColumnNumber: Int): Unit = { withTempDir { dir => val basePath = dir.getCanonicalPath sqlContext.range(0, 10).toDF("a").write.parquet(new Path(basePath, "foo=1").toString) sqlContext.range(0, 10).toDF("b").write.parquet(new Path(basePath, "foo=2").toString) assert(sqlContext.read.parquet(basePath).columns.length === expectedColumnNumber) } } withSQLConf(SQLConf.PARQUET_SCHEMA_MERGING_ENABLED.key -> "true") { testSchemaMerging(3) } withSQLConf(SQLConf.PARQUET_SCHEMA_MERGING_ENABLED.key -> "false") { testSchemaMerging(2) } } test("SPARK-8990 DataFrameReader.parquet() should respect user specified options") { withTempPath { dir => val basePath = dir.getCanonicalPath sqlContext.range(0, 10).toDF("a").write.parquet(new Path(basePath, "foo=1").toString) sqlContext.range(0, 10).toDF("b").write.parquet(new Path(basePath, "foo=a").toString) // Disables the global SQL option for schema merging withSQLConf(SQLConf.PARQUET_SCHEMA_MERGING_ENABLED.key -> "false") { assertResult(2) { // Disables schema merging via data source option sqlContext.read.option("mergeSchema", "false").parquet(basePath).columns.length } assertResult(3) { // Enables schema merging via data source option sqlContext.read.option("mergeSchema", "true").parquet(basePath).columns.length } } } } test("SPARK-9119 Decimal should be correctly written into parquet") { withTempPath { dir => val basePath = dir.getCanonicalPath val schema = 
StructType(Array(StructField("name", DecimalType(10, 5), false))) val rowRDD = sparkContext.parallelize(Array(Row(Decimal("67123.45")))) val df = sqlContext.createDataFrame(rowRDD, schema) df.write.parquet(basePath) val decimal = sqlContext.read.parquet(basePath).first().getDecimal(0) assert(Decimal("67123.45") === Decimal(decimal)) } } test("SPARK-10005 Schema merging for nested struct") { withTempPath { dir => val path = dir.getCanonicalPath def append(df: DataFrame): Unit = { df.write.mode(SaveMode.Append).parquet(path) } // Note that both the following two DataFrames contain a single struct column with multiple // nested fields. append((1 to 2).map(i => Tuple1((i, i))).toDF()) append((1 to 2).map(i => Tuple1((i, i, i))).toDF()) withSQLConf(SQLConf.PARQUET_BINARY_AS_STRING.key -> "true") { checkAnswer( sqlContext.read.option("mergeSchema", "true").parquet(path), Seq( Row(Row(1, 1, null)), Row(Row(2, 2, null)), Row(Row(1, 1, 1)), Row(Row(2, 2, 2)))) } } } test("SPARK-10301 requested schema clipping - same schema") { withTempPath { dir => val path = dir.getCanonicalPath val df = sqlContext.range(1).selectExpr("NAMED_STRUCT('a', id, 'b', id + 1) AS s").coalesce(1) df.write.parquet(path) val userDefinedSchema = new StructType() .add( "s", new StructType() .add("a", LongType, nullable = true) .add("b", LongType, nullable = true), nullable = true) checkAnswer( sqlContext.read.schema(userDefinedSchema).parquet(path), Row(Row(0L, 1L))) } } test("SPARK-11997 parquet with null partition values") { withTempPath { dir => val path = dir.getCanonicalPath sqlContext.range(1, 3) .selectExpr("if(id % 2 = 0, null, id) AS n", "id") .write.partitionBy("n").parquet(path) checkAnswer( sqlContext.read.parquet(path).filter("n is null"), Row(2, null)) } } // This test case is ignored because of parquet-mr bug PARQUET-370 ignore("SPARK-10301 requested schema clipping - schemas with disjoint sets of fields") { withTempPath { dir => val path = dir.getCanonicalPath val df = sqlContext.range(1).selectExpr("NAMED_STRUCT('a', id, 'b', id + 1) AS s").coalesce(1) df.write.parquet(path) val userDefinedSchema = new StructType() .add( "s", new StructType() .add("c", LongType, nullable = true) .add("d", LongType, nullable = true), nullable = true) checkAnswer( sqlContext.read.schema(userDefinedSchema).parquet(path), Row(Row(null, null))) } } test("SPARK-10301 requested schema clipping - requested schema contains physical schema") { withTempPath { dir => val path = dir.getCanonicalPath val df = sqlContext.range(1).selectExpr("NAMED_STRUCT('a', id, 'b', id + 1) AS s").coalesce(1) df.write.parquet(path) val userDefinedSchema = new StructType() .add( "s", new StructType() .add("a", LongType, nullable = true) .add("b", LongType, nullable = true) .add("c", LongType, nullable = true) .add("d", LongType, nullable = true), nullable = true) checkAnswer( sqlContext.read.schema(userDefinedSchema).parquet(path), Row(Row(0L, 1L, null, null))) } withTempPath { dir => val path = dir.getCanonicalPath val df = sqlContext.range(1).selectExpr("NAMED_STRUCT('a', id, 'd', id + 3) AS s").coalesce(1) df.write.parquet(path) val userDefinedSchema = new StructType() .add( "s", new StructType() .add("a", LongType, nullable = true) .add("b", LongType, nullable = true) .add("c", LongType, nullable = true) .add("d", LongType, nullable = true), nullable = true) checkAnswer( sqlContext.read.schema(userDefinedSchema).parquet(path), Row(Row(0L, null, null, 3L))) } } test("SPARK-10301 requested schema clipping - physical schema contains requested schema") { 
withTempPath { dir => val path = dir.getCanonicalPath val df = sqlContext .range(1) .selectExpr("NAMED_STRUCT('a', id, 'b', id + 1, 'c', id + 2, 'd', id + 3) AS s") .coalesce(1) df.write.parquet(path) val userDefinedSchema = new StructType() .add( "s", new StructType() .add("a", LongType, nullable = true) .add("b", LongType, nullable = true), nullable = true) checkAnswer( sqlContext.read.schema(userDefinedSchema).parquet(path), Row(Row(0L, 1L))) } withTempPath { dir => val path = dir.getCanonicalPath val df = sqlContext .range(1) .selectExpr("NAMED_STRUCT('a', id, 'b', id + 1, 'c', id + 2, 'd', id + 3) AS s") .coalesce(1) df.write.parquet(path) val userDefinedSchema = new StructType() .add( "s", new StructType() .add("a", LongType, nullable = true) .add("d", LongType, nullable = true), nullable = true) checkAnswer( sqlContext.read.schema(userDefinedSchema).parquet(path), Row(Row(0L, 3L))) } } test("SPARK-10301 requested schema clipping - schemas overlap but don't contain each other") { withTempPath { dir => val path = dir.getCanonicalPath val df = sqlContext .range(1) .selectExpr("NAMED_STRUCT('a', id, 'b', id + 1, 'c', id + 2) AS s") .coalesce(1) df.write.parquet(path) val userDefinedSchema = new StructType() .add( "s", new StructType() .add("b", LongType, nullable = true) .add("c", LongType, nullable = true) .add("d", LongType, nullable = true), nullable = true) checkAnswer( sqlContext.read.schema(userDefinedSchema).parquet(path), Row(Row(1L, 2L, null))) } } test("SPARK-10301 requested schema clipping - deeply nested struct") { withTempPath { dir => val path = dir.getCanonicalPath val df = sqlContext .range(1) .selectExpr("NAMED_STRUCT('a', ARRAY(NAMED_STRUCT('b', id, 'c', id))) AS s") .coalesce(1) df.write.parquet(path) val userDefinedSchema = new StructType() .add("s", new StructType() .add( "a", ArrayType( new StructType() .add("b", LongType, nullable = true) .add("d", StringType, nullable = true), containsNull = true), nullable = true), nullable = true) checkAnswer( sqlContext.read.schema(userDefinedSchema).parquet(path), Row(Row(Seq(Row(0, null))))) } } test("SPARK-10301 requested schema clipping - out of order") { withTempPath { dir => val path = dir.getCanonicalPath val df1 = sqlContext .range(1) .selectExpr("NAMED_STRUCT('a', id, 'b', id + 1, 'c', id + 2) AS s") .coalesce(1) val df2 = sqlContext .range(1, 2) .selectExpr("NAMED_STRUCT('c', id + 2, 'b', id + 1, 'd', id + 3) AS s") .coalesce(1) df1.write.parquet(path) df2.write.mode(SaveMode.Append).parquet(path) val userDefinedSchema = new StructType() .add("s", new StructType() .add("a", LongType, nullable = true) .add("b", LongType, nullable = true) .add("d", LongType, nullable = true), nullable = true) checkAnswer( sqlContext.read.schema(userDefinedSchema).parquet(path), Seq( Row(Row(0, 1, null)), Row(Row(null, 2, 4)))) } } test("SPARK-10301 requested schema clipping - schema merging") { withTempPath { dir => val path = dir.getCanonicalPath val df1 = sqlContext .range(1) .selectExpr("NAMED_STRUCT('a', id, 'c', id + 2) AS s") .coalesce(1) val df2 = sqlContext .range(1, 2) .selectExpr("NAMED_STRUCT('a', id, 'b', id + 1, 'c', id + 2) AS s") .coalesce(1) df1.write.mode(SaveMode.Append).parquet(path) df2.write.mode(SaveMode.Append).parquet(path) checkAnswer( sqlContext .read .option("mergeSchema", "true") .parquet(path) .selectExpr("s.a", "s.b", "s.c"), Seq( Row(0, null, 2), Row(1, 2, 3))) } } testStandardAndLegacyModes("SPARK-10301 requested schema clipping - UDT") { withTempPath { dir => val path = dir.getCanonicalPath val df = 
sqlContext .range(1) .selectExpr( """NAMED_STRUCT( | 'f0', CAST(id AS STRING), | 'f1', NAMED_STRUCT( | 'a', CAST(id + 1 AS INT), | 'b', CAST(id + 2 AS LONG), | 'c', CAST(id + 3.5 AS DOUBLE) | ) |) AS s """.stripMargin) .coalesce(1) df.write.mode(SaveMode.Append).parquet(path) val userDefinedSchema = new StructType() .add( "s", new StructType() .add("f1", new NestedStructUDT, nullable = true), nullable = true) checkAnswer( sqlContext.read.schema(userDefinedSchema).parquet(path), Row(Row(NestedStruct(1, 2L, 3.5D)))) } } test("expand UDT in StructType") { val schema = new StructType().add("n", new NestedStructUDT, nullable = true) val expected = new StructType().add("n", new NestedStructUDT().sqlType, nullable = true) assert(CatalystReadSupport.expandUDT(schema) === expected) } test("expand UDT in ArrayType") { val schema = new StructType().add( "n", ArrayType( elementType = new NestedStructUDT, containsNull = false), nullable = true) val expected = new StructType().add( "n", ArrayType( elementType = new NestedStructUDT().sqlType, containsNull = false), nullable = true) assert(CatalystReadSupport.expandUDT(schema) === expected) } test("expand UDT in MapType") { val schema = new StructType().add( "n", MapType( keyType = IntegerType, valueType = new NestedStructUDT, valueContainsNull = false), nullable = true) val expected = new StructType().add( "n", MapType( keyType = IntegerType, valueType = new NestedStructUDT().sqlType, valueContainsNull = false), nullable = true) assert(CatalystReadSupport.expandUDT(schema) === expected) } } object TestingUDT { @SQLUserDefinedType(udt = classOf[NestedStructUDT]) case class NestedStruct(a: Integer, b: Long, c: Double) class NestedStructUDT extends UserDefinedType[NestedStruct] { override def sqlType: DataType = new StructType() .add("a", IntegerType, nullable = true) .add("b", LongType, nullable = false) .add("c", DoubleType, nullable = false) override def serialize(obj: Any): Any = { val row = new SpecificMutableRow(sqlType.asInstanceOf[StructType].map(_.dataType)) obj match { case n: NestedStruct => row.setInt(0, n.a) row.setLong(1, n.b) row.setDouble(2, n.c) } } override def userClass: Class[NestedStruct] = classOf[NestedStruct] override def deserialize(datum: Any): NestedStruct = { datum match { case row: InternalRow => NestedStruct(row.getInt(0), row.getLong(1), row.getDouble(2)) } } } }
repo_name: chenc10/Spark-PAF
path: sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala
language: Scala
license: apache-2.0
size: 19,967
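Condensed from the SPARK-8990 test above, reusing that test's names (basePath, withSQLConf): with the global schema-merging conf disabled, the per-read mergeSchema data source option still decides whether the two partition schemas are merged.

withSQLConf(SQLConf.PARQUET_SCHEMA_MERGING_ENABLED.key -> "false") {
  // basePath is the directory written in that test: foo=1 holds column a, foo=a holds column b
  sqlContext.read.option("mergeSchema", "false").parquet(basePath).columns.length // 2 (a, foo)
  sqlContext.read.option("mergeSchema", "true").parquet(basePath).columns.length  // 3 (a, b, foo)
}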
package io.buoyant.linkerd.protocol.http

import java.util.{TimeZone, logging => javalog}
import com.twitter.finagle.Service
import com.twitter.finagle.http._
import com.twitter.logging._
import com.twitter.util.{Future, Promise, Time, TimeFormat}
import io.buoyant.test.Awaits
import org.scalatest.FunSuite

class AccessLoggerTest extends FunSuite with Awaits {

  object StringLogger extends Logger("string", javalog.Logger.getAnonymousLogger()) {
    val handler = new StringHandler(new Formatter {
      override def format(record: javalog.LogRecord): String = formatText(record)
    }, None)

    clearHandlers()
    addHandler(handler)
    setLevel(Level.INFO)

    def getLoggedLines(): String = handler.get
  }

  test("access logger filter") {
    val done = new Promise[Unit]
    // This timestamp is: Wed, 06 Jan 2016 21:21:26 GMT
    Time.withTimeAt(Time.fromSeconds(1452115286)) { tc =>
      val timeFormat = new TimeFormat("dd/MM/yyyy:HH:mm:ss z", TimeZone.getDefault)
      val service = AccessLogger(StringLogger, timeFormat) andThen Service.mk[Request, Response] { req =>
        val rsp = Response()
        rsp.status = Status.PaymentRequired
        rsp.contentType = "application/json"
        rsp.contentLength = 304374
        Future.value(rsp)
      }

      val req = Request()
      req.method = Method.Head
      req.uri = "/foo?bar=bah"
      req.host = "monkeys"
      req.contentType = "text/plain"
      val f = service(req)

      assert(
        StringLogger.getLoggedLines() ==
          """monkeys 0.0.0.0 - - [06/01/2016:21:21:26 GMT] "HEAD /foo?bar=bah HTTP/1.1" 402 304374 "-" "-""""
      )
    }
  }

  override protected def withFixture(test: NoArgTest) = {
    val currentTimezone = TimeZone.getDefault
    TimeZone.setDefault(TimeZone.getTimeZone("GMT"))
    try {
      super.withFixture(test)
    } finally {
      TimeZone.setDefault(currentTimezone)
    }
  }
}
repo_name: linkerd/linkerd
path: linkerd/protocol/http/src/test/scala/io/buoyant/linkerd/protocol/http/AccessLoggerTest.scala
language: Scala
license: apache-2.0
size: 1,909
/*
 * Copyright 2013 - 2020 Outworkers Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.outworkers.phantom.finagle

import com.outworkers.phantom.tables.{JodaRow, TestDatabase}
import com.outworkers.util.samplers._
import com.twitter.util.{Await, Future}
import org.joda.time.{DateTime, DateTimeZone}
import org.scalameter.api.{Gen => MeterGen, gen => _, _}
import com.twitter.conversions.DurationOps._

class SpoolBenchmarkPerformanceTest extends Bench.LocalTime with TestDatabase.connector.Connector {

  TestDatabase.primitivesJoda.createSchema()
  TestDatabase.primitivesJoda.truncate()

  implicit object JodaTimeSampler extends Sample[DateTime] {
    override def sample: DateTime = DateTime.now(DateTimeZone.UTC)
  }

  val sampleSize = 30000

  Iterator.fill(sampleSize)(gen[JodaRow]).grouped(256).foreach { rs =>
    val chain = rs.map(r => TestDatabase.primitivesJoda.store(r).future.map(_ => ()))
    Await.ready(Future.collect(chain), 1.minutes)
  }

  val sizes: MeterGen[Int] = MeterGen.range("size")(10000, 30000, 10000)

  performance of "ResultSpool" in {
    measure method "fetchSpool" in {
      using(sizes) in { size =>
        Await.ready {
          TestDatabase.primitivesJoda.select.limit(size).fetchSpool().flatMap(_.force)
        }
      }
    }
  }
}
repo_name: outworkers/phantom
path: phantom-finagle/src/test/scala/com/outworkers/phantom/finagle/SpoolBenchmarkPerformanceTest.scala
language: Scala
license: apache-2.0
size: 1,812
/* * Copyright 2016 The BigDL Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intel.analytics.bigdl.dllib.optim import java.util.concurrent.LinkedBlockingQueue import com.intel.analytics.bigdl.Module import com.intel.analytics.bigdl.dllib.nn.abstractnn.Activity import com.intel.analytics.bigdl.serialization.Bigdl.AttrValue.ArrayValue import com.intel.analytics.bigdl.serialization.Bigdl.{AttrValue, BigDLTensor, DataType, TensorStorage} import com.intel.analytics.bigdl.dllib.tensor.Tensor import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric.{NumericBoolean, NumericChar, NumericDouble, NumericFloat, NumericInt, NumericLong, NumericString} import com.intel.analytics.bigdl.dllib.utils._ import com.intel.analytics.bigdl.dllib.utils._ import com.intel.analytics.bigdl.dllib.utils.serializer.converters.DataConverter import com.intel.analytics.bigdl.dllib.utils.serializer.{DeserializeContext, ModuleSerializer, ProtoStorageType, SerializeContext} import scala.collection.JavaConverters._ import scala.collection.mutable import scala.reflect.ClassTag import scala.reflect.runtime.universe.Type import scala.util.{Failure, Success, Try} /** * <h6>Thread-safe Prediction Service for Concurrent Calls</h6> * In this service, concurrency is kept not greater than [[numThreads]] by a `BlockingQueue`, * which contains available model instances. * <br><br> * [[numThreads]] model instances sharing weights/bias * will be put into the `BlockingQueue` during initialization. * <br><br> * When predict method called, service will try to take an instance from `BlockingQueue`, * which means if all instances are on serving, the predicting request will be blocked until * some instances are released. * <br><br> * If exceptions caught during predict, * a scalar Tensor[String] will be returned with thrown message. * * @param model BigDL model used to do predictions * @param numThreads max concurrency */ class PredictionService[T: ClassTag] private[optim]( model: Module[T], numThreads: Int )(implicit ev: TensorNumeric[T]) { protected val instQueue: LinkedBlockingQueue[Module[T]] = { val shallowCopies = (1 to numThreads) .map(_ => model.clone(false).evaluate()).asJava new LinkedBlockingQueue[Module[T]](shallowCopies) } /** * <h6>Thread-safe single sample prediction</h6> * Running model prediction with input Activity as soon as * there exists vacant instances(the size of pool is [[numThreads]]). * Otherwise, it will hold on till some instances are released. * <br><br> * Outputs will be deeply copied after model prediction, so they are invariant. * * @param request input Activity, could be Tensor or Table(key, Tensor) * @return output Activity, could be Tensor or Table(key, Tensor) */ def predict(request: Activity): Activity = { // Take an instance from blocking queue, // it will cause a thread blocking when no instance is available. 
val module = instQueue.take() // do predictions val forwardResult = Try(module.forward(request)) match { case Success(activity) => activity case Failure(e) => errorTensor("running forward", e) } // cloned values after prediction finished val output = try { forwardResult match { case tensor: Tensor[_] => tensor.clone() case table: Table => val clonedMap = mutable.HashMap[Any, Any]() table.getState().foreach { x => (x: @unchecked) match { case (k: Tensor[_], v: Tensor[_]) => clonedMap += k.clone() -> v.clone() case (k, v: Tensor[_]) => clonedMap += k -> v.clone() } } new Table(clonedMap) } } catch { case e: Throwable => errorTensor("Clone Result", e) } finally { // Release module instance back to blocking queue instQueue.offer(module) } output } /** * <h6>Thread-safe single sample prediction</h6> * Firstly, deserialization tasks will be run with inputs(Array[Byte]). * <br><br> * Then, run model prediction with deserialized inputs * as soon as there exists vacant instances(total number is [[numThreads]]). * Otherwise, it will hold on till some instances are released. * <br><br> * Finally, prediction results will be serialized to Array[Byte] according to BigDL.proto. * * @param request input bytes, which will be deserialized by BigDL.proto * @return output bytes, which is serialized by BigDl.proto */ def predict(request: Array[Byte]): Array[Byte] = { val output = Try( PredictionService.deSerializeActivity(request) ) match { case Success(activity) => predict(activity) case Failure(e) => errorTensor("DeSerialize Input", e) } val bytesOut = try { PredictionService.serializeActivity(output) } catch { case e: Throwable => val act = errorTensor("Serialize Output", e) PredictionService.serializeActivity(act) } bytesOut } private def errorTensor(stage: String, e: Throwable): Tensor[String] = { val msg = s"Exception caught during [$stage]! \\n" + s"The message is ${e.getMessage} \\n" + s"The cause is ${e.getCause}" Tensor.scalar(msg) } } object PredictionService { /** * <h6>Thread-safe Prediction Service for Concurrent Calls</h6> * In this service, concurrency is kept not greater than `numThreads` by a `BlockingQueue`, * which contains available model instances. * <br><br> * If exceptions caught during predict, * a scalar Tensor[String] will be returned with thrown message. * * @param model BigDL model used to do predictions * @param numThreads max concurrency * @return a PredictionService instance */ def apply[T: ClassTag]( model: Module[T], numThreads: Int )(implicit ev: TensorNumeric[T]): PredictionService[T] = { new PredictionService[T](model, numThreads) } /** * <h6>Serialize activities to Array[Byte] according to `Bigdl.proto`.</h6> * For now, `Tensor` and `Table[primitive|Tensor, Tensor]` are supported. 
* * @param activity activity to be serialized */ def serializeActivity(activity: Activity): Array[Byte] = { val attrBuilder = AttrValue.newBuilder() activity match { case table: Table => var keyIsPrimitive = true val firstKey = table.getState().head._1 val tensorState: Array[(Tensor[_], Tensor[_])] = firstKey match { case _: Tensor[_] => keyIsPrimitive = false table.getState().map { x => (x: @unchecked) match { case (k: Tensor[_], v: Tensor[_]) => k -> v }}.toArray case _: Int => table.getState().map { x => (x: @unchecked) match { case (k: Int, v: Tensor[_]) => Tensor.scalar(k) -> v }}.toArray case _: Long => table.getState().map { x => (x: @unchecked) match { case (k: Long, v: Tensor[_]) => Tensor.scalar(k) -> v }}.toArray case _: Char => table.getState().map { x => (x: @unchecked) match { case (k: Char, v: Tensor[_]) => Tensor.scalar(k) -> v }}.toArray case _: Short => table.getState().map {x => (x: @unchecked) match { case (k: Short, v: Tensor[_]) => Tensor.scalar(k) -> v }}.toArray case _: Float => table.getState().map { x => (x: @unchecked) match { case (k: Float, v: Tensor[_]) => Tensor.scalar(k) -> v }}.toArray case _: Double => table.getState().map { x => (x: @unchecked) match { case (k: Double, v: Tensor[_]) => Tensor.scalar(k) -> v }}.toArray case _: Boolean => table.getState().map { x => (x: @unchecked) match { case (k: Boolean, v: Tensor[_]) => Tensor.scalar(k) -> v }}.toArray case _: String => table.getState().map { x => (x: @unchecked) match { case (k: String, v: Tensor[_]) => Tensor.scalar(k) -> v }}.toArray case key => throw new UnsupportedOperationException(s"Unsupported Table key: $key!") } val (keys, values) = tensorState.unzip // tensors structure: [isKeyPrimitive, keys, values] val tensors = Array(Tensor.scalar(keyIsPrimitive)) ++ keys ++ values val arrayValue = ArrayValue.newBuilder arrayValue.setDatatype(DataType.TENSOR) arrayValue.setSize(tensors.length) tensors.foreach { tensor => arrayValue.addTensor(buildBigDLTensor(tensor, attrBuilder)) attrBuilder.clear() } attrBuilder.setDataType(DataType.ARRAY_VALUE) attrBuilder.setArrayValue(arrayValue) case tensor: Tensor[_] => attrBuilder.setTensorValue(buildBigDLTensor(tensor, attrBuilder)) case _ => throw new UnsupportedOperationException("Unsupported Activity Type!") } val attr = attrBuilder.build() attr.toByteArray } /** * <h6>Deserialize Array[Byte] to activities according to `Bigdl.proto`.</h6> * For now, `Tensor` and `Table[primitive|Tensor, Tensor]` are supported. * It will convert `AttrValue(Array(BigdlTensor))` to a `Table`. * It will convert `AttrValue(BigdlTensor) ` to a `Tensor`. 
* * @param bytes bytes data for Activity to be deserialized */ def deSerializeActivity(bytes: Array[Byte]): Activity = { val attr = AttrValue.parseFrom(bytes) attr.getDataType match { case DataType.ARRAY_VALUE => val dataType = attr.getArrayValue.getTensor(0).getDatatype // tensors structure: [isKeyPrimitive, keys, values] val tensors = getAttr(dataType, attr).asInstanceOf[Array[Tensor[_]]] val nElement = (tensors.length - 1) / 2 val keyIsPrimitive = tensors.head.asInstanceOf[Tensor[Boolean]].value() val _keys = tensors.slice(1, nElement + 1) val keys = if (keyIsPrimitive) _keys.map(_.value()) else _keys val values = tensors.slice(nElement + 1, tensors.length) val table = T() keys.zip(values).foreach { case(k, v) => table.update(k, v) } table case DataType.TENSOR => val tValue = attr.getTensorValue val tensor = getAttr(tValue.getDatatype, attr) tensor.asInstanceOf[Tensor[_]] case tpe => throw new UnsupportedOperationException(s"Unsupported DataType($tpe)!") } } private def buildBigDLTensor(tensor: Tensor[_], attrBuilder: AttrValue.Builder): BigDLTensor = { val status = mutable.HashMap[Int, Any]() val partial = partialSetAttr(tensor.getTensorNumeric(), status) partial(attrBuilder, tensor, ModuleSerializer.tensorType) val tensorId = System.identityHashCode(tensor) val _tensor = status(tensorId).asInstanceOf[BigDLTensor] val tensorBuilder = BigDLTensor.newBuilder(_tensor) val storageId = System.identityHashCode(tensor.storage().array()) val _storage = status(storageId).asInstanceOf[TensorStorage] tensorBuilder.setStorage(_storage) tensorBuilder.build() } private def partialSetAttr(numeric: TensorNumeric[_], status: mutable.HashMap[Int, Any]) = { numeric match { case NumericFloat => val sc = SerializeContext[Float](null, status, ProtoStorageType) (attrBuilder: AttrValue.Builder, value: Any, tpe: Type) => DataConverter.setAttributeValue[Float](sc, attrBuilder, value, tpe) case NumericDouble => val sc = SerializeContext[Double](null, status, ProtoStorageType) (attrBuilder: AttrValue.Builder, value: Any, tpe: Type) => DataConverter.setAttributeValue[Double](sc, attrBuilder, value, tpe) case NumericChar => val sc = SerializeContext[Char](null, status, ProtoStorageType) (attrBuilder: AttrValue.Builder, value: Any, tpe: Type) => DataConverter.setAttributeValue[Char](sc, attrBuilder, value, tpe) case NumericBoolean => val sc = SerializeContext[Boolean](null, status, ProtoStorageType) (attrBuilder: AttrValue.Builder, value: Any, tpe: Type) => DataConverter.setAttributeValue[Boolean](sc, attrBuilder, value, tpe) case NumericString => val sc = SerializeContext[String](null, status, ProtoStorageType) (attrBuilder: AttrValue.Builder, value: Any, tpe: Type) => DataConverter.setAttributeValue[String](sc, attrBuilder, value, tpe) case NumericInt => val sc = SerializeContext[Int](null, status, ProtoStorageType) (attrBuilder: AttrValue.Builder, value: Any, tpe: Type) => DataConverter.setAttributeValue[Int](sc, attrBuilder, value, tpe) case NumericLong => val sc = SerializeContext[Long](null, status, ProtoStorageType) (attrBuilder: AttrValue.Builder, value: Any, tpe: Type) => DataConverter.setAttributeValue[Long](sc, attrBuilder, value, tpe) } } private def getAttr(dataType: DataType, attr: AttrValue) = { val status = mutable.HashMap[Int, Any]() val dsc = DeserializeContext(null, status, ProtoStorageType) dataType match { case DataType.INT32 => DataConverter.getAttributeValue[Int](dsc, attr) case DataType.INT64 => DataConverter.getAttributeValue[Long](dsc, attr) case DataType.FLOAT => 
DataConverter.getAttributeValue[Float](dsc, attr) case DataType.DOUBLE => DataConverter.getAttributeValue[Double](dsc, attr) case DataType.STRING => DataConverter.getAttributeValue[String](dsc, attr) case DataType.BOOL => DataConverter.getAttributeValue[Boolean](dsc, attr) case DataType.CHAR => DataConverter.getAttributeValue[Char](dsc, attr) case _ => throw new UnsupportedOperationException(s"Unsupported DataType($dataType)!") } } }
intel-analytics/BigDL
scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/optim/PredictionService.scala
Scala
apache-2.0
14,476
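Not part of the repository file above — a minimal round-trip sketch of the two helpers it defines. It assumes serializeActivity and deSerializeActivity are reachable on the file's companion object PredictionService, and the import paths for Tensor and T are guesses based on the repository layout shown; the table contents are made up for illustration.

import com.intel.analytics.bigdl.dllib.optim.PredictionService._
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.utils.T

object ActivitySerializationSketch extends App {
  // A Table keyed by primitives with Tensor values -- one of the supported shapes.
  val table = T()
  table.update(1, Tensor.scalar(0.5f))
  table.update(2, Tensor.scalar(1.5f))

  val bytes: Array[Byte] = serializeActivity(table) // Table -> AttrValue(Array(BigdlTensor)) -> bytes
  val restored = deSerializeActivity(bytes)         // bytes -> Table again
  println(restored)
}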
/*
 * Copyright 2016 Dennis Vriend
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.github.dnvriend.scaffold.play.enabler.lagom

import com.github.dnvriend.scaffold.play.enabler.EnablerContext

// see: https://github.com/lagom/lagom/tree/1.3.0-M1
object LagomEnabler {
}

object Template {
  def settings(ctx: EnablerContext): String =
    s"""
      |libraryDependencies += "com.lightbend.lagom" %% "lagom-scaladsl-api" % "${ctx.lagomVersion}"
      |libraryDependencies += "com.lightbend.lagom" %% "lagom-logback" % "${ctx.lagomVersion}"
      |libraryDependencies += "com.lightbend.lagom" %% "lagom-scaladsl-persistence-cassandra" % "${ctx.lagomVersion}"
      |libraryDependencies += "com.lightbend.lagom" %% "lagom-reloadable-server" % "${ctx.lagomVersion}"
      |libraryDependencies += "com.lightbend.lagom" %% "lagom-scaladsl-testkit" % "${ctx.lagomVersion}" % Test
    """.stripMargin
}
dnvriend/sbt-scaffold-play
src/main/scala/com/github/dnvriend/scaffold/play/enabler/lagom/LagomEnabler.scala
Scala
apache-2.0
1,428
package com.zengularity.trollbot import akka.stream._ import akka.stream.io._ import akka.stream.scaladsl._ import akka.actor._ import akka.http.scaladsl.model._ import akka.http.scaladsl._ import akka.util.ByteString import scala.concurrent.duration._ import scala.concurrent._ object Main { def main(args: Array[String]) { println("-- Hello trollers --") args.map { arg => println("Keywords:" + arg) } implicit val system = ActorSystem("reactive-tweets") implicit val mat = ActorMaterializer() TwitterApp.getStream(args) // val tweets: Source[Tweet, Unit] = ??? // val connectionFlow = Http().outgoingConnectionTls("stream.twitter.com") // val baseRequest = HttpRequest(method= HttpMethods.POST, uri = "/1.1/statuses/filter.json") // val signedRequest = signedRequest(baseRequest) // val response = Source.single(HttpRequest(method= HttpMethods.POST, uri = "/1.1/statuses/filter.json")) // .via(connectionFlow) // .runWith(Sink.head) // // val res = Await.result(response, 1 second) // println(res) // //system.shutdown() } // def signedRequest(req: HttpRequest): HttpRequest = { // // } def twitterActorSource(terms: Array[String])(implicit mat: Materializer): Source[Tweet, Unit] = { val (actorRef, publisher) = Source.actorRef[Tweet](1000, OverflowStrategy.dropHead).toMat(Sink.publisher)(Keep.both).run() val twitterListener = new twitter4j.StatusListener() { def onStatus(status: twitter4j.Status) { val t = Tweet(status.getUser().getName(), status.getText, Option(status.getInReplyToStatusId())) actorRef ! t } def onDeletionNotice(statusDeletionNotice: twitter4j.StatusDeletionNotice) {} def onTrackLimitationNotice(numberOfLimitedStatuses: Int) {} def onException(ex: Exception) { ex.printStackTrace } def onScrubGeo(arg0: Long, arg1: Long) {} def onStallWarning(warning: twitter4j.StallWarning) {} } TwitterApp.setupStream(twitterListener, terms) Source(publisher) } def twitterQueueSource(terms: Array[String]): Source[Tweet, Unit] = { Source(() => TwitterApp.getStream(terms)) } val fileSink: Sink[ByteString, Future[Long]] = SynchronousFileSink(new java.io.File("tweets.csv"), append = true) }
vdebergue/trollbot
src/main/scala/com/zengularity/trollbot/Main.scala
Scala
mit
2,305
package org.scalacoin.script import org.scalacoin.crypto.{TransactionSignatureComponentFactory, TransactionSignatureComponent} import org.scalacoin.protocol.script.{ScriptSignature, ScriptPubKey} import org.scalacoin.protocol.transaction.Transaction import org.scalacoin.script.constant._ import org.scalacoin.script.error.ScriptError import org.scalacoin.script.flag.ScriptFlag import org.scalacoin.util.Factory /** * Created by chris on 2/3/16. */ sealed trait ScriptProgram { /** * This contains all relevant information for hashing and checking a signature for a bitcoin transaction * * @return */ def txSignatureComponent : TransactionSignatureComponent /** * The current state of the stack for execution of the program * * @return */ def stack : List[ScriptToken] /** * The script operations that need to still be executed * * @return */ def script : List[ScriptToken] /** * The original script that was given t * * @return */ def originalScript : List[ScriptToken] /** * The alternative stack is used in some Script op codes * * @return */ def altStack : List[ScriptToken] /** * Flags that are run with the script * these flags indicate special conditions that a script needs to be run with * see: https://github.com/bitcoin/bitcoin/blob/master/src/script/interpreter.h#L31 * * @return */ def flags : Seq[ScriptFlag] /** * Returns if the stack top is true * * @return */ def stackTopIsTrue = !stackTopIsFalse /** * Returns if the stack top is false * * @return */ def stackTopIsFalse : Boolean = { if (stack.headOption.isDefined && (stack.head.hex == OP_FALSE.hex || stack.head.hex == ScriptNumberFactory.negativeZero.hex || stack.head.hex == ScriptNumberFactory.zero.hex)) true else if (!stack.headOption.isDefined) true else false } } /** * This represents a ScriptProgram before any script operations have been executed in the interpreter */ sealed trait PreExecutionScriptProgram extends ScriptProgram sealed trait ExecutionInProgressScriptProgram extends ScriptProgram { /** * The index of the last OP_CODESEPARATOR * @return */ def lastCodeSeparator : Int } sealed trait ExecutedScriptProgram extends ScriptProgram { /** * Indicates if the program has encountered a ScriptError in its execution * @return */ def error : Option[ScriptError] } /** * Factory companion object for ScriptProgram */ object ScriptProgram { /** * Implentation type for a script program that has not been executed at all * @param txSignatureComponent * @param stack * @param script * @param originalScript * @param altStack * @param flags */ private sealed case class PreExecutionScriptProgramImpl(txSignatureComponent : TransactionSignatureComponent, stack : List[ScriptToken],script : List[ScriptToken], originalScript : List[ScriptToken], altStack : List[ScriptToken], flags : Seq[ScriptFlag]) extends PreExecutionScriptProgram /** * Implementation type for a script program that is currently being executed by the script interpreter * @param txSignatureComponent * @param stack * @param script * @param originalScript * @param altStack * @param flags * @param lastCodeSeparator */ private sealed case class ExecutionInProgressScriptProgramImpl(txSignatureComponent : TransactionSignatureComponent, stack : List[ScriptToken],script : List[ScriptToken], originalScript : List[ScriptToken], altStack : List[ScriptToken], flags : Seq[ScriptFlag], lastCodeSeparator : Int = 0) extends ExecutionInProgressScriptProgram /** * The implementation type for a script program that is finished being executed by the script interpreter * @param txSignatureComponent * @param 
stack * @param script * @param originalScript * @param altStack * @param flags * @param error */ private sealed case class ExecutedScriptProgramImpl(txSignatureComponent : TransactionSignatureComponent, stack : List[ScriptToken],script : List[ScriptToken], originalScript : List[ScriptToken], altStack : List[ScriptToken], flags : Seq[ScriptFlag], error : Option[ScriptError]) extends ExecutedScriptProgram //indicates whether the script or the stack needs to be updated sealed trait UpdateIndicator case object Stack extends UpdateIndicator case object Script extends UpdateIndicator case object AltStack extends UpdateIndicator /** * Sets an error on the script program * @param oldProgram the program who has hit an invalid state * @param error the error that thet program hit while being executed in the script interpreter * @return the ExecutedScriptProgram with the given error set inside of the trait */ def factory(oldProgram : ScriptProgram, error : ScriptError) : ExecutedScriptProgram = oldProgram match { case program : PreExecutionScriptProgram => throw new RuntimeException("We cannot set an error on the script program before it is executed") case program : ExecutionInProgressScriptProgram => ExecutedScriptProgramImpl(program.txSignatureComponent, program.stack, program.script,program.originalScript, program.altStack, program.flags, Some(error)) case program : ExecutedScriptProgram => ExecutedScriptProgramImpl(program.txSignatureComponent, program.stack, program.script,program.originalScript, program.altStack, program.flags, Some(error)) } /** * Updates the program script verify flags * @param oldProgram * @param flags * @return */ def factory(oldProgram : ScriptProgram, flags : Seq[ScriptFlag]) : ScriptProgram = oldProgram match { case program : PreExecutionScriptProgram => PreExecutionScriptProgramImpl(program.txSignatureComponent,program.stack,program.script,program.originalScript, program.altStack,flags) case program : ExecutionInProgressScriptProgram => ExecutionInProgressScriptProgramImpl(program.txSignatureComponent, program.stack,program.script,program.originalScript, program.altStack, flags, program.lastCodeSeparator) case program : ExecutedScriptProgram => throw new RuntimeException("Cannot update the script flags on a program that has been executed") } /** * Changes the tokens in either the Stack or the Script depending in the indicator * @param oldProgram * @param tokens * @param indicator * @return */ def factory(oldProgram : ScriptProgram, tokens : Seq[ScriptToken], indicator : UpdateIndicator) : ScriptProgram = { indicator match { case Stack => oldProgram match { case program : PreExecutionScriptProgram => PreExecutionScriptProgramImpl(program.txSignatureComponent, tokens.toList,program.script,program.originalScript, program.altStack,program.flags) case program : ExecutionInProgressScriptProgram => ExecutionInProgressScriptProgramImpl(program.txSignatureComponent,tokens.toList,program.script,program.originalScript, program.altStack,program.flags,program.lastCodeSeparator) case program : ExecutedScriptProgram => throw new RuntimeException("Cannot update stack for program that has been fully executed") } case Script => oldProgram match { case program : PreExecutionScriptProgram => PreExecutionScriptProgramImpl(program.txSignatureComponent, program.stack,tokens.toList,program.originalScript, program.altStack,program.flags) case program : ExecutionInProgressScriptProgram => ExecutionInProgressScriptProgramImpl(program.txSignatureComponent, program.stack, tokens.toList, 
program.originalScript, program.altStack, program.flags) case program : ExecutedScriptProgram => throw new RuntimeException("Cannot update the script for a program that has been fully executed") } case AltStack => oldProgram match { case program : PreExecutionScriptProgram => PreExecutionScriptProgramImpl(program.txSignatureComponent, program.stack,program.script,program.originalScript, tokens.toList,program.flags) case program : ExecutionInProgressScriptProgram => ExecutionInProgressScriptProgramImpl(program.txSignatureComponent, program.stack, program.script, program.originalScript, tokens.toList, program.flags) case program : ExecutedScriptProgram => throw new RuntimeException("Cannot update the alt stack for a program that has been fully executed") } } } /** * Changes the stack tokens and script tokens in a ScriptProgram * @param oldProgram * @param stackTokens * @param scriptTokens * @return */ def factory(oldProgram : ScriptProgram, stackTokens : Seq[ScriptToken], scriptTokens : Seq[ScriptToken]) : ScriptProgram = { val updatedStack = apply(oldProgram,stackTokens,Stack) val updatedScript = apply(updatedStack,scriptTokens,Script) updatedScript } /** * Updates the last OP_CODESEPARATOR index * @param oldProgram * @param lastCodeSeparator * @return */ def factory(oldProgram : ExecutionInProgressScriptProgram, lastCodeSeparator : Int) : ExecutionInProgressScriptProgram = { ExecutionInProgressScriptProgramImpl(oldProgram.txSignatureComponent, oldProgram.stack, oldProgram.script, oldProgram.originalScript, oldProgram.altStack, oldProgram.flags,lastCodeSeparator) } /** * Updates the tokens in either the stack or script and the last OP_CODESEPARATOR index * @param oldProgram * @param tokens * @param indicator * @param lastCodeSeparator * @return */ def factory(oldProgram : ExecutionInProgressScriptProgram, tokens : Seq[ScriptToken], indicator: UpdateIndicator, lastCodeSeparator : Int) : ExecutionInProgressScriptProgram = { val updatedIndicator = apply(oldProgram, tokens, indicator) updatedIndicator match { case e : ExecutionInProgressScriptProgram => apply(e,lastCodeSeparator) case _ : PreExecutionScriptProgram | _ : ExecutedScriptProgram => throw new RuntimeException("We must have a ExecutionInProgressScriptProgram to update the last OP_CODESEPARATOR index") } } /** * Updates the stack, script, alt stack of the given oldProgram * @param oldProgram * @param stack * @param script * @param altStack * @return */ def factory(oldProgram : ScriptProgram, stack : Seq[ScriptToken], script : Seq[ScriptToken], altStack : Seq[ScriptToken]) : ScriptProgram = { val updatedProgramStack = apply(oldProgram,stack, Stack) val updatedProgramScript = apply(updatedProgramStack, script, Script) val updatedProgramAltStack = apply(updatedProgramScript, altStack, AltStack) updatedProgramAltStack } /** * Creates a new script program that can be used to verify if a transaction at the given inputIndex * spends a given scriptPubKey correctly. 
Assumes that the script to be executed is the * scriptSignature at the given input index * * @param transaction the transaction that is being checked * @param scriptPubKey the scriptPubKey for which the input is spending * @param inputIndex the input's index inside of transaction which we are spending * @param flags the flags which we are enforcing inside of the script interpreter * @return the script program representing all of this information */ def factory(transaction: Transaction, scriptPubKey : ScriptPubKey, inputIndex : Int, flags : Seq[ScriptFlag]) : PreExecutionScriptProgram = { val script = transaction.inputs(inputIndex).scriptSignature.asm apply(transaction,scriptPubKey,inputIndex,script.toList,flags) } /** * Creates a new script program that can be used to verify if a transaction at the given inputIndex * spends a given scriptPubKey correctly * * @param transaction the transaction that is being checked * @param scriptPubKey the scriptPubKey for which the input is spending * @param inputIndex the input's index inside of transaction which we are spending * @param script the script that we are currently executing * @param flags the flags which we are enforcing inside of the script interpreter * @return the script program representing all of this information */ def factory(transaction: Transaction, scriptPubKey : ScriptPubKey, inputIndex : Int, script : Seq[ScriptToken], flags : Seq[ScriptFlag]) : PreExecutionScriptProgram = { val txSignatureComponent = TransactionSignatureComponentFactory.factory(transaction,inputIndex,scriptPubKey,flags) PreExecutionScriptProgramImpl(txSignatureComponent,List(),script.toList,script.toList,List(),flags) } /** * The intention for this factory function is to allow us to create a program that already has a stack state. This * is useful for after execution of a scriptSig, copying the stack into this program with the scriptPubKey read to * run inside the script variable * @param transaction the transaction being checked * @param scriptPubKey the scriptPubKey which the input is spending * @param inputIndex the input's index inside of the transaction we are spending * @param stack the current stack state of the program * @param script the script that we need to execute * @param flags the flags which we are enforcing inside of the script interpeter */ def factory(transaction: Transaction, scriptPubKey : ScriptPubKey, inputIndex : Int, stack : Seq[ScriptToken], script : Seq[ScriptToken], flags : Seq[ScriptFlag]) : ScriptProgram = { val program = factory(transaction,scriptPubKey,inputIndex,script,flags) apply(program,stack,Stack) } /** * The intention for this factory function is to allow us to create a program that already has a stack state. 
This * is useful for after execution of a scriptSig, copying the stack into this program with the scriptPubKey read to * run inside the script variable * * @param txSignatureComponent the relevant transaction information for execution of a script program * @param stack the current stack state of the program * @param script the script that we need to execute * @return */ def factory(txSignatureComponent : TransactionSignatureComponent, stack : Seq[ScriptToken], script : Seq[ScriptToken]) : ScriptProgram = { apply(txSignatureComponent.transaction,txSignatureComponent.scriptPubKey,txSignatureComponent.inputIndex, stack,script,txSignatureComponent.flags) } def apply(oldProgram : ScriptProgram, error : ScriptError) : ExecutedScriptProgram = factory(oldProgram, error) def apply(oldProgram : ScriptProgram, flags : Seq[ScriptFlag]) : ScriptProgram = factory(oldProgram, flags) def apply(oldProgram : ScriptProgram, tokens : Seq[ScriptToken], indicator : UpdateIndicator) : ScriptProgram = factory(oldProgram, tokens, indicator) def apply(oldProgram : ScriptProgram, stackTokens : Seq[ScriptToken], scriptTokens : Seq[ScriptToken]) : ScriptProgram = factory(oldProgram, stackTokens, scriptTokens) def apply(oldProgram : ExecutionInProgressScriptProgram, lastCodeSeparator : Int) : ExecutionInProgressScriptProgram = factory(oldProgram, lastCodeSeparator) def apply(oldProgram : ExecutionInProgressScriptProgram, tokens : Seq[ScriptToken], indicator: UpdateIndicator, lastCodeSeparator : Int) : ExecutionInProgressScriptProgram = factory(oldProgram, tokens, indicator, lastCodeSeparator) def apply(oldProgram : ScriptProgram, stack : Seq[ScriptToken], script : Seq[ScriptToken], altStack : Seq[ScriptToken], updateIndicator: UpdateIndicator) : ScriptProgram = factory(oldProgram, stack, script, altStack) def apply(transaction: Transaction, scriptPubKey : ScriptPubKey, inputIndex : Int, flags : Seq[ScriptFlag]) : PreExecutionScriptProgram = factory(transaction, scriptPubKey, inputIndex, flags) def apply(transaction: Transaction, scriptPubKey : ScriptPubKey, inputIndex : Int, script : Seq[ScriptToken], flags : Seq[ScriptFlag]) : PreExecutionScriptProgram = factory(transaction, scriptPubKey, inputIndex, script, flags) def apply(transaction: Transaction, scriptPubKey : ScriptPubKey, inputIndex : Int, stack : Seq[ScriptToken], script : Seq[ScriptToken], flags : Seq[ScriptFlag]) : ScriptProgram = factory(transaction, scriptPubKey, inputIndex, stack, script, flags) def apply(txSignatureComponent : TransactionSignatureComponent, stack : Seq[ScriptToken], script : Seq[ScriptToken]) : ScriptProgram = factory(txSignatureComponent, stack, script) /** * Changes a program that is being executed inside o * @param executionInProgressScriptProgram * @return */ def toExecutedProgram(executionInProgressScriptProgram: ExecutionInProgressScriptProgram) : ExecutedScriptProgram = { ExecutedScriptProgramImpl(executionInProgressScriptProgram.txSignatureComponent, executionInProgressScriptProgram.stack, executionInProgressScriptProgram.script,executionInProgressScriptProgram.originalScript,executionInProgressScriptProgram.altStack, executionInProgressScriptProgram.flags,None) } /** * Takes a script program that is pre execution and changes it to an execution in progress script program * @param preExecutionScriptProgram * @return */ def toExecutionInProgress(preExecutionScriptProgram: PreExecutionScriptProgram) : ExecutionInProgressScriptProgram = { toExecutionInProgress(preExecutionScriptProgram,None) } /** * Changes a pre execution script 
program to a execution in progress script program with the given stack state * @param preExecutionScriptProgram * @param stack * @return */ def toExecutionInProgress(preExecutionScriptProgram: PreExecutionScriptProgram, stack : Option[List[ScriptToken]]) : ExecutionInProgressScriptProgram = { stack match { case Some(stackTokens) => ExecutionInProgressScriptProgramImpl(preExecutionScriptProgram.txSignatureComponent,stackTokens,preExecutionScriptProgram.script, preExecutionScriptProgram.originalScript,preExecutionScriptProgram.altStack,preExecutionScriptProgram.flags, 0) case None => ExecutionInProgressScriptProgramImpl(preExecutionScriptProgram.txSignatureComponent,preExecutionScriptProgram.stack,preExecutionScriptProgram.script, preExecutionScriptProgram.originalScript,preExecutionScriptProgram.altStack,preExecutionScriptProgram.flags, 0) } } }
TomMcCabe/scalacoin
src/main/scala/org/scalacoin/script/ScriptProgram.scala
Scala
mit
18,775
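Not part of the repository file above — a short usage sketch of the ScriptProgram companion object, exercising only the apply overloads and case objects it defines. The transaction, scriptPubKey, and flags are assumed to be supplied by the caller; OP_FALSE comes from org.scalacoin.script.constant, which the file already imports.

import org.scalacoin.protocol.script.ScriptPubKey
import org.scalacoin.protocol.transaction.Transaction
import org.scalacoin.script.ScriptProgram
import org.scalacoin.script.constant.OP_FALSE
import org.scalacoin.script.flag.ScriptFlag

object ScriptProgramSketch {
  def run(tx: Transaction, scriptPubKey: ScriptPubKey, flags: Seq[ScriptFlag]): Unit = {
    // Pre-execution program for input 0: empty stack, script taken from the input's scriptSignature.
    val program = ScriptProgram(tx, scriptPubKey, 0, flags)
    // Replace the stack with a single token; the UpdateIndicator picks which field changes.
    val withStack = ScriptProgram(program, Seq(OP_FALSE), ScriptProgram.Stack)
    // Hand over to the interpreter by switching to the execution-in-progress representation.
    val inProgress = ScriptProgram.toExecutionInProgress(program)
    println((withStack.stack, inProgress.lastCodeSeparator))
  }
}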
package org.scalajs.testsuite.javalib

import java.{util => ju}

abstract class AbstractSetTest[F <: AbstractSetFactory](val factory: F) extends SetTest {
  describe(factory.implementationName) {
    testApi()
  }

  def testApi(): Unit = {
    testSetApi(factory)
  }
}

object AbstractSetFactory {
  def allFactories: Iterator[AbstractSetFactory] =
    HashSetFactory.allFactories
}

trait AbstractSetFactory extends SetFactory {
  def empty[E]: ju.AbstractSet[E]
}
doron123/scala-js
test-suite/src/test/scala/org/scalajs/testsuite/javalib/AbstractSetTest.scala
Scala
bsd-3-clause
469
package fpinscala.errorhandling

import scala.{Option => _, Some => _, Either => _, _} // hide std library `Option`, `Some` and `Either`, since we are writing our own in this chapter

sealed trait Option[+A] {
  def map[B](f: A => B): Option[B] = this match {
    case Some(a) => Some(f(a))
    case None => None
  }

  def getOrElse[B >: A](default: => B): B = this match {
    case Some(a) => a
    case None => default
  }

  def flatMap[B](f: A => Option[B]): Option[B] = this match {
    case None => None
    case Some(a) => f(a)
  }

  def orElse[B >: A](ob: => Option[B]): Option[B] = this match {
    case None => ob
    case _ => this
  }

  def filter(f: A => Boolean): Option[A] = this match {
    case None => None
    case Some(a) => if (f(a)) this else None
  }
}

case class Some[+A](get: A) extends Option[A]
case object None extends Option[Nothing]

object Option {
  def failingFn(i: Int): Int = {
    val y: Int = throw new Exception("fail!") // `val y: Int = ...` declares `y` as having type `Int`, and sets it equal to the right hand side of the `=`.
    try {
      val x = 42 + 5
      x + y
    }
    catch { case e: Exception => 43 } // A `catch` block is just a pattern matching block like the ones we've seen. `case e: Exception` is a pattern that matches any `Exception`, and it binds this value to the identifier `e`. The match returns the value 43.
  }

  def failingFn2(i: Int): Int = {
    try {
      val x = 42 + 5
      x + ((throw new Exception("fail!")): Int) // A thrown Exception can be given any type; here we're annotating it with the type `Int`
    }
    catch { case e: Exception => 43 }
  }

  def mean(xs: Seq[Double]): Option[Double] =
    if (xs.isEmpty) None
    else Some(xs.sum / xs.length)

  def variance(xs: Seq[Double]): Option[Double] = {
    val mv = mean(xs)
    mv.flatMap((m) => mean(xs.map((x) => math.pow(x - m, 2))))
  }

  def map2[A, B, C](a: Option[A], b: Option[B])(f: (A, B) => C): Option[C] = {
    a flatMap ((x) => b flatMap ((y) => Some(f(x, y))))
  }

  def sequence[A](a: List[Option[A]]): Option[List[A]] = a match {
    case Nil => Some(Nil)
    case (x :: xs) => x flatMap (n => sequence(xs) map (n :: _))
  }

  def traverse[A, B](a: List[A])(f: A => Option[B]): Option[List[B]] = a match {
    case Nil => Some(Nil)
    case (x :: xs) => f(x) flatMap (n => traverse(xs)(f) map (n :: _))
  }
}
everyevery/fpinscala
exercises/src/main/scala/fpinscala/errorhandling/Option.scala
Scala
mit
2,357
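Not part of the repository file above — a small sketch of exercising the Option defined there (not the standard-library one). The hiding import mirrors the one used in the source file, extended to hide scala.None as well; expected results are noted in comments.

import scala.{Option => _, Some => _, None => _, Either => _, _}
import fpinscala.errorhandling._

object OptionSketch extends App {
  println(Some("42").map(_.toInt))                          // Some(42)
  println(Option.map2(Some(1), Some(2))(_ + _))             // Some(3)
  println(Option.sequence(List(Some(1), Some(2), Some(3)))) // Some(List(1, 2, 3))
  println(Option.sequence(List(Some(1), None)))             // None: one missing value poisons the whole list
  println(Option.variance(Seq(1.0, 2.0, 3.0, 4.0)))         // Some(1.25)
}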
package com.twitter.util import com.twitter.util._ import org.scalatest.FunSuite import org.scalatest.junit.JUnitRunner import org.junit.runner.RunWith import scala.collection.mutable import java.util.concurrent.atomic.AtomicReference import scala.collection.immutable.VectorBuilder @RunWith(classOf[JUnitRunner]) class EventTest extends FunSuite { test("pub/sub while active") { val e = Event[Int]() val ref = new AtomicReference[Seq[Int]](Seq.empty) val sub = e.build.register(Witness(ref)) assert(ref.get === Seq.empty) e.notify(1) assert(ref.get === Seq(1)) e.notify(2) assert(ref.get === Seq(1, 2)) Await.ready(sub.close()) e.notify(3) assert(ref.get === Seq(1, 2)) } test("Event.collect") { val e = Event[Int]() val events = e collect { case i if i%2==0 => i*2 } val ref = new AtomicReference[Seq[Int]](Seq.empty) events.build.register(Witness(ref)) e.notify(1) assert(ref.get === Seq.empty) e.notify(2) assert(ref.get === Seq(4)) e.notify(3); e.notify(4) assert(ref.get === Seq(4,8)) } test("Event.foldLeft") { val e = Event[Int]() val sum = e.foldLeft(0) (_+_) val ref = new AtomicReference[Int](0) sum.register(Witness(ref)) e.notify(0) assert(ref.get === 0) e.notify(1) assert(ref.get === 1) e.notify(12) assert(ref.get === 13) } test("Event.sliding") { val e = Event[Int]() val w = e.sliding(3) val ref = new AtomicReference[Seq[Int]](Seq.empty) w.register(Witness(ref)) e.notify(1) assert(ref.get === Seq(1)) e.notify(2) assert(ref.get === Seq(1,2)) e.notify(3) assert(ref.get === Seq(1,2,3)) e.notify(4) assert(ref.get === Seq(2,3,4)) } test("Event.mergeMap") { val e = Event[Int]() val inners = new mutable.ArrayBuffer[Witness[String]] val e2 = e mergeMap { i => val e = Event[String]() inners += e e } val ref = new AtomicReference[String]("") val closable = e2.register(Witness(ref)) assert(inners.isEmpty) e.notify(1) assert(inners.size === 1) assert(ref.get === "") inners(0).notify("okay") assert(ref.get === "okay") e.notify(2) assert(inners.size === 2) assert(ref.get === "okay") inners(0).notify("notokay") assert(ref.get === "notokay") inners(1).notify("yay") assert(ref.get === "yay") } test("Event.mergeMap closes constituent witnesses") { @volatile var n = 0 val e1, e2 = new Event[Int] { def register(w: Witness[Int]) = { n += 1 w.notify(1) Closable.make { _ => n -= 1; Future.Done } } } val e12 = e1 mergeMap { _ => e2 } val ref = new AtomicReference(Seq.empty[Int]) val closable = e12.build.register(Witness(ref)) assert(ref.get === Seq(1)) assert(n === 2) Await.result(closable.close()) assert(n === 0) } test("Event.select") { val e1 = Event[Int]() val e2 = Event[String]() val e = e1 select e2 val ref = new AtomicReference[Seq[Either[Int, String]]](Seq.empty) e.build.register(Witness(ref)) assert(ref.get.isEmpty) e1.notify(1) e1.notify(2) e2.notify("1") e1.notify(3) e2.notify("2") assert(ref.get === Seq(Left(1), Left(2), Right("1"), Left(3), Right("2"))) } test("Event.zip") { val e1 = Event[Int]() val e2 = Event[String]() val e = e1 zip e2 val ref = new AtomicReference[Seq[(Int, String)]](Seq.empty) e.build.register(Witness(ref)) for (i <- 0 until 50) e1.notify(i) for (i <- 0 until 50) e2.notify(i.toString) for (i <- 50 until 100) e2.notify(i.toString) for (i <- 50 until 100) e1.notify(i) assert(ref.get === ((0 until 100) zip ((0 until 100) map(_.toString)))) } test("Event.joinLast") { val e1 = Event[Int]() val e2 = Event[String]() val e = e1 joinLast e2 val ref = new AtomicReference[(Int, String)]((0, "")) e.register(Witness(ref)) assert(ref.get === (0, "")) e1.notify(1) assert(ref.get === (0, "")) 
e2.notify("ok") assert(ref.get === (1, "ok")) e2.notify("ok1") assert(ref.get === (1, "ok1")) e1.notify(2) assert(ref.get === (2, "ok1")) } test("Event.take") { val e = Event[Int]() val e1 = e.take(5) val ref = new AtomicReference[Seq[Int]](Seq.empty) e1.build.register(Witness(ref)) e.notify(1) e.notify(2) assert(ref.get === Seq(1, 2)) e.notify(3) e.notify(4) e.notify(5) assert(ref.get === Seq(1, 2, 3, 4, 5)) e.notify(6) e.notify(7) assert(ref.get === Seq(1, 2, 3, 4, 5)) } test("Event.merge") { val e1, e2 = Event[Int]() val e = e1 merge e2 val ref = new AtomicReference[Seq[Int]](Seq.empty) e.build.register(Witness(ref)) for (i <- 0 until 100) e1.notify(i) for (i <- 100 until 200) e2.notify(i) for (i <- 200 until 300) { if (i%2 == 0) e1.notify(i) else e2.notify(i) } assert(ref.get === Seq.range(0, 300)) } test("Event.toVar") { val e = Event[Int]() val v = Var(0, e) val ref = new AtomicReference[Seq[Int]](Seq.empty) v.changes.build.register(Witness(ref)) for (i <- 1 until 100) e.notify(i) assert(ref.get === Seq.range(0, 100)) } test("Event.toFuture") { val e = Event[Int]() val f = e.toFuture() assert(!f.isDefined) e.notify(123) assert(f.isDefined) assert(Await.result(f) === 123) } test("Event.toFuture[Interrupted]") { val e = Event[Int]() val f = e.toFuture() assert(!f.isDefined) val exc = new Exception f.raise(exc) assert(f.isDefined) val caught = intercept[Exception] { Await.result(f) } assert(caught === exc) } test("Jake's composition test") { def sum(v: Var[Int]): Var[Int] = { val e = v.changes.foldLeft(0) (_+_) Var(0, e) } def ite[T](i: Var[Boolean], t: Var[T], e: Var[T]) = i flatMap { i => if (i) t else e } val b = Var(true) val x = Var(7) val y = Var(9) val z = ite(b, sum(x), sum(y)) val ref = new AtomicReference[Int] z.changes.register(Witness(ref)) assert(ref.get === 7) x() = 10 assert(ref.get === 17) b() = false assert(ref.get === 9) y() = 10 assert(ref.get === 19) b() = true assert(ref.get === 17) x() = 3 assert(ref.get === 20) } }
mosesn/util
util-core/src/test/scala/com/twitter/util/EventTest.scala
Scala
apache-2.0
6,468
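Not part of the test file above — outside a test the same combinators compose directly. This sketch keeps a running sum of even values only, using just the operations the test already exercises (collect, foldLeft, register, Witness); the event name and values are made up.

import java.util.concurrent.atomic.AtomicReference
import com.twitter.util.{Event, Witness}

object EventSketch extends App {
  val ints = Event[Int]()
  // Only even values contribute to the running sum.
  val evenSum = ints.collect { case i if i % 2 == 0 => i }.foldLeft(0)(_ + _)
  val ref = new AtomicReference[Int](0)
  evenSum.register(Witness(ref))
  Seq(1, 2, 3, 4).foreach(i => ints.notify(i))
  println(ref.get) // 6
}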
/*
 * Copyright 2016 HM Revenue & Customs
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package uk.gov.hmrc.ct.ct600.v3

import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtInteger, Linked}
import uk.gov.hmrc.ct.computations.CP295

// was B37
case class B315(value: Int) extends CtBoxIdentifier(name = "Profits chargeable to corporation tax") with CtInteger

object B315 extends Linked[CP295, B315] {
  override def apply(source: CP295): B315 = B315(source.value)
}
ahudspith-equalexperts/ct-calculations
src/main/scala/uk/gov/hmrc/ct/ct600/v3/B315.scala
Scala
apache-2.0
978
package breeze.linalg

import org.scalatest.FunSuite

/**
 * TODO
 *
 * @author dlwh
 **/
class squaredDistanceTest extends FunSuite {

  test("simple squared distance test") {
    assert(squaredDistance(DenseVector(3.0, 4.0), DenseVector.zeros[Double](2)) === 25.0)
    assert(squaredDistance(DenseVector(3.0, 4.0), SparseVector.zeros[Double](2)) === 25.0)
  }
}
wavelets/breeze
src/test/scala/breeze/linalg/squaredDistanceTest.scala
Scala
apache-2.0
363
import _root_.sbt._

trait JUnitXMLReport extends BasicScalaProject {
  //create a listener that writes to the normal output directory
  def junitXmlListener: TestReportListener = new eu.henkelmann.sbt.JUnitXmlTestsListener(outputPath.toString)

  //add the new listener to the already configured ones
  override def testListeners: Seq[TestReportListener] = super.testListeners ++ Seq(junitXmlListener)
}
jrudolph/junit_xml_listener
src/main/scala/JUnitXMLReport.scala
Scala
mit
405
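Not part of the repository file above — for context, the trait targets sbt 0.7-style project definitions. A hypothetical project that mixes it in might look like the following (the class name is made up, and the DefaultProject base is an assumption about the sbt 0.7 API).

import _root_.sbt._

// Hypothetical sbt 0.7 project definition picking up the JUnit XML listener.
class MyProject(info: ProjectInfo) extends DefaultProject(info) with JUnitXMLReport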
/***********************************************************************
 * Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Apache License, Version 2.0
 * which accompanies this distribution and is available at
 * http://www.opensource.org/licenses/apache2.0.php.
 ***********************************************************************/

package org.locationtech.geomesa.index.utils

import java.util.concurrent.ConcurrentHashMap

object SplitArrays {

  val EmptySplits = IndexedSeq(Array.empty[Byte])

  private val splitArraysMap: ConcurrentHashMap[Int, IndexedSeq[Array[Byte]]] =
    new ConcurrentHashMap[Int, IndexedSeq[Array[Byte]]]()

  def apply(numSplits: Int): IndexedSeq[Array[Byte]] = {
    if (numSplits < 2) { EmptySplits } else {
      var splits = splitArraysMap.get(numSplits)
      if (splits == null) {
        splits = (0 until numSplits).map(_.toByte).toArray.map(Array(_)).toIndexedSeq
        splitArraysMap.put(numSplits, splits)
      }
      splits
    }
  }
}
locationtech/geomesa
geomesa-index-api/src/main/scala/org/locationtech/geomesa/index/utils/SplitArrays.scala
Scala
apache-2.0
1,116
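Not part of the repository file above — a tiny sketch of the cache it implements: split arrays are computed once per split count and memoized, and anything below two splits falls back to EmptySplits.

import org.locationtech.geomesa.index.utils.SplitArrays

object SplitArraysSketch extends App {
  val splits = SplitArrays(4)
  // One single-byte array per split: 0, 1, 2, 3
  splits.foreach(a => println(a.mkString(",")))
  println(SplitArrays(4) eq splits)                  // true -- served from the cache on the second call
  println(SplitArrays(1) eq SplitArrays.EmptySplits) // true -- fewer than 2 splits
}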
package org.gtri.util.scala.xmlbuilder

import org.gtri.util.xsddatatypes._
import org.gtri.util.xsddatatypes.XsdQName._

trait XmlNamespaceContext extends NamespaceURIToPrefixResolver with PrefixToNamespaceURIResolver {

  def prefixToNamespaceURIMap : Map[XsdNCName, XsdAnyURI]

  def optPrefixesOrder : Option[Seq[(XsdNCName)]]

  private def optOrderedPrefixTuples : Option[Seq[(XsdNCName, XsdAnyURI)]] =
    for {
      prefixesOrder <- optPrefixesOrder
    } yield for {
      prefix <- prefixesOrder
      uri <- prefixToNamespaceURIMap.get(prefix)
    } yield (prefix, uri)

  // If no prefixes order defined, sort by prefix name
  lazy val orderedPrefixToNamespaceURITuples : Seq[(XsdNCName, XsdAnyURI)] =
    optOrderedPrefixTuples.getOrElse(prefixToNamespaceURIMap.toSeq.sortBy(_._1))

  // Using orderedPrefixes to select the first prefix if there are multiple prefixes mapped to a particular uri
  lazy val namespaceURIToPrefixMap : Map[XsdAnyURI, XsdNCName] =
    orderedPrefixToNamespaceURITuples.groupBy(_._2).mapValues(_.head._1)

  def isValidPrefixForNamespaceURI(prefix: XsdNCName, namespaceURI: XsdAnyURI) =
    prefixToNamespaceURIMap.mapValues(_ == namespaceURI).getOrElse(prefix, false)

  def getPrefixForNamespaceURI(namespaceURI: XsdAnyURI) = namespaceURIToPrefixMap.get(namespaceURI).orNull

  def getNamespaceURIForPrefix(prefix : XsdNCName) = prefixToNamespaceURIMap.get(prefix).orNull
}
gtri-iead/org.gtri.util.scala
xmlbuilder/src/main/scala/org/gtri/util/scala/xmlbuilder/XmlNamespaceContext.scala
Scala
gpl-3.0
1,403
package scala.collection.scalatest import org.scalatest._ trait IntBagBehaviours extends BagBehaviours with Matchers { this: FlatSpec => def intBagBehaviour(bag: scala.collection.Bag[Int]) { it should "grow by 1 with +(elem) operation" in { assertResult(bag.size + 1) { val newBag = bag + 1 newBag.size } assertResult(bag.size + 1) { val newBag = bag + 2 newBag.size } assertResult(bag.size + 1) { val newBag = bag + 10 newBag.size } } it should "grow by m with +(elem->m) operation" in { assertResult(bag.size + 4) { (bag + (1 -> 4)).size } assertResult(bag.size + 1) { (bag + (2 -> 1)).size } assertResult(bag.size) { (bag + (3 -> 0)).size } assertResult(bag.size) { (bag + (3 -> -3)).size } } val distinct = bag.distinct it should "implement [distinct]: all multiplicities must be one" in { for (elem <- distinct) { assertResult(1) { distinct.multiplicity(elem) } } } it should "implement [distinct]: all distinct element must be present" in { assertResult(bag.toSet.toList.sorted) { distinct.toList.sorted } } it should "have the same size when mapped" in { assertResult(bag.size) { (bag map identity).size } assertResult(bag.size) { (bag map (s => -s)).size } assertResult(bag.size) { (bag map (s => 28918265)).size } } if (bag.nonEmpty) { it should "implement reduce coherently" in { assertResult(bag.toList.reduce(_ + _)) { bag.reduce(_ + _) } } } it should "implement [sum]" in { assertResult(bag.toList.sum) { bag.sum } } it should "implement [product]" in { assertResult(bag.toList.product) { bag.product } } } }
sageserpent-open/multisets
src/test/scala/scala/collection/scalatest/IntBagBehaviours.scala
Scala
bsd-3-clause
1,973
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.python import java.io.File import scala.collection.mutable.ArrayBuffer import org.apache.spark.{SparkEnv, TaskContext} import org.apache.spark.api.python.{ChainedPythonFunctions, PythonEvalType} import org.apache.spark.rdd.RDD import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.plans.physical.{AllTuples, ClusteredDistribution, Distribution, Partitioning} import org.apache.spark.sql.execution.{GroupedIterator, SparkPlan, UnaryExecNode} import org.apache.spark.sql.execution.arrow.ArrowUtils import org.apache.spark.sql.types.{DataType, StructField, StructType} import org.apache.spark.util.Utils /** * Physical node for aggregation with group aggregate Pandas UDF. * * This plan works by sending the necessary (projected) input grouped data as Arrow record batches * to the python worker, the python worker invokes the UDF and sends the results to the executor, * finally the executor evaluates any post-aggregation expressions and join the result with the * grouped key. */ case class AggregateInPandasExec( groupingExpressions: Seq[NamedExpression], udfExpressions: Seq[PythonUDF], resultExpressions: Seq[NamedExpression], child: SparkPlan) extends UnaryExecNode { override val output: Seq[Attribute] = resultExpressions.map(_.toAttribute) override def outputPartitioning: Partitioning = child.outputPartitioning override def producedAttributes: AttributeSet = AttributeSet(output) override def requiredChildDistribution: Seq[Distribution] = { if (groupingExpressions.isEmpty) { AllTuples :: Nil } else { ClusteredDistribution(groupingExpressions) :: Nil } } private def collectFunctions(udf: PythonUDF): (ChainedPythonFunctions, Seq[Expression]) = { udf.children match { case Seq(u: PythonUDF) => val (chained, children) = collectFunctions(u) (ChainedPythonFunctions(chained.funcs ++ Seq(udf.func)), children) case children => // There should not be any other UDFs, or the children can't be evaluated directly. assert(children.forall(_.find(_.isInstanceOf[PythonUDF]).isEmpty)) (ChainedPythonFunctions(Seq(udf.func)), udf.children) } } override def requiredChildOrdering: Seq[Seq[SortOrder]] = Seq(groupingExpressions.map(SortOrder(_, Ascending))) override protected def doExecute(): RDD[InternalRow] = { val inputRDD = child.execute() val sessionLocalTimeZone = conf.sessionLocalTimeZone val pythonRunnerConf = ArrowUtils.getPythonRunnerConfMap(conf) val (pyFuncs, inputs) = udfExpressions.map(collectFunctions).unzip // Filter child output attributes down to only those that are UDF inputs. // Also eliminate duplicate UDF inputs. 
val allInputs = new ArrayBuffer[Expression] val dataTypes = new ArrayBuffer[DataType] val argOffsets = inputs.map { input => input.map { e => if (allInputs.exists(_.semanticEquals(e))) { allInputs.indexWhere(_.semanticEquals(e)) } else { allInputs += e dataTypes += e.dataType allInputs.length - 1 } }.toArray }.toArray // Schema of input rows to the python runner val aggInputSchema = StructType(dataTypes.zipWithIndex.map { case (dt, i) => StructField(s"_$i", dt) }) inputRDD.mapPartitionsInternal { iter => val prunedProj = UnsafeProjection.create(allInputs, child.output) val grouped = if (groupingExpressions.isEmpty) { // Use an empty unsafe row as a place holder for the grouping key Iterator((new UnsafeRow(), iter)) } else { GroupedIterator(iter, groupingExpressions, child.output) }.map { case (key, rows) => (key, rows.map(prunedProj)) } val context = TaskContext.get() // The queue used to buffer input rows so we can drain it to // combine input with output from Python. val queue = HybridRowQueue(context.taskMemoryManager(), new File(Utils.getLocalDir(SparkEnv.get.conf)), groupingExpressions.length) context.addTaskCompletionListener[Unit] { _ => queue.close() } // Add rows to queue to join later with the result. val projectedRowIter = grouped.map { case (groupingKey, rows) => queue.add(groupingKey.asInstanceOf[UnsafeRow]) rows } val columnarBatchIter = new ArrowPythonRunner( pyFuncs, PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF, argOffsets, aggInputSchema, sessionLocalTimeZone, pythonRunnerConf).compute(projectedRowIter, context.partitionId(), context) val joinedAttributes = groupingExpressions.map(_.toAttribute) ++ udfExpressions.map(_.resultAttribute) val joined = new JoinedRow val resultProj = UnsafeProjection.create(resultExpressions, joinedAttributes) columnarBatchIter.map(_.rowIterator.next()).map { aggOutputRow => val leftRow = queue.remove() val joinedRow = joined(leftRow, aggOutputRow) resultProj(joinedRow) } } } }
WindCanDie/spark
sql/core/src/main/scala/org/apache/spark/sql/execution/python/AggregateInPandasExec.scala
Scala
apache-2.0
5,981
package com.sksamuel.elastic4s.cluster import org.elasticsearch.cluster.health.ClusterHealthStatus import org.elasticsearch.common.Priority import com.sksamuel.exts.OptionImplicits._ import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequestBuilder trait ClusterApi { def clusterState(): ClusterStateDefinition = ClusterStateDefinition() def clusterStats() = new ClusterStatsDefinition def clusterPersistentSettings(settings: Map[String, String]) = ClusterSettingsDefinition(settings, Map.empty) def clusterTransientSettings(settings: Map[String, String]) = ClusterSettingsDefinition(Map.empty, settings) def clusterHealth(): ClusterHealthDefinition = clusterHealth("_all") def clusterHealth(first: String, rest: String*): ClusterHealthDefinition = ClusterHealthDefinition(first +: rest) def clusterHealth(indices: Iterable[String]): ClusterHealthDefinition = ClusterHealthDefinition(indices.toIndexedSeq) } case class ClusterStatsDefinition() case class ClusterSettingsDefinition(persistentSettings: Map[String, String], transientSettings: Map[String, String]) { import scala.collection.JavaConverters._ private[elastic4s] def build(builder: ClusterUpdateSettingsRequestBuilder): ClusterUpdateSettingsRequestBuilder = { builder.setPersistentSettings(persistentSettings.asJava) builder.setTransientSettings(transientSettings.asJava) } def persistentSettings(settings: Map[String, String]): ClusterSettingsDefinition = { copy(persistentSettings = settings) } def transientSettings(settings: Map[String, String]): ClusterSettingsDefinition = { copy(transientSettings = settings) } } case class ClusterStateDefinition(metrics: Seq[String] = Seq.empty, indices: Seq[String] = Seq.empty) { def metrics(metrics: Seq[String]): ClusterStateDefinition = copy(metrics = metrics) def indices(indices: Seq[String]): ClusterStateDefinition = copy(indices = indices) } case class ClusterHealthDefinition(indices: Seq[String], timeout: Option[String] = None, waitForActiveShards: Option[Int] = None, waitForEvents: Option[Priority] = None, waitForStatus: Option[ClusterHealthStatus] = None, waitForNodes: Option[String] = None) { def timeout(value: String): ClusterHealthDefinition = copy(timeout = value.some) def waitForActiveShards(waitForActiveShards: Int): ClusterHealthDefinition = copy(waitForActiveShards = waitForActiveShards.some) def waitForEvents(waitForEvents: Priority): ClusterHealthDefinition = copy(waitForEvents = waitForEvents.some) def waitForStatus(waitForStatus: ClusterHealthStatus): ClusterHealthDefinition = copy(waitForStatus = waitForStatus.some) def waitForNodes(waitForNodes: String): ClusterHealthDefinition = copy(waitForNodes = waitForNodes.some) }
aroundus-inc/elastic4s
elastic4s-core/src/main/scala/com/sksamuel/elastic4s/cluster/ClusterApi.scala
Scala
apache-2.0
2,989
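Not part of the repository file above — a sketch of building the request definitions through the DSL; it only constructs the case classes and does not execute anything against a cluster. The index names and the setting key are made up for illustration.

import com.sksamuel.elastic4s.cluster.ClusterApi
import org.elasticsearch.common.Priority

object ClusterSketch extends App with ClusterApi {
  val health = clusterHealth("users", "orders")
    .timeout("10s")
    .waitForActiveShards(2)
    .waitForEvents(Priority.NORMAL)
  val settings = clusterPersistentSettings(Map("cluster.routing.allocation.enable" -> "all"))
  println(health)
  println(settings)
}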
package com.gilt.thehand.rules.logical

import com.gilt.thehand.{AbstractContext, AbstractRuleParser, Rule}

/**
 * A Rule that always evaluates to true, together with a parser that deserializes the textual spellings of `True`.
 */
case object True extends Rule with AbstractRuleParser {

  /**
   * Extractor for rule matching. Since this is a True, always return a Some.
   */
  def unapply(context: AbstractContext): Option[AbstractContext] = Some(context)

  /**
   * Extractor for string parsing.
   */
  def unapply(deserializeFrom: String): Option[Rule] = deserializeFrom.trim match {
    case "True" | "true" | "T" | "t" | "1" => Some(True)
    case _ => None
  }
}
gilt/the-hand
src/main/scala/com/gilt/thehand/rules/logical/True.scala
Scala
apache-2.0
632
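Not part of the repository file above — a short sketch of the two extractors: the String overload parses several spellings into the rule itself, and unmatched input yields None. The spellings tried here come straight from the match in the source.

import com.gilt.thehand.rules.logical.True

object TrueSketch extends App {
  val parsed = "true" match {
    case True(rule) => Some(rule) // the String extractor fires: Some(True)
    case _          => None
  }
  println(parsed)
  println(True.unapply(" T ")) // Some(True) -- input is trimmed before matching
  println(True.unapply("0"))   // None -- "0" is not an accepted spelling
}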
package part2actors

import akka.actor.{Actor, ActorLogging, ActorSystem, Props}
import akka.event.Logging

/**
 * Logging is done asynchronously.
 */
object ActorLogging extends App {

  /**
   * An explicit logger can be used for logging.
   */
  class ActorWithExplicitLogger extends Actor {
    val logger = Logging(context.system, this)

    override def receive: Receive = {
      case message => logger.info(message.toString)
    }
  }

  /**
   * Logging with ActorLogging. It works the same way as the
   * explicit version.
   */
  class ActorWithLogging extends Actor with ActorLogging {
    override def receive: Receive = {
      case message => log.info(message.toString)
    }
  }

  val system = ActorSystem("logger")

  val explicit = system.actorOf(Props[ActorWithExplicitLogger], "explicit")
  explicit ! "OMG barbecue!"

  val withLogging = system.actorOf(Props[ActorWithLogging], "withLogging")
  withLogging ! "It works again!"
}
guhemama/moocs
RockAkka/src/main/scala/part2actors/ActorLogging.scala
Scala
bsd-3-clause
958
/* Copyright 2017-18, Emmanouil Antonios Platanios. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package org.platanios.tensorflow.api.ops import org.platanios.tensorflow.api.core.Shape import org.platanios.tensorflow.api.core.exception.InvalidArgumentException import org.platanios.tensorflow.api.implicits.Implicits._ import org.platanios.tensorflow.api.ops.Gradients.{Registry => GradientsRegistry} import org.platanios.tensorflow.api.tensors.Tensor import org.platanios.tensorflow.api.types._ import scala.language.postfixOps /** Contains functions for constructing general math-related ops. * * @author Emmanouil Antonios Platanios */ private[api] trait Math { /** $OpDocMathSelect * * @group MathOps * @param condition Boolean condition tensor. * @param x Tensor which may have the same shape as `condition`. If `condition` has rank `1`, then `t` may * have a higher rank, but its first dimension must match the size of `condition`. * @param y Tensor with the same data type and shape as `t`. * @param name Name for the created op. * @return Created op output. */ def select(condition: Output, x: Output, y: Output, name: String = "Select"): Output = { val (cX, cY) = castArgs(x, y) Op.Builder(opType = "Select", name = name) .addInput(condition) .addInput(cX) .addInput(cY) .build().outputs(0) } /** $OpDocMathRange * * @group MathOps * @param start Rank 0 (i.e., scalar) tensor that contains the starting value of the number sequence. * @param limit Rank 0 (i.e., scalar) tensor that contains the ending value (exclusive) of the number sequence. * @param delta Rank 0 (i.e., scalar) tensor that contains the difference between consecutive numbers in the * sequence. * @param name Name for the created op. * @return Created op output. */ def range( start: Output, limit: Output, delta: Output = Basic.constant(1), dataType: DataType = null, name: String = "Range"): Output = { var castedStart: Output = start var castedLimit: Output = limit var castedDelta: Output = delta Op.createWith(nameScope = name) { val inferredDataType = { if (dataType != null) dataType else DataType.mostPrecise(start.dataType, limit.dataType, delta.dataType) } if (start.dataType != inferredDataType) castedStart = cast(start, inferredDataType) if (limit.dataType != inferredDataType) castedLimit = cast(limit, inferredDataType) if (delta.dataType != inferredDataType) castedDelta = cast(delta, inferredDataType) } Op.Builder(opType = "Range", name = name) .addInput(castedStart) .addInput(castedLimit) .addInput(castedDelta) .build().outputs(0) } /** $OpDocMathLinspace * * @group MathOps * @param start Rank 0 (i.e., scalar) tensor that contains the starting value of the number sequence. * @param stop Rank 0 (i.e., scalar) tensor that contains the ending value (inclusive) of the number * sequence. * @param numberOfValues Rank 0 (i.e., scalar) tensor that contains the number of values in the number sequence. * @param name Name for the created op. * @return Created op output. 
*/ def linspace(start: Output, stop: Output, numberOfValues: Output, name: String = "LinSpace"): Output = { Op.Builder(opType = "LinSpace", name = name) .addInput(start) .addInput(stop) .addInput(numberOfValues) .build().outputs(0) } /** $OpDocMathCast * * @group MathOps * @param x Tensor to cast. * @param dataType Target data type. * @param name Name for the created op. * @return Created op output. */ def cast[T <: OutputLike : OutputOps](x: T, dataType: DataType, name: String = "Cast"): T = { if (x.dataType == dataType) { x } else { if (x.dataType.isComplex && !dataType.isComplex) logger.warn("Casting complex tensors to real tensors discards the imaginary part.") implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Cast", name = name) .addInput(o) .setAttribute("DstT", dataType) .build().outputs(0)) } } // TODO: [OPS] saturateCast /** $OpDocMathBitcast * * @group MathOps * @param input Input tensor. * @param dataType Target data type. * @param name Name for the created op. * @return Created op output. */ def bitcast(input: Output, dataType: DataType, name: String = "Bitcast"): Output = { Op.Builder(opType = "Bitcast", name = name) .addInput(input) .setAttribute("type", dataType) .build().outputs(0) } /** $OpDocMathAddN * * @group MathOps * @param inputs Input tensors. * @param name Created op name. * @return Created op output. */ def addN(inputs: Seq[Output], name: String = "AddN"): Output = { if (inputs.length == 1) Basic.identity(inputs(0), name) else Op.Builder(opType = "AddN", name = name) .addInputList(castArgs(inputs)) .build().outputs(0) } /** $OpDocMathAccumulateN * * @param inputs Input tensors. * @param shape Shape of the elements of `inputs` (in case it's not known statically and needs to be retained). * @param name Created op name. * @return Created op output. * @throws InvalidArgumentException If any of the inputs has a different data type and/or shape than the rest. */ @throws[InvalidArgumentException] def accumulateN( inputs: Seq[Output], shape: Shape = null, name: String = "AccumulateN" ): Output = { val dataType = inputs.head.dataType if (inputs.exists(_.dataType != dataType)) throw InvalidArgumentException("All input tensors must have the same data type.") val inferredShape = if (shape == null) Shape.unknown() else shape if (inputs.exists(!_.shape.isCompatibleWith(inferredShape))) throw InvalidArgumentException("All input tensors must have the same shape.") if (inputs.length == 1 && name == null) { inputs.head } else if (inputs.length == 1) { Basic.identity(inputs.head, name = name) } else { Op.Builder(opType = "AccumulateNV2", name = name) .addInputList(inputs) .setAttribute("shape", shape) .build().outputs(0) } } //region Unary Ops /** $OpDocMathAbs * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, `INT64`, * `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def abs[T <: OutputLike : OutputOps](x: T, name: String = "Abs"): T = { if (x.dataType.isComplex) { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "ComplexAbs", name = name) .addInput(o) .setAttribute("Tout", x.dataType.real) .build().outputs(0)) } else { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Abs", name = name) .addInput(o) .build().outputs(0)) } } /** $OpDocMathNegate * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, `INT64`, * `COMPLEX64`, or `COMPLEX128`. 
* @param name Name for the created op. * @return Created op output. */ def negate[T: OutputOps](x: T, name: String = "Negate"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Neg", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathReciprocal * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, `INT64`, * `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def reciprocal[T: OutputOps](x: T, name: String = "Reciprocal"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Reciprocal", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathSquare * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, `INT64`, * `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def square[T: OutputOps](x: T, name: String = "Square"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Square", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathSqrt * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, `INT64`, * `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def sqrt[T: OutputOps](x: T, name: String = "Sqrt"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Sqrt", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathRsqrt * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, `INT64`, * `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def rsqrt[T: OutputOps](x: T, name: String = "Rsqrt"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Rsqrt", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathExp * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, `INT64`, * `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def exp[T: OutputOps](x: T, name: String = "Exp"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Exp", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathExpm1 * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, `INT64`, * `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def expm1[T: OutputOps](x: T, name: String = "Expm1"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Expm1", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathLog * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, `INT64`, * `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def log[T: OutputOps](x: T, name: String = "Log"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Log", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathLog1p * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, `INT64`, * `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. 
* @return Created op output. */ def log1p[T: OutputOps](x: T, name: String = "Log1p"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Log1p", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathSin * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, `INT64`, * `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def sin[T: OutputOps](x: T, name: String = "Sin"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Sin", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathCos * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, `INT64`, * `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def cos[T: OutputOps](x: T, name: String = "Cos"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Cos", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathTan * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, `INT64`, * `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def tan[T: OutputOps](x: T, name: String = "Tan"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Tan", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathAsin * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, `INT64`, * `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def asin[T: OutputOps](x: T, name: String = "Asin"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Asin", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathAcos * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, `INT64`, * `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def acos[T: OutputOps](x: T, name: String = "Acos"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Acos", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathAtan * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, `INT64`, * `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def atan[T: OutputOps](x: T, name: String = "Atan"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Atan", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathSinh * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, `INT64`, * `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def sinh[T: OutputOps](x: T, name: String = "Sinh"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Sinh", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathCosh * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, `INT64`, * `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. 
*/ def cosh[T: OutputOps](x: T, name: String = "Cosh"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Cosh", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathTanh * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, `INT64`, * `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def tanh[T: OutputOps](x: T, name: String = "Tanh"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Tanh", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathAsinh * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, `INT64`, * `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def asinh[T: OutputOps](x: T, name: String = "ASinh"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Asinh", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathAcosh * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, `INT64`, * `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def acosh[T: OutputOps](x: T, name: String = "ACosh"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Acosh", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathAtanh * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, `INT64`, * `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def atanh[T: OutputOps](x: T, name: String = "ATanh"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Atanh", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathLogGamma * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, `INT64`, * `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def logGamma[T: OutputOps](x: T, name: String = "Lgamma"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Lgamma", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathDigamma * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, `INT64`, * `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def digamma[T: OutputOps](x: T, name: String = "Digamma"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Digamma", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathErf * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, `INT64`, * `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def erf[T: OutputOps](x: T, name: String = "Erf"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Erf", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathErfc * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, `INT64`, * `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. 
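*
*              Usage sketch (illustrative; constant creation via `Basic.constant` as used elsewhere in
*              this file). `erfc(x)` equals `1 - erf(x)`, but is more accurate for large `x`, where
*              `erf(x)` is already close to `1`:
*              {{{
*                val x = Basic.constant(0.3f)
*                val a = Math.erfc(x)
*                val b = Math.subtract(Basic.constant(1.0f), Math.erf(x))   // ~equal to `a`
*              }}}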
*/ def erfc[T: OutputOps](x: T, name: String = "Erfc"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Erfc", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathSigmoid * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, `INT64`, * `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def sigmoid[T: OutputOps](x: T, name: String = "Sigmoid"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Sigmoid", name = name) .addInput(o) .build().outputs(0)) } // TODO: [OPS] logSigmoid /** $OpDocMathSign * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, `INT64`, * `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def sign[T: OutputOps](x: T, name: String = "Sign"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Sign", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathRound * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `COMPLEX64`, or * `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def round[T: OutputOps](x: T, name: String = "Round"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Round", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathRoundInt * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, or `FLOAT64`. * @param name Name for the created op. * @return Created op output. */ def roundInt[T: OutputOps](x: T, name: String = "RoundInt"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Rint", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathFloor * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, or `FLOAT64`. * @param name Name for the created op. * @return Created op output. */ def floor[T: OutputOps](x: T, name: String = "Floor"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Floor", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathCeil * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, or `FLOAT64`. * @param name Name for the created op. * @return Created op output. */ def ceil[T: OutputOps](x: T, name: String = "Ceil"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "Ceil", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathIsNaN * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, or `FLOAT64`. * @param name Name for the created op. * @return Created op output. */ def isNaN[T: OutputOps](x: T, name: String = "IsNaN"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "IsNan", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathIsInf * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, or `FLOAT64`. * @param name Name for the created op. * @return Created op output. 
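*
*              Usage sketch (illustrative values; the `Tensor` varargs constructor is assumed to behave
*              as in its other uses in this file):
*              {{{
*                val x = Basic.constant(Tensor(1.0f, Float.PositiveInfinity, Float.NaN))
*                Math.isInf(x)      // ==> [false, true, false]
*                Math.isFinite(x)   // ==> [true, false, false]
*              }}}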
*/ def isInf[T: OutputOps](x: T, name: String = "IsInf"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "IsInf", name = name) .addInput(o) .build().outputs(0)) } /** $OpDocMathIsFinite * * @group MathOps * @param x Input tensor that must be one of the following types: `HALF`, `FLOAT32`, or `FLOAT64`. * @param name Name for the created op. * @return Created op output. */ def isFinite[T: OutputOps](x: T, name: String = "IsFinite"): T = { implicitly[OutputOps[T]] .applyUnary(x, o => Op.Builder(opType = "IsFinite", name = name) .addInput(o) .build().outputs(0)) } //endregion Unary Ops //region Binary Ops /** $OpDocMathAdd * * @group MathOps * @param x First input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `UINT8`, * `INT8`, `INT16`, `INT32`, `INT64`, `COMPLEX64`, `COMPLEX128`, or `STRING`. * @param y Second input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `UINT8`, * `INT8`, `INT16`, `INT32`, `INT64`, `COMPLEX64`, `COMPLEX128`, or `STRING`. * @param name Name for the created op. * @return Created op output. */ def add(x: Output, y: Output, name: String = "Add"): Output = { val (cX, cY) = castArgs(x, y) Op.Builder(opType = "Add", name = name) .addInput(cX) .addInput(cY) .build().outputs(0) } /** $OpDocMathSubtract * * @group MathOps * @param x First input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, * `INT64`, `COMPLEX64`, or `COMPLEX128`. * @param y Second input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, * `INT64`, `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def subtract(x: Output, y: Output, name: String = "Sub"): Output = { val (cX, cY) = castArgs(x, y) Op.Builder(opType = "Sub", name = name) .addInput(cX) .addInput(cY) .build().outputs(0) } /** $OpDocMathMultiply * * @group MathOps * @param x First input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `UINT8`, * `INT8`, `INT16`, `INT32`, `INT64`, `COMPLEX64`, or `COMPLEX128`. * @param y Second input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `UINT8`, * `INT8`, `INT16`, `INT32`, `INT64`, `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def multiply(x: Output, y: Output, name: String = "Mul"): Output = { val (cX, cY) = castArgs(x, y) Op.Builder(opType = "Mul", name = name) .addInput(cX) .addInput(cY) .build().outputs(0) } /** $OpDocMathDivide * * @group MathOps * @param x First input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `UINT8`, * `INT8`, `INT16`, `INT32`, `INT64`, `COMPLEX64`, or `COMPLEX128`. * @param y Second input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `UINT8`, * `INT8`, `INT16`, `INT32`, `INT64`, `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def divide(x: Output, y: Output, name: String = "Div"): Output = { val (cX, cY) = castArgs(x, y) Op.Builder(opType = "Div", name = name) .addInput(cX) .addInput(cY) .build().outputs(0) } /** $OpDocMathFloorDivide * * @group MathOps * @param x First input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `UINT8`, * `INT8`, `INT16`, `INT32`, `INT64`, `COMPLEX64`, or `COMPLEX128`. 
* @param y Second input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `UINT8`, * `INT8`, `INT16`, `INT32`, `INT64`, `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ @deprecated("Use `truncateDivide` instead.", "0.1") def floorDivide(x: Output, y: Output, name: String = "FloorDiv"): Output = { val (cX, cY) = castArgs(x, y) Op.Builder(opType = "FloorDiv", name = name) .addInput(cX) .addInput(cY) .build().outputs(0) } /** $OpDocMathTruncateDivide * * @group MathOps * @param x First input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `UINT8`, * `INT8`, `INT16`, `INT32`, `INT64`, `COMPLEX64`, or `COMPLEX128`. * @param y Second input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `UINT8`, * `INT8`, `INT16`, `INT32`, `INT64`, `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def truncateDivide(x: Output, y: Output, name: String = "TruncateDiv"): Output = { val (cX, cY) = castArgs(x, y) Op.Builder(opType = "TruncateDiv", name = name) .addInput(cX) .addInput(cY) .build().outputs(0) } /** $OpDocMathRealDivide * * @group MathOps * @param x First input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `UINT8`, * `INT8`, `INT16`, `INT32`, `INT64`, `COMPLEX64`, or `COMPLEX128`. * @param y Second input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `UINT8`, * `INT8`, `INT16`, `INT32`, `INT64`, `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def realDivide(x: Output, y: Output, name: String = "RealDiv"): Output = { val (cX, cY) = castArgs(x, y) Op.Builder(opType = "RealDiv", name = name) .addInput(cX) .addInput(cY) .build().outputs(0) } /** $OpDocMathSquaredDifference * * @group MathOps * @param x First input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, * `INT64`, `COMPLEX64`, or `COMPLEX128`. * @param y Second input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, * `INT64`, `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def squaredDifference(x: Output, y: Output, name: String = "SquaredDifference"): Output = { val (cX, cY) = castArgs(x, y) Op.Builder(opType = "SquaredDifference", name = name) .addInput(cX) .addInput(cY) .build().outputs(0) } /** $OpDocMathMod * * @group MathOps * @param x First input tensor that must be one of the following types: `FLOAT32`, `FLOAT64`, `INT32`, or * `INT64`. * @param y Second input tensor that must be one of the following types: `FLOAT32`, `FLOAT64`, `INT32`, or * `INT64`. * @param name Name for the created op. * @return Created op output. */ def mod(x: Output, y: Output, name: String = "Mod"): Output = { val (cX, cY) = castArgs(x, y) Op.Builder(opType = "Mod", name = name) .addInput(cX) .addInput(cY) .build().outputs(0) } /** $OpDocMathFloorMod * * @group MathOps * @param x First input tensor that must be one of the following types: `FLOAT32`, `FLOAT64`, `INT32`, or * `INT64`. * @param y Second input tensor that must be one of the following types: `FLOAT32`, `FLOAT64`, `INT32`, or * `INT64`. * @param name Name for the created op. * @return Created op output. 
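*
*              Usage sketch (illustrative; constants created as elsewhere in this file). `floorMod`
*              takes the sign of the divisor, whereas `truncateMod` takes the sign of the dividend:
*              {{{
*                val x = Basic.constant(-7)
*                val y = Basic.constant(3)
*                Math.floorMod(x, y)      // ==> 2
*                Math.truncateMod(x, y)   // ==> -1
*              }}}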
*/ def floorMod(x: Output, y: Output, name: String = "FloorMod"): Output = { val (cX, cY) = castArgs(x, y) Op.Builder(opType = "FloorMod", name = name) .addInput(cX) .addInput(cY) .build().outputs(0) } /** $OpDocMathTruncateMod * * @group MathOps * @param x First input tensor that must be one of the following types: `FLOAT32`, `FLOAT64`, `INT32`, or * `INT64`. * @param y Second input tensor that must be one of the following types: `FLOAT32`, `FLOAT64`, `INT32`, or * `INT64`. * @param name Name for the created op. * @return Created op output. */ def truncateMod(x: Output, y: Output, name: String = "TruncateMod"): Output = { val (cX, cY) = castArgs(x, y) Op.Builder(opType = "TruncateMod", name = name) .addInput(cX) .addInput(cY) .build().outputs(0) } /** $OpDocMathPow * * @group MathOps * @param x First input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, * `INT64`, `COMPLEX64`, or `COMPLEX128`. * @param y Second input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, * `INT64`, `COMPLEX64`, or `COMPLEX128`. * @param name Name for the created op. * @return Created op output. */ def pow(x: Output, y: Output, name: String = "Pow"): Output = { val (cX, cY) = castArgs(x, y) Op.Builder(opType = "Pow", name = name) .addInput(cX) .addInput(cY) .build().outputs(0) } /** $OpDocMathIgammac * * @group MathOps * @param a First input tensor that must be one of the following types: `FLOAT32`, or `FLOAT64`. * @param x Second input tensor that must be one of the following types: `FLOAT32`, or `FLOAT64`. * @param name Name for the created op. * @return Created op output. */ def igammac(a: Output, x: Output, name: String = "Igammac"): Output = { val (cA, cX) = castArgs(a, x) Op.Builder(opType = "Igammac", name = name) .addInput(cA) .addInput(cX) .build().outputs(0) } /** $OpDocMathIgamma * * @group MathOps * @param a First input tensor that must be one of the following types: `FLOAT32`, or `FLOAT64`. * @param x Second input tensor that must be one of the following types: `FLOAT32`, or `FLOAT64`. * @param name Name for the created op. * @return Created op output. */ def igamma(a: Output, x: Output, name: String = "Igamma"): Output = { val (cA, cX) = castArgs(a, x) Op.Builder(opType = "Igamma", name = name) .addInput(cA) .addInput(cX) .build().outputs(0) } /** $OpDocMathZeta * * @group MathOps * @param x First input tensor that must be one of the following types: `FLOAT32`, or `FLOAT64`. * @param q Second input tensor that must be one of the following types: `FLOAT32`, or `FLOAT64`. * @param name Name for the created op. * @return Created op output. */ def zeta(x: Output, q: Output, name: String = "Zeta"): Output = { val (cX, cQ) = castArgs(x, q) Op.Builder(opType = "Zeta", name = name) .addInput(cX) .addInput(cQ) .build().outputs(0) } /** $OpDocMathPolygamma * * @group MathOps * @param n First input tensor that must be one of the following types: `FLOAT32`, or `FLOAT64`. * @param x Second input tensor that must be one of the following types: `FLOAT32`, or `FLOAT64`. * @param name Name for the created op. * @return Created op output. */ def polygamma(n: Output, x: Output, name: String = "Polygamma"): Output = { val (cN, cX) = castArgs(n, x) Op.Builder(opType = "Polygamma", name = name) .addInput(cN) .addInput(cX) .build().outputs(0) } /** $OpDocMathAtan2 * * @group MathOps * @param x First input tensor that must be one of the following types: `FLOAT32`, or `FLOAT64`. 
* @param y Second input tensor that must be one of the following types: `FLOAT32`, or `FLOAT64`. * @param name Name for the created op. * @return Created op output. */ def atan2(x: Output, y: Output, name: String = "ATan2"): Output = { val (cX, cY) = castArgs(x, y) Op.Builder(opType = "Atan2", name = name) .addInput(cX) .addInput(cY) .build().outputs(0) } /** $OpDocMathMinimum * * @group MathOps * @param x First input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, or * `INT64`. * @param y Second input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, * or `INT64`. * @param name Name for the created op. * @return Created op output. */ def minimum(x: Output, y: Output, name: String = "Minimum"): Output = { val (cX, cY) = castArgs(x, y) Op.Builder(opType = "Minimum", name = name) .addInput(cX) .addInput(cY) .build().outputs(0) } /** $OpDocMathMaximum * * @group MathOps * @param x First input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, or * `INT64`. * @param y Second input tensor that must be one of the following types: `HALF`, `FLOAT32`, `FLOAT64`, `INT32`, * or `INT64`. * @param name Name for the created op. * @return Created op output. */ def maximum(x: Output, y: Output, name: String = "Maximum"): Output = { val (cX, cY) = castArgs(x, y) Op.Builder(opType = "Maximum", name = name) .addInput(cX) .addInput(cY) .build().outputs(0) } //endregion Binary Ops /** $OpDocMathIncompleteBeta * * @group MathOps * @param a First input tensor that must be one of the following types: `FLOAT32`, or `FLOAT64`. * @param b Second input tensor that must be one of the following types: `FLOAT32`, or `FLOAT64`. * @param x Third input tensor that must be one of the following types: `FLOAT32`, or `FLOAT64`. * @param name Name for the created op. * @return Created op output. */ def incompleteBeta(a: Output, b: Output, x: Output, name: String = "IncompleteBeta"): Output = { val (cA, cB, cX) = castArgs(a, b, x) Op.Builder(opType = "Betainc", name = name) .addInput(cA) .addInput(cB) .addInput(cX) .build().outputs(0) } //region Logical Ops /** $OpDocMathLogicalNot * * @group MathOps * @param x Input tensor. * @param name Name for the created op. * @return Created op output. */ def logicalNot(x: Output, name: String = "LogicalNot"): Output = { Op.Builder(opType = "LogicalNot", name = name) .addInput(x) .build().outputs(0) } /** $OpDocMathLogicalAnd * * @group MathOps * @param x First input tensor. * @param y Second input tensor. * @param name Name for the created op. * @return Created op output. */ def logicalAnd(x: Output, y: Output, name: String = "LogicalAnd"): Output = { Op.Builder(opType = "LogicalAnd", name = name) .addInput(x) .addInput(y) .build().outputs(0) } /** $OpDocMathLogicalOr * * @group MathOps * @param x First input tensor. * @param y Second input tensor. * @param name Name for the created op. * @return Created op output. */ def logicalOr(x: Output, y: Output, name: String = "LogicalOr"): Output = { Op.Builder(opType = "LogicalOr", name = name) .addInput(x) .addInput(y) .build().outputs(0) } /** $OpDocMathLogicalXOr * * @group MathOps * @param x First input tensor. * @param y Second input tensor. * @param name Name for the created op. * @return Created op output. 
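*
*              Usage sketch (illustrative; boolean constants assumed to follow the same
*              `Basic.constant`/`Tensor` pattern used elsewhere in this file). The op is implemented as
*              `(x || y) && !(x && y)`:
*              {{{
*                val x = Basic.constant(Tensor(true, true, false, false))
*                val y = Basic.constant(Tensor(true, false, true, false))
*                Math.logicalXOr(x, y)   // ==> [false, true, true, false]
*              }}}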
*/ def logicalXOr(x: Output, y: Output, name: String = "LogicalXOr"): Output = { logicalAnd(logicalOr(x, y), logicalNot(logicalAnd(x, y)), name = name) } //endregion Logical Ops //region Comparison Ops /** $OpDocMathEqual * * @group MathOps * @param x First input tensor. * @param y Second input tensor. * @param name Name for the created op. * @return Created op output. */ def equal(x: Output, y: Output, name: String = "Equal"): Output = { val (cX, cY) = castArgs(x, y) Op.Builder(opType = "Equal", name = name) .addInput(cX) .addInput(cY) .build().outputs(0) } /** $OpDocMathNotEqual * * @group MathOps * @param x First input tensor. * @param y Second input tensor. * @param name Name for the created op. * @return Created op output. */ def notEqual(x: Output, y: Output, name: String = "NotEqual"): Output = { val (cX, cY) = castArgs(x, y) Op.Builder(opType = "NotEqual", name = name) .addInput(cX) .addInput(cY) .build().outputs(0) } /** $OpDocMathApproximatelyEqual * * @group MathOps * @param x First input tensor. * @param y Second input tensor. * @param tolerance Comparison tolerance value. * @param name Name for the created op. * @return Created op output. */ def approximatelyEqual( x: Output, y: Output, tolerance: Float = 0.00001f, name: String = "ApproximatelyEqual"): Output = { val (cX, cY) = castArgs(x, y) Op.Builder(opType = "ApproximateEqual", name = name) .addInput(cX) .addInput(cY) .setAttribute("tolerance", tolerance) .build().outputs(0) } /** $OpDocMathLess * * @group MathOps * @param x First input tensor. * @param y Second input tensor. * @param name Name for the created op. * @return Created op output. */ def less(x: Output, y: Output, name: String = "Less"): Output = { val (cX, cY) = castArgs(x, y) Op.Builder(opType = "Less", name = name) .addInput(cX) .addInput(cY) .build().outputs(0) } /** $OpDocMathLessEqual * * @group MathOps * @param x First input tensor. * @param y Second input tensor. * @param name Name for the created op. * @return Created op output. */ def lessEqual(x: Output, y: Output, name: String = "LessEqual"): Output = { val (cX, cY) = castArgs(x, y) Op.Builder(opType = "LessEqual", name = name) .addInput(cX) .addInput(cY) .build().outputs(0) } /** $OpDocMathGreater * * @group MathOps * @param x First input tensor. * @param y Second input tensor. * @param name Name for the created op. * @return Created op output. */ def greater(x: Output, y: Output, name: String = "Greater"): Output = { val (cX, cY) = castArgs(x, y) Op.Builder(opType = "Greater", name = name) .addInput(cX) .addInput(cY) .build().outputs(0) } /** $OpDocMathGreaterEqual * * @group MathOps * @param x First input tensor. * @param y Second input tensor. * @param name Name for the created op. * @return Created op output. */ def greaterEqual(x: Output, y: Output, name: String = "GreaterEqual"): Output = { val (cX, cY) = castArgs(x, y) Op.Builder(opType = "GreaterEqual", name = name) .addInput(cX) .addInput(cY) .build().outputs(0) } //endregion Comparison Ops //region Reduction Ops private[this] def reductionAxes[T <: OutputLike](tensor: T, axes: Output): Output = { if (axes != null) { axes } else { tensor match { // Fast path: Avoid creating range and rank ops if the rank is known statically.
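// For example, a statically-known rank-3 `Output` yields the constant axes [0, 1, 2] here, avoiding
// extra `Rank`/`Range` ops in the graph; the final case below falls back to computing the axes
// dynamically when the rank is unknown at graph-construction time.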
case o: Output if o.rank > -1 => Basic.constant(0 until o.rank) case o: OutputIndexedSlices if o.denseShape.shape.isFullyDefined => Basic.constant(0 until o.denseShape.shape(0)) case o: SparseOutput if o.denseShape.shape.isFullyDefined => Basic.constant(0 until o.denseShape.shape(0)) case _ => // Otherwise, we rely on range and rank to do the right thing at run-time. range(0, Basic.rank(tensor)) } } } /** $OpDocMathSum * * @group MathOps * @param input Input tensor to reduce. * @param axes Integer tensor containing the axes to reduce. If `null`, then all axes are reduced. * @param keepDims If `true`, retain the reduced axes. * @param name Name for the created op. * @return Created op output. */ def sum(input: Output, axes: Output = null, keepDims: Boolean = false, name: String = "Sum"): Output = { if (input.rank == 0) input else Op.Builder(opType = "Sum", name = name) .addInput(input) .addInput(reductionAxes(input, axes)) .setAttribute("keep_dims", keepDims) .build().outputs(0) } /** $OpDocMathMean * * @group MathOps * @param input Input tensor to reduce. * @param axes Integer tensor containing the axes to reduce. If `null`, then all axes are reduced. * @param keepDims If `true`, retain the reduced axes. * @param name Name for the created op. * @return Created op output. */ def mean(input: Output, axes: Output = null, keepDims: Boolean = false, name: String = "Mean"): Output = { if (input.rank == 0) input else Op.Builder(opType = "Mean", name = name) .addInput(input) .addInput(reductionAxes(input, axes)) .setAttribute("keep_dims", keepDims) .build().outputs(0) } /** $OpDocMathProd * * @group MathOps * @param input Input tensor to reduce. * @param axes Integer tensor containing the axes to reduce. If `null`, then all axes are reduced. * @param keepDims If `true`, retain the reduced axes. * @param name Name for the created op. * @return Created op output. */ def prod(input: Output, axes: Output = null, keepDims: Boolean = false, name: String = "Prod"): Output = { if (input.rank == 0) input else Op.Builder(opType = "Prod", name = name) .addInput(input) .addInput(reductionAxes(input, axes)) .setAttribute("keep_dims", keepDims) .build().outputs(0) } /** $OpDocMathMin * * @group MathOps * @param input Input tensor to reduce. * @param axes Integer tensor containing the axes to reduce. If `null`, then all axes are reduced. * @param keepDims If `true`, retain the reduced axes. * @param name Name for the created op. * @return Created op output. */ def min(input: Output, axes: Output = null, keepDims: Boolean = false, name: String = "Min"): Output = { if (input.rank == 0) input else Op.Builder(opType = "Min", name = name) .addInput(input) .addInput(reductionAxes(input, axes)) .setAttribute("keep_dims", keepDims) .build().outputs(0) } /** $OpDocMathMax * * @group MathOps * @param input Input tensor to reduce. * @param axes Integer tensor containing the axes to reduce. If `null`, then all axes are reduced. * @param keepDims If `true`, retain the reduced axes. * @param name Name for the created op. * @return Created op output. */ def max(input: Output, axes: Output = null, keepDims: Boolean = false, name: String = "Max"): Output = { if (input.rank == 0) input else Op.Builder(opType = "Max", name = name) .addInput(input) .addInput(reductionAxes(input, axes)) .setAttribute("keep_dims", keepDims) .build().outputs(0) } /** $OpDocMathAll * * @group MathOps * @param input Input tensor to reduce. * @param axes Integer tensor containing the axes to reduce. If `null`, then all axes are reduced. 
* @param keepDims If `true`, retain the reduced axes. * @param name Name for the created op. * @return Created op output. */ def all(input: Output, axes: Output = null, keepDims: Boolean = false, name: String = "All"): Output = { Op.Builder(opType = "All", name = name) .addInput(input) .addInput(reductionAxes(input, axes)) .setAttribute("keep_dims", keepDims) .build().outputs(0) } /** $OpDocMathAny * * @group MathOps * @param input Input tensor to reduce. * @param axes Integer tensor containing the axes to reduce. If `null`, then all axes are reduced. * @param keepDims If `true`, retain the reduced axes. * @param name Name for the created op. * @return Created op output. */ def any(input: Output, axes: Output = null, keepDims: Boolean = false, name: String = "Any"): Output = { Op.Builder(opType = "Any", name = name) .addInput(input) .addInput(reductionAxes(input, axes)) .setAttribute("keep_dims", keepDims) .build().outputs(0) } /** $OpDocMathLogSumExp * * @group MathOps * @param input Input tensor to reduce. * @param axes Integer tensor containing the axes to reduce. If `null`, then all axes are reduced. * @param keepDims If `true`, retain the reduced axes. * @param name Name for the created op. * @return Created op output. */ def logSumExp( input: Output, axes: Output = null, keepDims: Boolean = false, name: String = "LogSumExp"): Output = { if (input.rank == 0) input else Op.createWith(nameScope = name) { val maxValue = Basic.stopGradient(max(input, axes, keepDims = true)) var result = log(sum(exp(input - maxValue), axes, keepDims = true)) if (!keepDims) result += Basic.reshape(maxValue, Basic.shape(result)) else result += maxValue result } } /** $OpDocMathCountNonZero * * @group MathOps * @param input Input tensor to reduce. * @param axes Integer array containing the axes to reduce. If `null`, then all axes are reduced. * @param keepDims If `true`, retain the reduced axes. * @param name Name for the created op. * @return Created op output with `INT64` data type. */ def countNonZero( input: Output, axes: Output = null, keepDims: Boolean = false, name: String = "CountNonZero"): Output = { Op.createWith(nameScope = name) { sum(cast(notEqual(input, Basic.constant(0)), INT64), axes, keepDims) } } /** $OpDocMathCountNonZero * * @group MathOps * @param input Input tensor for which to count the number of non-zero entries. * @param name Name for the created op. * @return Created op output with `INT64` data type. */ def countNonZeroSparse[T <: OutputLike](input: T, name: String = "CountNonZero"): Output = { Op.createWith(nameScope = name) { input match { case o: Output => sum(cast(notEqual(o, Basic.constant(0)), INT64)) case o: OutputIndexedSlices => sum(cast(notEqual(o.values, Basic.constant(0)), INT64)) case o: SparseOutput => sum(cast(notEqual(o.values, Basic.constant(0)), INT64)) } } } //endregion Reduction Ops /** $OpDocMathArgmax * * @group MathOps * @param input Input tensor. * @param axes Integer tensor containing the axes to reduce. If `null`, then all axes are reduced. * @param outputDataType Data type for the output tensor. Must be `INT32` or `INT64`. * @param name Name for the created op. * @return Created op output. */ def argmax(input: Output, axes: Output = 0, outputDataType: DataType = INT64, name: String = "ArgMax"): Output = { Op.Builder(opType = "ArgMax", name = name) .addInput(input) .addInput(axes) .setAttribute("output_type", outputDataType) .build().outputs(0) } /** $OpDocMathArgmin * * @group MathOps * @param input Input tensor. 
* @param axes Integer tensor containing the axes to reduce. If `null`, then all axes are reduced. * @param outputDataType Data type for the output tensor. Must be [[INT32]] or [[INT64]]. * @param name Name for the created op. * @return Created op output. * @throws IllegalArgumentException If `axes` data type or `outputDataType` is not [[INT32]] or [[INT64]]. */ @throws[IllegalArgumentException] def argmin(input: Output, axes: Output = 0, outputDataType: DataType = INT64, name: String = "ArgMin"): Output = { Op.Builder(opType = "ArgMin", name = name) .addInput(input) .addInput(axes) .setAttribute("output_type", outputDataType) .build().outputs(0) } /** $OpDocMathBinCount * * @group MathOps * @param input [[INT32]] tensor containing non-negative values. * @param weights If not `null`, this tensor must have the same shape as `input`. For each value in `input`, the * corresponding bin count will be incremented by the corresponding weight instead of `1`. * @param minLength If not `null`, this ensures the output has length at least `minLength`, padding with zeros at * the end, if necessary. * @param maxLength If not `null`, this skips values in `input` that are equal or greater than `maxLength`, ensuring * that the output has length at most `maxLength`. * @param dataType If `weights` is `null`, this determines the data type used for the output tensor (i.e., the * tensor containing the bin counts). * @param name Name for the created op. * @return Created op output. */ def binCount( input: Output, weights: Output = null, minLength: Output = null, maxLength: Output = null, dataType: DataType = INT32, name: String = "BinCount"): Output = { val inputNonEmpty = greater(prod(Basic.shape(input)), 0) var outputSize = cast(inputNonEmpty, INT32) * (max(input) + 1) if (minLength != null) outputSize = maximum(minLength, outputSize) if (maxLength != null) outputSize = minimum(maxLength, outputSize) val effectiveWeights = { if (weights != null) { weights } else { Basic.zeros(dataType, Shape.scalar()) } } Op.Builder(opType = "Bincount", name = name) .addInput(input) .addInput(outputSize) .addInput(effectiveWeights) .build().outputs(0) } /** $OpDocMathCumsum * * @group MathOps * @param input Input tensor. * @param axis [[INT32]] tensor containing the axis along which to perform the cumulative sum. * @param exclusive Boolean value indicating whether to perform an exclusive cumulative sum. * @param reverse Boolean value indicating whether to perform a reverse cumulative sum. * @param name Name for the created op. * @return Created op output. */ def cumsum( input: Output, axis: Output = 0, exclusive: Boolean = false, reverse: Boolean = false, name: String = "CumSum"): Output = { Op.Builder(opType = "Cumsum", name = name) .addInput(input) .addInput(axis) .setAttribute("exclusive", exclusive) .setAttribute("reverse", reverse) .build().outputs(0) } /** $OpDocMathCumprod * * @group MathOps * @param input Input tensor. * @param axis `INT32` tensor containing the axis along which to perform the cumulative product. * @param exclusive Boolean value indicating whether to perform an exclusive cumulative product. * @param reverse Boolean value indicating whether to perform a reverse cumulative product. * @param name Name for the created op. * @return Created op output. 
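*
*              Usage sketch (illustrative; constants as elsewhere in this file). With the default
*              `exclusive = false` and `reverse = false`, entry `i` covers entries `0..i`; with
*              `exclusive = true` it covers `0..i-1` only:
*              {{{
*                val x = Basic.constant(Tensor(2, 3, 4))
*                Math.cumprod(x)                     // ==> [2, 6, 24]
*                Math.cumprod(x, exclusive = true)   // ==> [1, 2, 6]
*                Math.cumsum(x, reverse = true)      // ==> [9, 7, 4]
*              }}}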
*/ def cumprod( input: Output, axis: Output = 0, exclusive: Boolean = false, reverse: Boolean = false, name: String = "CumProd"): Output = { Op.Builder(opType = "Cumprod", name = name) .addInput(input) .addInput(axis) .setAttribute("exclusive", exclusive) .setAttribute("reverse", reverse) .build().outputs(0) } //region Segment Ops /** $OpDocMathSegmentSum * * @group MathOps * @param data Data (must have a numeric data type -- i.e., representing a number). * @param segmentIndices Segment indices (must have data type of [[INT32]] or [[INT64]]). Values should be sorted * and can be repeated. * @param name Name for the created op. * @return Created op output. */ def segmentSum(data: Output, segmentIndices: Output, name: String = "SegmentSum"): Output = { Op.Builder(opType = "SegmentSum", name = name) .addInput(data) .addInput(segmentIndices) .build().outputs(0) } /** $OpDocMathSegmentMean * * @group MathOps * @param data Data (must have a numeric data type -- i.e., representing a number). * @param segmentIndices Segment indices (must have data type of [[INT32]] or [[INT64]]). Values should be sorted * and can be repeated. * @param name Name for the created op. * @return Created op output. */ def segmentMean(data: Output, segmentIndices: Output, name: String = "SegmentMean"): Output = { Op.Builder(opType = "SegmentMean", name = name) .addInput(data) .addInput(segmentIndices) .build().outputs(0) } /** $OpDocMathSegmentProd * * @group MathOps * @param data Data (must have a numeric data type -- i.e., representing a number). * @param segmentIndices Segment indices (must have data type of [[INT32]] or [[INT64]]). Values should be sorted * and can be repeated. * @param name Name for the created op. * @return Created op output. */ def segmentProd(data: Output, segmentIndices: Output, name: String = "SegmentProd"): Output = { Op.Builder(opType = "SegmentProd", name = name) .addInput(data) .addInput(segmentIndices) .build().outputs(0) } /** $OpDocMathSegmentMin * * @group MathOps * @param data Data (must have a numeric data type -- i.e., representing a number). * @param segmentIndices Segment indices (must have data type of [[INT32]] or [[INT64]]). Values should be sorted * and can be repeated. * @param name Name for the created op. * @return Created op output. */ def segmentMin(data: Output, segmentIndices: Output, name: String = "SegmentMin"): Output = { Op.Builder(opType = "SegmentMin", name = name) .addInput(data) .addInput(segmentIndices) .build().outputs(0) } /** $OpDocMathSegmentMax * * @group MathOps * @param data Data (must have a numeric data type -- i.e., representing a number). * @param segmentIndices Segment indices (must have data type of [[INT32]] or [[INT64]]). Values should be sorted * and can be repeated. * @param name Name for the created op. * @return Created op output. */ def segmentMax(data: Output, segmentIndices: Output, name: String = "SegmentMax"): Output = { Op.Builder(opType = "SegmentMax", name = name) .addInput(data) .addInput(segmentIndices) .build().outputs(0) } /** $OpDocMathUnsortedSegmentSum * * @group MathOps * @param data Data (must have a numeric data type -- i.e., representing a number). * @param segmentIndices Segment indices (must have data type of [[INT32]] or [[INT64]]). * @param segmentsNumber Number of segments (must have data type of [[INT32]]). * @param name Name for the created op. * @return Created op output. 
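*
*              Usage sketch (illustrative; constants as elsewhere in this file). Unlike the sorted
*              segment ops above, the segment indices here need not be sorted:
*              {{{
*                val data     = Basic.constant(Tensor(1, 2, 3, 4))
*                val segments = Basic.constant(Tensor(0, 1, 0, 1))
*                Math.unsortedSegmentSum(data, segments, Basic.constant(2))   // ==> [4, 6]
*              }}}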
*/ def unsortedSegmentSum( data: Output, segmentIndices: Output, segmentsNumber: Output, name: String = "UnsortedSegmentSum" ): Output = { Op.Builder(opType = "UnsortedSegmentSum", name = name) .addInput(data) .addInput(segmentIndices) .addInput(segmentsNumber) .build().outputs(0) } /** Helper function for `unsortedSegmentMean` and `unsortedSegmentSqrtN` that computes the number of segment entries * with zero entries set to `1`, in order to allow for division by `N`. * * @param data Data (must have a numeric data type -- i.e., representing a number). * @param segmentIndices Segment indices (must have data type of [[INT32]] or [[INT64]]). * @param segmentsNumber Number of segments (must have data type of [[INT32]]). * @return Created op output. */ protected def unsortedSegmentN( data: Output, segmentIndices: Output, segmentsNumber: Output, name: String = "UnsortedSegmentN" ): Output = Op.createWithNameScope(name) { // `binCount` does not support negative indices and so we use `unsortedSegmentSum`. val ones = Basic.ones(data.dataType, Basic.shape(segmentIndices)) val N = unsortedSegmentSum(ones, segmentIndices, segmentsNumber) val outputRank = Basic.rank(data) - Basic.rank(segmentIndices) val outputRankTiled = Basic.tile(Basic.ones(segmentsNumber.dataType, Shape(1)), outputRank.expandDims(0)) val broadcastShape = Basic.concatenate(Seq(segmentsNumber.expandDims(0), outputRankTiled)) maximum(1, Basic.reshape(N, broadcastShape)) } /** $OpDocMathUnsortedSegmentMean * * @group MathOps * @param data Data (must have a numeric data type -- i.e., representing a number). * @param segmentIndices Segment indices (must have data type of [[INT32]] or [[INT64]]). * @param segmentsNumber Number of segments (must have data type of [[INT32]]). * @param name Name for the created op. * @return Created op output. */ def unsortedSegmentMean( data: Output, segmentIndices: Output, segmentsNumber: Output, name: String = "UnsortedSegmentMean" ): Output = Op.createWithNameScope(name) { val N = unsortedSegmentN(data, segmentIndices, segmentsNumber, name = "N") unsortedSegmentSum(data, segmentIndices, segmentsNumber, name = "Sum") / N } /** $OpDocMathUnsortedSegmentProd * * @group MathOps * @param data Data (must have a numeric data type -- i.e., representing a number). * @param segmentIndices Segment indices (must have data type of [[INT32]] or [[INT64]]). * @param segmentsNumber Number of segments (must have data type of [[INT32]]). * @param name Name for the created op. * @return Created op output. */ def unsortedSegmentProd( data: Output, segmentIndices: Output, segmentsNumber: Output, name: String = "UnsortedSegmentProd" ): Output = { Op.Builder(opType = "UnsortedSegmentProd", name = name) .addInput(data) .addInput(segmentIndices) .addInput(segmentsNumber) .build().outputs(0) } /** $OpDocMathUnsortedSegmentMin * * @group MathOps * @param data Data (must have a numeric data type -- i.e., representing a number). * @param segmentIndices Segment indices (must have data type of [[INT32]] or [[INT64]]). * @param segmentsNumber Number of segments (must have data type of [[INT32]]). * @param name Name for the created op. * @return Created op output. 
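*
*              Usage sketch (illustrative; constants as elsewhere in this file):
*              {{{
*                val data     = Basic.constant(Tensor(5, 1, 3, 7))
*                val segments = Basic.constant(Tensor(0, 0, 1, 1))
*                Math.unsortedSegmentMin(data, segments, Basic.constant(2))   // ==> [1, 3]
*              }}}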
*/ def unsortedSegmentMin( data: Output, segmentIndices: Output, segmentsNumber: Output, name: String = "UnsortedSegmentMin"): Output = { Op.Builder(opType = "UnsortedSegmentMin", name = name) .addInput(data) .addInput(segmentIndices) .addInput(segmentsNumber) .build().outputs(0) } /** $OpDocMathUnsortedSegmentMax * * @group MathOps * @param data Data (must have a numeric data type -- i.e., representing a number). * @param segmentIndices Segment indices (must have data type of [[INT32]] or [[INT64]]). * @param segmentsNumber Number of segments (must have data type of [[INT32]]). * @param name Name for the created op. * @return Created op output. */ def unsortedSegmentMax( data: Output, segmentIndices: Output, segmentsNumber: Output, name: String = "UnsortedSegmentMax"): Output = { Op.Builder(opType = "UnsortedSegmentMax", name = name) .addInput(data) .addInput(segmentIndices) .addInput(segmentsNumber) .build().outputs(0) } /** $OpDocMathUnsortedSegmentSqrtN * * @group MathOps * @param data Data (must have a numeric data type -- i.e., representing a number). * @param segmentIndices Segment indices (must have data type of [[INT32]] or [[INT64]]). * @param segmentsNumber Number of segments (must have data type of [[INT32]]). * @param name Name for the created op. * @return Created op output. */ def unsortedSegmentSqrtN( data: Output, segmentIndices: Output, segmentsNumber: Output, name: String = "UnsortedSegmentSqrtN" ): Output = Op.createWithNameScope(name) { val N = unsortedSegmentN(data, segmentIndices, segmentsNumber, name = "N") unsortedSegmentSum(data, segmentIndices, segmentsNumber, name = "Sum") / sqrt(N) } /** $OpDocMathSparseSegmentSum * * @group MathOps * @param data Data (must have a numeric data type -- i.e., representing a number). * @param indices One-dimensional tensor with rank equal to that of `segmentIndices`. * @param segmentIndices Segment indices (must have data type of [[INT32]] or [[INT64]]). Values should be sorted * and can be repeated. * @param numSegments Optional `INT32` scalar indicating the size of the output tensor. * @param name Name for the created op. * @return Created op output. */ def sparseSegmentSum( data: Output, indices: Output, segmentIndices: Output, numSegments: Output = null, name: String = "SparseSegmentSum"): Output = { if (numSegments == null) { Op.Builder(opType = "SparseSegmentSum", name = name) .addInput(data) .addInput(indices) .addInput(segmentIndices) .build().outputs(0) } else { Op.Builder(opType = "SparseSegmentSumWithNumSegments", name = name) .addInput(data) .addInput(indices) .addInput(segmentIndices) .addInput(numSegments) .build().outputs(0) } } /** $OpDocMathSparseSegmentMean * * @group MathOps * @param data Data (must have a numeric data type -- i.e., representing a number). * @param indices One-dimensional tensor with rank equal to that of `segmentIndices`. * @param segmentIndices Segment indices (must have data type of [[INT32]] or [[INT64]]). Values should be sorted * and can be repeated. * @param numSegments Optional `INT32` scalar indicating the size of the output tensor. * @param name Name for the created op. * @return Created op output. 
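*
*              Usage sketch (illustrative; the nested `Tensor` constructor for a 2-D constant is an
*              assumption). `indices` first selects rows of `data`, and `segmentIndices` then groups the
*              selected rows; here rows 0 and 2 are averaged into a single segment:
*              {{{
*                val data           = Basic.constant(Tensor(Tensor(1.0f, 2.0f), Tensor(3.0f, 4.0f), Tensor(5.0f, 6.0f)))
*                val indices        = Basic.constant(Tensor(0, 2))
*                val segmentIndices = Basic.constant(Tensor(0, 0))
*                Math.sparseSegmentMean(data, indices, segmentIndices)   // ==> [[3.0, 4.0]]
*              }}}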
*/ def sparseSegmentMean( data: Output, indices: Output, segmentIndices: Output, numSegments: Output = null, name: String = "SparseSegmentMean"): Output = { if (numSegments == null) { Op.Builder(opType = "SparseSegmentMean", name = name) .addInput(data) .addInput(indices) .addInput(segmentIndices) .build().outputs(0) } else { Op.Builder(opType = "SparseSegmentMeanWithNumSegments", name = name) .addInput(data) .addInput(indices) .addInput(segmentIndices) .addInput(numSegments) .build().outputs(0) } } /** $OpDocMathSparseSegmentSumSqrtN * * @group MathOps * @param data Data (must have a numeric data type -- i.e., representing a number). * @param indices One-dimensional tensor with rank equal to that of `segmentIndices`. * @param segmentIndices Segment indices (must have data type of [[INT32]] or [[INT64]]). Values should be sorted * and can be repeated. * @param numSegments Optional `INT32` scalar indicating the size of the output tensor. * @param name Name for the created op. * @return Created op output. */ def sparseSegmentSumSqrtN( data: Output, indices: Output, segmentIndices: Output, numSegments: Output = null, name: String = "SparseSegmentSumSqrtN"): Output = { if (numSegments == null) { Op.Builder(opType = "SparseSegmentSqrtN", name = name) .addInput(data) .addInput(indices) .addInput(segmentIndices) .build().outputs(0) } else { Op.Builder(opType = "SparseSegmentSqrtNWithNumSegments", name = name) .addInput(data) .addInput(indices) .addInput(segmentIndices) .addInput(numSegments) .build().outputs(0) } } //endregion Segment Ops //region Matrix Ops /** $OpDocMathDiag * * @group MathOps * @param diagonal Diagonal values, represented as a rank-`K` tensor, where `K` can be at most `3`. * @param name Name for the created op. * @return Created op output. */ def diag(diagonal: Output, name: String = "Diag"): Output = { Op.Builder(opType = "Diag", name = name) .addInput(diagonal) .build().outputs(0) } /** $OpDocMathDiagPart * * @group MathOps * @param input Rank-`K` input tensor, where `K` is either `2`, `4`, or `6`. * @param name Name for the created op. * @return Created op output. */ def diagPart(input: Output, name: String = "DiagPart"): Output = { Op.Builder(opType = "DiagPart", name = name) .addInput(input) .build().outputs(0) } /** $OpDocMathMatrixDiag * * @group MathOps * @param diagonal Rank-`K` input tensor, where `K >= 1`. * @param name Name for the created op. * @return Created op output with rank equal to `K + 1` and shape equal to the shape of `diagonal`, with its last * dimension duplicated. */ def matrixDiag(diagonal: Output, name: String = "MatrixDiag"): Output = { Op.Builder(opType = "MatrixDiag", name = name) .addInput(diagonal) .build().outputs(0) } /** $OpDocMathMatrixSetDiag * * @group MathOps * @param input Rank-`K+1` tensor, where `K >= 2`. * @param diagonal Rank-`K` tensor, where `K >= 1`. * @param name Name for the created op. * @return Created op output with rank equal to `K + 1` and shape equal to the shape of `input`. */ def matrixSetDiag(input: Output, diagonal: Output, name: String = "MatrixSetDiag"): Output = { Op.Builder(opType = "MatrixSetDiag", name = name) .addInput(input) .addInput(diagonal) .build().outputs(0) } /** $OpDocMathMatrixDiagPart * * @group MathOps * @param input Rank-`K` tensor, where `K >= 2`. * @param name Name for the created op. * @return Created op output containing the diagonal(s) and having shape equal to * `input.shape[:-2] + [min(input.shape[-2:])]`. 
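*
*              Usage sketch (illustrative; 2-D constant construction assumed as above):
*              {{{
*                val m = Basic.constant(Tensor(Tensor(1, 0, 0), Tensor(0, 2, 0), Tensor(0, 0, 3)))
*                Math.matrixDiagPart(m)   // ==> [1, 2, 3]
*                Math.trace(m)            // ==> 6
*              }}}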
*/ def matrixDiagPart(input: Output, name: String = "MatrixDiagPart"): Output = { Op.Builder(opType = "MatrixDiagPart", name = name) .addInput(input) .build().outputs(0) } /** $OpDocMathMatrixBandPart * * @group MathOps * @param input Input tensor. * @param numSubDiagonals Scalar `INT64` tensor that contains the number of sub-diagonals to keep. If negative, * the entire lower triangle is kept. * @param numSuperDiagonals Scalar `INT64` tensor that contains the number of super-diagonals to keep. If negative, * the entire upper triangle is kept. * @param name Name for the created op. */ def matrixBandPart( input: Output, numSubDiagonals: Output, numSuperDiagonals: Output, name: String = "MatrixBandPart"): Output = { if(!numSubDiagonals.dataType.isInteger) throw new IllegalArgumentException(s"'numSubDiagonals' must be integer, but was ${numSubDiagonals.dataType}.") if(!numSuperDiagonals.dataType.isInteger) throw new IllegalArgumentException(s"'numSuperDiagonals' must be integer, but was ${numSuperDiagonals.dataType}.") Op.Builder(opType = "MatrixBandPart", name = name) .addInput(input) .addInput(cast(numSubDiagonals, INT64)) .addInput(cast(numSuperDiagonals, INT64)) .build().outputs(0) } /** $OpDocMathTrace * * @group MathOps * @param input Input tensor. * @param name Name for the created op. * @return Created op output. */ def trace(input: Output, name: String = "Trace"): Output = { Op.createWithNameScope(name) { sum(matrixDiagPart(input), axes = -1) } } /** $OpDocMathScalarMul * * @group MathOps * @param scalar Scalar tensor. * @param tensor Tensor to multiply the scalar tensor with. * @param name Name for the created op. * @return Created op output. */ def scalarMul[T: OutputOps](scalar: Output, tensor: T, name: String = "ScalarMul"): T = { Op.createWithNameScope(name) { implicitly[OutputOps[T]].applyUnary(tensor, o => multiply(scalar, o)) } } /** $OpDocMathMatmul * * @group MathOps * @param a First input tensor with data type one of: `BFLOAT16`, `FLOAT16`, `FLOAT32`, `FLOAT64`, * `INT32`, `COMPLEX64`, `COMPLEX128`. * @param b Second input tensor with data type one of: `BFLOAT16`, `FLOAT16`, `FLOAT32`, `FLOAT64`, * `INT32`, `COMPLEX64`, `COMPLEX128`. * @param transposeA If `true`, `a` is transposed before the multiplication. * @param transposeB If `true`, `b` is transposed before the multiplication. * @param conjugateA If `true`, `a` is conjugated before the multiplication. * @param conjugateB If `true`, `b` is conjugated before the multiplication. * @param aIsSparse If `true`, `a` is treated as a sparse matrix (i.e., it is assumed it contains many zeros). * @param bIsSparse If `true`, `b` is treated as a sparse matrix (i.e., it is assumed it contains many zeros). * @param name Name for the created op. * @return Created op output that has the same data type as `a` and `b` and where each inner-most matrix is the * product of the corresponding matrices in `a` and `b`. */ def matmul( a: Output, b: Output, transposeA: Boolean = false, transposeB: Boolean = false, conjugateA: Boolean = false, conjugateB: Boolean = false, aIsSparse: Boolean = false, bIsSparse: Boolean = false, name: String = "MatMul"): Output = { val (cA, cB) = castArgs(a, b) val sparseMatMulDataTypes = Set[DataType](BFLOAT16, FLOAT32) if (!aIsSparse && !bIsSparse && (cA.rank == -1 || cA.rank > 2) && (cB.rank == -1 || cB.rank > 2)) { // "BatchMatMul" does not support transpose, so we conjugate the matrix and use adjoint instead. // The "conj" op is a no-op for real matrices. 
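// For example, a request for `transposeA = true, conjugateA = true` is satisfied by passing the operand
// through unchanged and setting `adj_x = true`, since the adjoint is exactly the conjugate transpose;
// `transposeConjugateToAdjoint` below encodes this mapping for all four transpose/conjugate combinations.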
val (x, adjointX) = transposeConjugateToAdjoint(cA, transposeA, conjugateA) val (y, adjointY) = transposeConjugateToAdjoint(cB, transposeB, conjugateB) Op.Builder(opType = "BatchMatMul", name = name) .addInput(x) .addInput(y) .setAttribute("adj_x", adjointX) .setAttribute("adj_y", adjointY) .build().outputs(0) } else if (cA.dataType == BFLOAT16 || cB.dataType == BFLOAT16 || // "MatMul" does not currently support this type. ((aIsSparse || bIsSparse) && sparseMatMulDataTypes.contains(cA.dataType) && sparseMatMulDataTypes.contains(cB.dataType))) { val (x, transposeX) = transposeConjugateToTranspose(cA, transposeA, conjugateA) val (y, transposeY) = transposeConjugateToTranspose(cB, transposeB, conjugateB) Op.Builder(opType = "SparseMatMul", name = name) .addInput(x) .addInput(y) .setAttribute("transpose_a", transposeX) .setAttribute("transpose_b", transposeY) .setAttribute("a_is_sparse", aIsSparse) .setAttribute("b_is_sparse", bIsSparse) .build().outputs(0) } else { val (x, transposeX) = transposeConjugateToTranspose(cA, transposeA, conjugateA) val (y, transposeY) = transposeConjugateToTranspose(cB, transposeB, conjugateB) Op.Builder(opType = "MatMul", name = name) .addInput(x) .addInput(y) .setAttribute("transpose_a", transposeX) .setAttribute("transpose_b", transposeY) .build().outputs(0) } } private[this] def transposeConjugateToAdjoint( tensor: Output, transpose: Boolean, conj: Boolean): (Output, Boolean) = { (transpose, conj) match { case (false, false) => (tensor, false) case (false, true) => (conjugate(tensor), false) case (true, false) => (conjugate(tensor), true) case (true, true) => (tensor, true) } } private[this] def transposeConjugateToTranspose( tensor: Output, transpose: Boolean, conj: Boolean): (Output, Boolean) = { (transpose, conj) match { case (false, false) => (tensor, false) case (false, true) => (conjugate(tensor), false) case (true, false) => (tensor, true) case (true, true) => (conjugate(tensor), true) } } /** $OpDocMathCross * * @group MathOps * @param a First input tensor. * @param b Second input tensor. * @param name Name for the created op. * @return Created op output. */ def cross(a: Output, b: Output, name: String = "Cross"): Output = { val (cA, cB) = castArgs(a, b) Op.Builder(opType = "Cross", name = name) .addInput(cA) .addInput(cB) .build().outputs(0) } /** $OpDocMathTensorDot * * @group MathOps * @param a First tensor. * @param b Second tensor. * @param numAxes Number of axes to contract. * @return Created op output. */ def tensorDot(a: Output, b: Output, numAxes: Int): Output = { tensorDot(a, b, numAxes, "TensorDot") } /** $OpDocMathTensorDot * * @group MathOps * @param a First tensor. * @param b Second tensor. * @param numAxes Number of axes to contract. * @param name Name for the created ops. * @return Created op output. */ def tensorDot(a: Output, b: Output, numAxes: Int, name: String): Output = { if (numAxes < 1) throw InvalidArgumentException("'numAxes' must be at least 1.") if (a.rank == -1) throw InvalidArgumentException( "Cannot use 'tensorDot' with an unknown input tensor shape. Use 'tensorDotDynamic' instead.") tensorDot(a, b, a.rank - numAxes until a.rank, 0 until numAxes, name) } /** $OpDocMathTensorDot * * @group MathOps * @param a First tensor. * @param b Second tensor. * @param axesA Axes to contract in `a`. * @param axesB Axes to contract in `b`. * @return Created op output. 
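*
*            Usage sketch (illustrative; 2-D constants as above). Contracting the last axis of `a` with
*            the first axis of `b` reduces to an ordinary matrix product:
*            {{{
*              val a = Basic.constant(Tensor(Tensor(1.0f, 2.0f), Tensor(3.0f, 4.0f)))
*              val b = Basic.constant(Tensor(Tensor(5.0f, 6.0f), Tensor(7.0f, 8.0f)))
*              Math.tensorDot(a, b, Seq(1), Seq(0))   // same values as Math.matmul(a, b)
*            }}}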
*/ def tensorDot(a: Output, b: Output, axesA: Seq[Int], axesB: Seq[Int]): Output = { tensorDot(a, b, axesA, axesB, "TensorDot") } /** $OpDocMathTensorDot * * @group MathOps * @param a First tensor. * @param b Second tensor. * @param axesA Axes to contract in `a`. * @param axesB Axes to contract in `b`. * @param name Name for the created ops. * @return Created op output. */ def tensorDot(a: Output, b: Output, axesA: Seq[Int], axesB: Seq[Int], name: String): Output = { if (axesA.lengthCompare(axesB.size) != 0) throw InvalidArgumentException( s"Different number of contraction axes for 'a' and 'b', ${axesA.size} != ${axesB.size}.") /** Helper method to perform transpose and reshape for the tensor contraction op. This method is helpful in reducing * `tensorDot` to `matmul` using the `transpose` and the `reshape` ops. The method takes a tensor and performs the * correct transpose and reshape operations for the provided indices. It returns the reshaped tensor as well as a * list of indices necessary to reshape the tensor back to its proper shape after the matrix multiplication. * * @param a Tensor being reshaped. * @param axes Sequence of unique indices of axes of `a`. * @param flipped If `true`, the method assumes that `a` is the second argument in the contraction operation. * @return Tuple that contains: (i) the reshaped tensor `a` that allows contraction via `matmul`, (ii) an `INT32` * tensor that contains the shape of the free axes, and (iii) a sequence of integers representing the * inferred static shape of the free axes. */ def tensorDotReshape(a: Output, axes: Seq[Int], flipped: Boolean = false): (Output, Output, Seq[Int]) = { if (a.shape.isFullyDefined) { val mappedAxes = axes.map(i => if (i >= 0) i else i + a.rank) val prodAxes = mappedAxes.map(a.shape(_)).product val free = (0 until a.rank).filter(!mappedAxes.contains(_)) val freeAxes = free.map(a.shape(_)) val prodFree = freeAxes.product val permutation = if (flipped) mappedAxes ++ free else free ++ mappedAxes val newShape = if (flipped) Shape(prodAxes, prodFree) else Shape(prodFree, prodAxes) val reshapedA = Basic.reshape(Basic.transpose(a, permutation), newShape) val freeAxesOutput = if (freeAxes.isEmpty) Basic.constant(Tensor(INT32)) else Basic.constant(freeAxes) (reshapedA, freeAxesOutput, freeAxes) } else { val (mappedAxes, freeAxesStatic) = { if (a.rank != -1) { val mappedAxes = axes.map(i => if (i >= 0) i else i + a.rank) val free = (0 until a.rank).filter(!mappedAxes.contains(_)) val freeAxes = free.map(a.shape(_)) (mappedAxes, freeAxes) } else { (axes, null) } } val shapeA = Basic.shape(a) val rankA = Basic.rank(a) var axesO = Basic.constant(mappedAxes, name = "Axes") axesO = ((axesO >= 0).cast(INT32) * axesO) + ((axesO < 0).cast(INT32) * (axesO + rankA)) val (free, _) = Basic.listDiff(Math.range(0, rankA), axesO) val freeAxes = Basic.gather(shapeA, free) val axesAxes = Basic.gather(shapeA, axesO) val prodFree = freeAxes.prod() val prodAxes = axesAxes.prod() val (permutation, newShape) = { if (flipped) { val permutation = Basic.concatenate(Seq(axesO, free), 0) val newShape = Basic.stack(Seq(prodAxes, prodFree)) (permutation, newShape) } else { val permutation = Basic.concatenate(Seq(free, axesO), 0) val newShape = Basic.stack(Seq(prodFree, prodAxes)) (permutation, newShape) } } val reshapedA = Basic.reshape(Basic.transpose(a, permutation), newShape) (reshapedA, freeAxes, freeAxesStatic) } } Op.createWithNameScope(name, Set(a.op, b.op)) { val (reshapedA, freeA, freeAStatic) = tensorDotReshape(a, axesA) val (reshapedB, freeB, 
freeBStatic) = tensorDotReshape(b, axesB, flipped = true) val abMatmul = matmul(reshapedA, reshapedB) val reshaped = Basic.reshape(abMatmul, Basic.concatenate(Seq(freeA, freeB), 0)) if (freeAStatic != null && freeBStatic != null) reshaped.setShape(Shape.fromSeq(freeAStatic ++ freeBStatic)) reshaped } } /** Dynamic version (i.e., where `numAxes` may be a symbolic tensor) of the `tensorDot` op. * * $OpDocMathTensorDot * * @group MathOps * @param a First tensor. * @param b Second tensor. * @param numAxes Number of axes to contract. * @return Created op output. */ def tensorDotDynamic(a: Output, b: Output, numAxes: Output): Output = { tensorDotDynamic(a, b, numAxes, "TensorDot") } /** Dynamic version (i.e., where `numAxes` may be a symbolic tensor) of the `tensorDot` op. * * $OpDocMathTensorDot * * @group MathOps * @param a First tensor. * @param b Second tensor. * @param numAxes Number of axes to contract. * @param name Name for the created ops. * @return Created op output. */ def tensorDotDynamic(a: Output, b: Output, numAxes: Output, name: String): Output = { if (numAxes.rank != 0) throw InvalidArgumentException("'numAxes' must be a scalar.") tensorDotDynamic(a, b, range(a.rank - numAxes, a.rank), range(0, numAxes), name) } /** Dynamic version (i.e., where `axesA` and `axesB` may be symbolic tensors) of the `tensorDot` op. * * $OpDocMathTensorDot * * @group MathOps * @param a First tensor. * @param b Second tensor. * @param axesA Axes to contract in `a`. * @param axesB Axes to contract in `b`. * @return Created op output. */ def tensorDotDynamic(a: Output, b: Output, axesA: Output, axesB: Output): Output = { tensorDotDynamic(a, b, axesA, axesB, "TensorDot") } /** Dynamic version (i.e., where `axesA` and `axesB` may be symbolic tensors) of the `tensorDot` op. * * $OpDocMathTensorDot * * @group MathOps * @param a First tensor. * @param b Second tensor. * @param axesA Axes to contract in `a`. * @param axesB Axes to contract in `b`. * @param name Name for the created ops. * @return Created op output. */ def tensorDotDynamic(a: Output, b: Output, axesA: Output, axesB: Output, name: String = "TensorDot"): Output = { if (axesA.rank != 1) throw InvalidArgumentException("'axesA' must be a vector.") if (axesB.rank != 1) throw InvalidArgumentException("'axesB' must be a vector.") /** Helper method to perform transpose and reshape for the tensor contraction op. This method is helpful in reducing * `tensorDot` to `matmul` using the `transpose` and the `reshape` ops. The method takes a tensor and performs the * correct transpose and reshape operations for the provided indices. It returns the reshaped tensor as well as a * list of indices necessary to reshape the tensor back to its proper shape after the matrix multiplication. * * @param a Tensor being reshaped. * @param axes Sequence of unique indices of axes of `a`. * @param flipped If `true`, the method assumes that `a` is the second argument in the contraction operation. * @return Tuple that contains: (i) the reshaped tensor `a` that allows contraction via `matmul`, and (ii) an * `INT32` tensor that contains the shape of the free axes. 
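 * @example Illustrative shape walk-through (the concrete shape below is an assumption for illustration): for
 *          a tensor `a` of shape `[2, 3, 4]` and `axes = [1, 2]`, the only free axis is `0`, so `a` is
 *          transposed so that the free axes come first and is then reshaped to `[2, 12]`, which `matmul` can
 *          consume directly; with `flipped = true` the contraction axes come first instead, giving `[12, 2]`.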
*/ def tensorDotReshape(a: Output, axes: Output, flipped: Boolean = false): (Output, Output) = { val shapeA = Basic.shape(a) val rankA = Basic.rank(a) val mappedAxes = ((axes >= 0).cast(INT32) * axes) + ((axes < 0).cast(INT32) * (axes + rankA)) val (free, _) = Basic.listDiff(Math.range(0, rankA), mappedAxes) val freeAxes = Basic.gather(shapeA, free) val axesAxes = Basic.gather(shapeA, mappedAxes) val prodFree = freeAxes.prod() val prodAxes = axesAxes.prod() val (permutation, newShape) = { if (flipped) { val permutation = Basic.concatenate(Seq(mappedAxes, free), 0) val newShape = Basic.stack(Seq(prodAxes, prodFree)) (permutation, newShape) } else { val permutation = Basic.concatenate(Seq(free, mappedAxes), 0) val newShape = Basic.stack(Seq(prodFree, prodAxes)) (permutation, newShape) } } val reshapedA = Basic.reshape(Basic.transpose(a, permutation), newShape) (reshapedA, freeAxes) } Op.createWithNameScope(name, Set(a.op, b.op)) { val (reshapedA, freeA) = tensorDotReshape(a, axesA) val (reshapedB, freeB) = tensorDotReshape(b, axesB, flipped = true) val abMatmul = matmul(reshapedA, reshapedB) Basic.reshape(abMatmul, Basic.concatenate(Seq(freeA, freeB), 0)) } } //endregion Matrix Ops //region Complex Ops /** $OpDocMathComplex * * @group MathOps * @param real Tensor containing the real component. Must have [[FLOAT32]] or [[FLOAT64]] data type. * @param imag Tensor containing the imaginary component. Must have [[FLOAT32]] or [[FLOAT64]] data type. * @param name Name for the created op. * @return Created op output with data type being either [[COMPLEX64]] or [[COMPLEX128]]. */ def complex(real: Output, imag: Output, name: String = "Complex"): Output = { val (cReal, cImag) = castArgs(real, imag) val outputDataType = (cReal.dataType, cImag.dataType) match { case (FLOAT32, FLOAT32) => COMPLEX64 case (FLOAT64, FLOAT64) => COMPLEX128 case _ => throw new IllegalArgumentException( s"'real' (dataType = ${real.dataType}) and 'imag' (dataType = ${imag.dataType}) must both have the same data " + s"type, which must be either 'FLOAT32' or 'FLOAT64'.") } Op.Builder(opType = "Complex", name = name) .addInput(cReal) .addInput(cImag) .setAttribute("Tout", outputDataType) .build().outputs(0) } /** $OpDocMathReal * * @group MathOps * @param input Input tensor. * @param name Name for the created op. * @return Created op output. */ def real[T <: OutputLike : OutputOps](input: T, name: String = "Real"): T = { if (!input.dataType.isComplex) { input } else { implicitly[OutputOps[T]] .applyUnary(input, o => Op.Builder(opType = "Real", name = name) .addInput(o) .setAttribute("Tout", o.dataType.real) .build().outputs(0)) } } /** $OpDocMathImag * * @group MathOps * @param input Input tensor. * @param name Name for the created op. * @return Created op output. */ def imag[T <: OutputLike : OutputOps](input: T, name: String = "Imag"): T = { if (!input.dataType.isComplex) { input } else { implicitly[OutputOps[T]] .applyUnary(input, o => Op.Builder(opType = "Imag", name = name) .addInput(o) .setAttribute("Tout", o.dataType.real) .build().outputs(0)) } } /** $OpDocMathAngle * * @group MathOps * @param input Input tensor. * @param name Name for the created op. * @return Created op output. * @throws IllegalArgumentException If the provided tensor is not numeric. 
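 * @example An illustrative sketch (the inputs `re` and `im` are assumptions for illustration, not values
 *          defined in this file):
 *          {{{
 *            // Assuming `re` and `im` are FLOAT32 tensors with the same shape:
 *            val z     = complex(re, im)  // COMPLEX64 tensor.
 *            val theta = angle(z)         // FLOAT32 tensor holding atan2(im, re), element-wise.
 *          }}}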
*/ @throws[IllegalArgumentException] def angle[T <: OutputLike : OutputOps](input: T, name: String = "Angle"): T = { implicitly[OutputOps[T]] .applyUnary(input, o => { if (o.dataType.isComplex) { Op.Builder(opType = "Angle", name = name) .addInput(o) .setAttribute("Tout", o.dataType.real) .build().outputs(0) } else if (o.dataType.isNumeric) { Basic.zerosLike(o) } else { throw new IllegalArgumentException("'angle' can only take numeric tensors as input.") } }) } /** $OpDocMathConjugate * * @group MathOps * @param input Input tensor. * @param name Name for the created op. * @return Created op output. * @throws IllegalArgumentException If the provided tensor is not numeric. */ @throws[IllegalArgumentException] def conjugate[T <: OutputLike : OutputOps](input: T, name: String = "Conjugate"): T = { implicitly[OutputOps[T]] .applyUnary(input, o => { if (o.dataType.isComplex) { Op.Builder(opType = "Conj", name = name) .addInput(o) .build().outputs(0) } else if (o.dataType.isNumeric) { o } else { throw new IllegalArgumentException("'conjugate' can only take numeric tensors as input.") } }) } //endregion Complex Ops //region Quantization Ops // TODO: [OPS] quantization //endregion Quantization Ops //region Bucketization Ops /** $OpDocMathBucketize * * @group MathOps * @param input Numeric tensor to bucketize. * @param boundaries Sorted sequence of `Float`s specifying the boundaries of the buckets. * @param name Name for the created op. * @return Created op output. */ def bucketize(input: Output, boundaries: Seq[Float], name: String = "Bucketize"): Output = { Op.Builder(opType = "Bucketize", name = name) .addInput(input) .setAttribute("boundaries", boundaries.toArray) .build().outputs(0) } //endregion Bucketization Ops //region Other Ops /** $OpDocMathZerosFraction * * @group MathOps * @param input Input tensor. * @param name Name for the created op. * @return Created op output, with `FLOAT32` data type. */ def zerosFraction(input: Output, name: String = "ZerosFraction"): Output = { Op.createWithNameScope(name, Set(input.op)) { val zero = Basic.constant(0, input.dataType, name = "Zero") mean(cast(equal(input, zero), FLOAT32)) } } //endregion Other Ops } object Math extends Math { case class MathOps(output: Output) { //region Math Operators /** $OpDocMathNegate * * @group MathOps * @return Result as a new tensor. */ def unary_- : Output = negate /** $OpDocMathAdd * * @group MathOps * @return Result as a new tensor. */ def +(other: Output): Output = add(other) /** $OpDocMathSubtract * * @group MathOps * @return Result as a new tensor. */ def -(other: Output): Output = subtract(other) /** $OpDocMathMultiply * * @group MathOps * @return Result as a new tensor. */ def *(other: Output): Output = multiply(other) private[this] def divHelper(x: Output, y: Output): Output = { if (x.dataType.isFloatingPoint || x.dataType.isComplex || y.dataType.isFloatingPoint || y.dataType.isComplex) Math.divide(x, y) else Math.truncateDivide(x, y) } /** $OpDocMathDivide * * @group MathOps * @return Result as a new tensor. */ def /(other: Output): Output = divHelper(output, other) /** $OpDocMathMod * * @group MathOps * @return Result as a new tensor. */ def %(other: Output): Output = mod(other) /** $OpDocMathPow * * @group MathOps * @return Result as a new tensor. */ def **(other: Output): Output = pow(other) /** $OpDocMathPow * * @group MathOps * @return Result as a new tensor. */ def ^(other: Output): Output = pow(other) /** $OpDocMathLogicalNot * * @group MathOps * @return Result as a new tensor. */ def unary_! 
: Output = logicalNot /** $OpDocMathLogicalAnd * * @group MathOps * @return Result as a new tensor. */ def &&(other: Output): Output = logicalAnd(other) /** $OpDocMathLogicalOr * * @group MathOps * @return Result as a new tensor. */ def ||(other: Output): Output = logicalOr(other) /** $OpDocMathEqual * * @group MathOps * @return Result as a new tensor. */ def ==(other: Output): Output = equal(other) /** $OpDocMathNotEqual * * @group MathOps * @return Result as a new tensor. */ def !=(other: Output): Output = notEqual(other) /** $OpDocMathLess * * @group MathOps * @return Result as a new tensor. */ def <(other: Output): Output = less(other) /** $OpDocMathLessEqual * * @group MathOps * @return Result as a new tensor. */ def <=(other: Output): Output = lessEqual(other) /** $OpDocMathGreater * * @group MathOps * @return Result as a new tensor. */ def >(other: Output): Output = greater(other) /** $OpDocMathGreaterEqual * * @group MathOps * @return Result as a new tensor. */ def >=(other: Output): Output = greaterEqual(other) //endregion Math Operators /** $OpDocMathCast * * @group MathOps * @param dataType Target data type. * @return Result as a new tensor. */ def cast(dataType: DataType): Output = Math.cast(output, dataType) /** $OpDocMathBitcast * * @group MathOps * @param dataType Target data type. * @return Result as a new tensor. */ def bitcast(dataType: DataType): Output = Math.bitcast(output, dataType) //region Math Unary Ops /** $OpDocMathAbs * * @group MathOps * @return Result as a new tensor. */ def abs: Output = Math.abs(output) /** $OpDocMathNegate * * @group MathOps * @return Result as a new tensor. */ def negate: Output = Math.negate(output) /** $OpDocMathReciprocal * * @group MathOps * @return Result as a new tensor. */ def reciprocal: Output = Math.reciprocal(output) /** $OpDocMathSquare * * @group MathOps * @return Result as a new tensor. */ def square: Output = Math.square(output) /** $OpDocMathSqrt * * @group MathOps * @return Result as a new tensor. */ def sqrt: Output = Math.sqrt(output) /** $OpDocMathRsqrt * * @group MathOps * @return Result as a new tensor. */ def rsqrt: Output = Math.rsqrt(output) /** $OpDocMathExp * * @group MathOps * @return Result as a new tensor. */ def exp: Output = Math.exp(output) /** $OpDocMathExpm1 * * @group MathOps * @return Result as a new tensor. */ def expm1: Output = Math.expm1(output) /** $OpDocMathLog * * @group MathOps * @return Result as a new tensor. */ def log: Output = Math.log(output) /** $OpDocMathLog1p * * @group MathOps * @return Result as a new tensor. */ def log1p: Output = Math.log1p(output) /** $OpDocMathSin * * @group MathOps * @return Result as a new tensor. */ def sin: Output = Math.sin(output) /** $OpDocMathCos * * @group MathOps * @return Result as a new tensor. */ def cos: Output = Math.cos(output) /** $OpDocMathTan * * @group MathOps * @return Result as a new tensor. */ def tan: Output = Math.tan(output) /** $OpDocMathAsin * * @group MathOps * @return Result as a new tensor. */ def asin: Output = Math.asin(output) /** $OpDocMathAcos * * @group MathOps * @return Result as a new tensor. */ def acos: Output = Math.acos(output) /** $OpDocMathAtan * * @group MathOps * @return Result as a new tensor. */ def atan: Output = Math.atan(output) /** $OpDocMathSinh * * @group MathOps * @return Result as a new tensor. */ def sinh: Output = Math.sinh(output) /** $OpDocMathCosh * * @group MathOps * @return Result as a new tensor. 
*/ def cosh: Output = Math.cosh(output) /** $OpDocMathTanh * * @group MathOps * @return Result as a new tensor. */ def tanh: Output = Math.tanh(output) /** $OpDocMathAsinh * * @group MathOps * @return Result as a new tensor. */ def asinh: Output = Math.asinh(output) /** $OpDocMathAcosh * * @group MathOps * @return Result as a new tensor. */ def acosh: Output = Math.acosh(output) /** $OpDocMathAtanh * * @group MathOps * @return Result as a new tensor. */ def atanh: Output = Math.atanh(output) /** $OpDocMathLogGamma * * @group MathOps * @return Result as a new tensor. */ def logGamma: Output = Math.logGamma(output) /** $OpDocMathDigamma * * @group MathOps * @return Result as a new tensor. */ def digamma: Output = Math.digamma(output) /** $OpDocMathErf * * @group MathOps * @return Result as a new tensor. */ def erf: Output = Math.erf(output) /** $OpDocMathErfc * * @group MathOps * @return Result as a new tensor. */ def erc: Output = Math.erfc(output) /** $OpDocMathSigmoid * * @group MathOps * @return Result as a new tensor. */ def sigmoid: Output = Math.sigmoid(output) /** $OpDocMathSign * * @group MathOps * @return Result as a new tensor. */ def sign: Output = Math.sign(output) /** $OpDocMathRound * * @group MathOps * @return Result as a new tensor. */ def round: Output = Math.round(output) /** $OpDocMathRoundInt * * @group MathOps * @return Result as a new tensor. */ def roundInt: Output = Math.roundInt(output) /** $OpDocMathFloor * * @group MathOps * @return Result as a new tensor. */ def floor: Output = Math.floor(output) /** $OpDocMathCeil * * @group MathOps * @return Result as a new tensor. */ def ceil: Output = Math.ceil(output) /** $OpDocMathIsNaN * * @group MathOps * @return Result as a new tensor. */ def isNaN: Output = Math.isNaN(output) /** $OpDocMathIsInf * * @group MathOps * @return Result as a new tensor. */ def isInf: Output = Math.isInf(output) /** $OpDocMathIsFinite * * @group MathOps * @return Result as a new tensor. */ def isFinite: Output = Math.isFinite(output) //endregion Math Unary Ops //region Math Binary Ops /** $OpDocMathAdd * * @group MathOps * @return Result as a new tensor. */ def add(other: Output): Output = Math.add(output, other) /** $OpDocMathSubtract * * @group MathOps * @return Result as a new tensor. */ def subtract(other: Output): Output = Math.subtract(output, other) /** $OpDocMathMultiply * * @group MathOps * @return Result as a new tensor. */ def multiply(other: Output): Output = Math.multiply(output, other) /** $OpDocMathDivide * * @group MathOps * @return Result as a new tensor. */ def divide(other: Output): Output = Math.divide(output, other) /** $OpDocMathFloorDivide * * @group MathOps * @return Result as a new tensor. */ @deprecated("Use `truncateDivide` instead.", "0.1") def floorDivide(other: Output): Output = Math.floorDivide(output, other) /** $OpDocMathTruncateDivide * * @group MathOps * @return Result as a new tensor. */ def truncateDivide(other: Output): Output = Math.truncateDivide(output, other) /** $OpDocMathRealDivide * * @group MathOps * @return Result as a new tensor. */ def realDivide(other: Output): Output = Math.realDivide(output, other) /** $OpDocMathSquaredDifference * * @group MathOps * @return Result as a new tensor. */ def squaredDifference(other: Output): Output = Math.squaredDifference(output, other) /** $OpDocMathMod * * @group MathOps * @return Result as a new tensor. */ def mod(other: Output): Output = Math.mod(output, other) /** $OpDocMathFloorMod * * @group MathOps * @return Result as a new tensor. 
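 * @example Illustrative values (an assumption for illustration, not taken from the original docs): for a
 *          dividend of `-7` and a divisor of `3`, `floorMod` yields `2` (the result takes the sign of the
 *          divisor), whereas `truncateMod` yields `-1` (the result takes the sign of the dividend).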
*/ def floorMod(other: Output): Output = Math.floorMod(output, other) /** $OpDocMathTruncateMod * * @group MathOps * @return Result as a new tensor. */ def truncateMod(other: Output): Output = Math.truncateMod(output, other) /** $OpDocMathPow * * @group MathOps * @return Result as a new tensor. */ def pow(other: Output): Output = Math.pow(output, other) /** $OpDocMathIgammac * * @group MathOps * @return Result as a new tensor. */ def igammac(other: Output): Output = Math.igammac(output, other) /** $OpDocMathIgamma * * @group MathOps * @return Result as a new tensor. */ def igamma(other: Output): Output = Math.igamma(output, other) /** $OpDocMathZeta * * @group MathOps * @return Result as a new tensor. */ def zeta(other: Output): Output = Math.zeta(output, other) /** $OpDocMathPolygamma * * @group MathOps * @return Result as a new tensor. */ def polygamma(other: Output): Output = Math.polygamma(output, other) /** $OpDocMathAtan2 * * @group MathOps * @return Result as a new tensor. */ def atan2(other: Output): Output = Math.atan2(output, other) /** $OpDocMathMinimum * * @group MathOps * @return Result as a new tensor. */ def minimum(other: Output): Output = Math.minimum(output, other) /** $OpDocMathMaximum * * @group MathOps * @return Result as a new tensor. */ def maximum(other: Output): Output = Math.maximum(output, other) //endregion Math Binary Ops //region Math Logical Ops /** $OpDocMathLogicalNot * * @group MathOps * @return Result as a new tensor. */ def logicalNot: Output = Math.logicalNot(output) /** $OpDocMathLogicalAnd * * @group MathOps * @return Result as a new tensor. */ def logicalAnd(other: Output): Output = Math.logicalAnd(output, other) /** $OpDocMathLogicalOr * * @group MathOps * @return Result as a new tensor. */ def logicalOr(other: Output): Output = Math.logicalOr(output, other) /** $OpDocMathLogicalXOr * * @group MathOps * @return Result as a new tensor. */ def logicalXOr(other: Output): Output = Math.logicalXOr(output, other) //endregion Math Logical Ops //region Math Comparison Ops /** $OpDocMathEqual * * @group MathOps * @return Result as a new tensor. */ def equal(other: Output): Output = Math.equal(output, other) /** $OpDocMathNotEqual * * @group MathOps * @return Result as a new tensor. */ def notEqual(other: Output): Output = Math.notEqual(output, other) /** $OpDocMathApproximatelyEqual * * @group MathOps * @return Result as a new tensor. */ def approximatelyEqual(other: Output): Output = Math.approximatelyEqual(output, other) /** $OpDocMathLess * * @group MathOps * @return Result as a new tensor. */ def less(other: Output): Output = Math.less(output, other) /** $OpDocMathLessEqual * * @group MathOps * @return Result as a new tensor. */ def lessEqual(other: Output): Output = Math.lessEqual(output, other) /** $OpDocMathGreater * * @group MathOps * @return Result as a new tensor. */ def greater(other: Output): Output = Math.greater(output, other) /** $OpDocMathGreaterEqual * * @group MathOps * @return Result as a new tensor. */ def greaterEqual(other: Output): Output = Math.greaterEqual(output, other) //endregion Math Comparison Ops //region Math Reduction Ops /** $OpDocMathSum * * @group MathOps * @param axes Integer tensor containing the axes to reduce. If `null`, then all axes are reduced. * @param keepDims If `true`, retain the reduced axes. * @return Result as a new tensor. 
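 * @example A minimal illustrative sketch (the shape, and the reliance on the library's implicit conversion of
 *          `Int` values and plain `Output`s into the types expected here, are assumptions for illustration):
 *          {{{
 *            // Assuming `x` is an Output of shape [2, 3] enriched with `MathOps`:
 *            x.sum()                           // Scalar: sum over all elements.
 *            x.sum(axes = 0)                   // Shape [3]: reduce over the first axis.
 *            x.sum(axes = 0, keepDims = true)  // Shape [1, 3]: the reduced axis is kept with size 1.
 *          }}}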
*/ def sum(axes: Output = null, keepDims: Boolean = false): Output = Math.sum(output, axes, keepDims) /** $OpDocMathMean * * @group MathOps * @param axes Integer tensor containing the axes to reduce. If `null`, then all axes are reduced. * @param keepDims If `true`, retain the reduced axes. * @return Result as a new tensor. */ def mean(axes: Output = null, keepDims: Boolean = false): Output = Math.mean(output, axes, keepDims) /** $OpDocMathProd * * @group MathOps * @param axes Integer tensor containing the axes to reduce. If `null`, then all axes are reduced. * @param keepDims If `true`, retain the reduced axes. * @return Result as a new tensor. */ def prod(axes: Output = null, keepDims: Boolean = false): Output = Math.prod(output, axes, keepDims) /** $OpDocMathMin * * @group MathOps * @param axes Integer tensor containing the axes to reduce. If `null`, then all axes are reduced. * @param keepDims If `true`, retain the reduced axes. * @return Result as a new tensor. */ def min(axes: Output = null, keepDims: Boolean = false): Output = Math.min(output, axes, keepDims) /** $OpDocMathMax * * @group MathOps * @param axes Integer tensor containing the axes to reduce. If `null`, then all axes are reduced. * @param keepDims If `true`, retain the reduced axes. * @return Result as a new tensor. */ def max(axes: Output = null, keepDims: Boolean = false): Output = Math.max(output, axes, keepDims) /** $OpDocMathAll * * @group MathOps * @param axes Integer tensor containing the axes to reduce. If `null`, then all axes are reduced. * @param keepDims If `true`, retain the reduced axes. * @return Result as a new tensor. */ def all(axes: Output = null, keepDims: Boolean = false): Output = Math.all(output, axes, keepDims) /** $OpDocMathAny * * @group MathOps * @param axes Integer tensor containing the axes to reduce. If `null`, then all axes are reduced. * @param keepDims If `true`, retain the reduced axes. * @return Result as a new tensor. */ def any(axes: Output = null, keepDims: Boolean = false): Output = Math.any(output, axes, keepDims) /** $OpDocMathLogSumExp * * @group MathOps * @param axes Integer tensor containing the axes to reduce. If `null`, then all axes are reduced. * @param keepDims If `true`, retain the reduced axes. * @return Result as a new tensor. */ def logSumExp(axes: Output = null, keepDims: Boolean = false): Output = Math.logSumExp(output, axes, keepDims) /** $OpDocMathCountNonZero * * @group MathOps * @param axes Integer tensor containing the axes to reduce. If `null`, then all axes are reduced. * @param keepDims If `true`, retain the reduced axes. * @return Result as a new tensor. */ def countNonZero(axes: Output = null, keepDims: Boolean = false): Output = Math.countNonZero(output, axes, keepDims) //endregion Math Reduction Ops /** $OpDocMathArgmax * * @group MathOps * @param axes Integer tensor containing the axes to reduce. If `null`, then all axes are reduced. * @param outputDataType Data type for the output tensor. Must be `INT32` or `INT64`. * @return Result as a new tensor. */ def argmax(axes: Output = 0, outputDataType: DataType = INT64): Output = Math.argmax(output, axes, outputDataType) /** $OpDocMathArgmin * * @group MathOps * @param axes Integer tensor containing the axes to reduce. If `null`, then all axes are reduced. * @param outputDataType Data type for the output tensor. Must be `INT32` or `INT64`. * @return Result as a new tensor. 
*/ def argmin(axes: Output = 0, outputDataType: DataType = INT64): Output = Math.argmin(output, axes, outputDataType) /** $OpDocMathBinCount * * @group MathOps * @param weights If not `null`, this tensor must have the same shape as `input`. For each value in `input`, the * corresponding bin count will be incremented by the corresponding weight instead of `1`. * @param minLength If not `null`, this ensures the output has length at least `minLength`, padding with zeros at * the end, if necessary. * @param maxLength If not `null`, this skips values in `input` that are equal or greater than `maxLength`, * ensuring that the output has length at most `maxLength`. * @param dataType If `weights` is `null`, this determines the data type used for the output tensor (i.e., the * tensor containing the bin counts). * @return Result as a new tensor. */ def binCount( weights: Output = null, minLength: Output = null, maxLength: Output = null, dataType: DataType = INT32): Output = { Math.binCount(output, weights, minLength, maxLength, dataType) } /** $OpDocMathCumsum * * @group MathOps * @param axis [[INT32]] tensor containing the axis along which to perform the cumulative sum. * @param exclusive Boolean value indicating whether to perform an exclusive cumulative sum. * @param reverse Boolean value indicating whether to perform a reverse cumulative sum. * @return Result as a new tensor. */ def cumsum(axis: Output = 0, exclusive: Boolean = false, reverse: Boolean = false): Output = { Math.cumsum(output, axis, exclusive, reverse) } /** $OpDocMathCumprod * * @group MathOps * @param axis [[INT32]] tensor containing the axis along which to perform the cumulative product. * @param exclusive Boolean value indicating whether to perform an exclusive cumulative product. * @param reverse Boolean value indicating whether to perform a reverse cumulative product. * @return Result as a new tensor. */ def cumprod(axis: Output = 0, exclusive: Boolean = false, reverse: Boolean = false): Output = { Math.cumprod(output, axis, exclusive, reverse) } //region Math Segment Ops /** $OpDocMathSegmentSum * * @group MathOps * * @param segmentIndices Segment indices (must have data type of [[INT32]] or [[INT64]]). Values should be sorted * and can be repeated. * @return Result as a new tensor. */ def segmentSum(segmentIndices: Output): Output = Math.segmentSum(output, segmentIndices) /** $OpDocMathSegmentMean * * @group MathOps * * @param segmentIndices Segment indices (must have data type of [[INT32]] or [[INT64]]). Values should be sorted * and can be repeated. * @return Result as a new tensor. */ def segmentMean(segmentIndices: Output): Output = Math.segmentMean(output, segmentIndices) /** $OpDocMathSegmentProd * * @group MathOps * * @param segmentIndices Segment indices (must have data type of [[INT32]] or [[INT64]]). Values should be sorted * and can be repeated. * @return Result as a new tensor. */ def segmentProd(segmentIndices: Output): Output = Math.segmentProd(output, segmentIndices) /** $OpDocMathSegmentMin * * @group MathOps * * @param segmentIndices Segment indices (must have data type of [[INT32]] or [[INT64]]). Values should be sorted * and can be repeated. * @return Result as a new tensor. */ def segmentMin(segmentIndices: Output): Output = Math.segmentMin(output, segmentIndices) /** $OpDocMathSegmentMax * * @group MathOps * * @param segmentIndices Segment indices (must have data type of [[INT32]] or [[INT64]]). Values should be sorted * and can be repeated. * @return Result as a new tensor. 
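 * @example Illustrative values (an assumption for illustration, not taken from the original docs): for data
 *          `[1, 2, 3, 4]` and `segmentIndices = [0, 0, 1, 1]`, `segmentMax` yields `[2, 4]`, i.e., the
 *          maximum over each contiguous group of entries sharing a segment index.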
*/ def segmentMax(segmentIndices: Output): Output = Math.segmentMax(output, segmentIndices) /** $OpDocMathUnsortedSegmentSum * * @group MathOps * * @param segmentIndices Segment indices (must have data type of [[INT32]] or [[INT64]]). * @param segmentsNumber Number of segments (must have data type of [[INT32]]). * @return Result as a new tensor. */ def unsortedSegmentSum(segmentIndices: Output, segmentsNumber: Output): Output = { Math.unsortedSegmentSum(output, segmentIndices, segmentsNumber) } /** $OpDocMathUnsortedSegmentMean * * @group MathOps * * @param segmentIndices Segment indices (must have data type of [[INT32]] or [[INT64]]). * @param segmentsNumber Number of segments (must have data type of [[INT32]]). * @return Result as a new tensor. */ def unsortedSegmentMean(segmentIndices: Output, segmentsNumber: Output): Output = { Math.unsortedSegmentMean(output, segmentIndices, segmentsNumber) } /** $OpDocMathUnsortedSegmentProd * * @group MathOps * * @param segmentIndices Segment indices (must have data type of [[INT32]] or [[INT64]]). * @param segmentsNumber Number of segments (must have data type of [[INT32]]). * @return Result as a new tensor. */ def unsortedSegmentProd(segmentIndices: Output, segmentsNumber: Output): Output = { Math.unsortedSegmentProd(output, segmentIndices, segmentsNumber) } /** $OpDocMathUnsortedSegmentMin * * @group MathOps * * @param segmentIndices Segment indices (must have data type of [[INT32]] or [[INT64]]). * @param segmentsNumber Number of segments (must have data type of [[INT32]]). * @return Result as a new tensor. */ def unsortedSegmentMin(segmentIndices: Output, segmentsNumber: Output): Output = { Math.unsortedSegmentMin(output, segmentIndices, segmentsNumber) } /** $OpDocMathUnsortedSegmentMax * * @group MathOps * * @param segmentIndices Segment indices (must have data type of [[INT32]] or [[INT64]]). * @param segmentsNumber Number of segments (must have data type of [[INT32]]). * @return Result as a new tensor. */ def unsortedSegmentMax(segmentIndices: Output, segmentsNumber: Output): Output = { Math.unsortedSegmentMax(output, segmentIndices, segmentsNumber) } /** $OpDocMathUnsortedSegmentSqrtN * * @group MathOps * * @param segmentIndices Segment indices (must have data type of [[INT32]] or [[INT64]]). * @param segmentsNumber Number of segments (must have data type of [[INT32]]). * @return Result as a new tensor. */ def unsortedSegmentSqrtN(segmentIndices: Output, segmentsNumber: Output): Output = { Math.unsortedSegmentSqrtN(output, segmentIndices, segmentsNumber) } /** $OpDocMathSparseSegmentSum * * @group MathOps * * @param indices One-dimensional tensor with rank equal to that of `segmentIndices`. * @param segmentIndices Segment indices (must have data type of [[INT32]] or [[INT64]]). Values should be sorted * and can be repeated. * @param numSegments Optional `INT32` scalar indicating the size of the output tensor. * @return Result as a new tensor. */ def sparseSegmentSum(indices: Output, segmentIndices: Output, numSegments: Output = null): Output = { Math.sparseSegmentSum(output, indices, segmentIndices, numSegments) } /** $OpDocMathSparseSegmentMean * * @group MathOps * * @param indices One-dimensional tensor with rank equal to that of `segmentIndices`. * @param segmentIndices Segment indices (must have data type of [[INT32]] or [[INT64]]). Values should be sorted * and can be repeated. * @param numSegments Optional `INT32` scalar indicating the size of the output tensor. * @return Result as a new tensor. 
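 * @example Illustrative values (an assumption for illustration, not taken from the original docs): for data
 *          with rows `r0, r1, r2, r3`, `indices = [0, 1, 3]`, and `segmentIndices = [0, 0, 1]`, the op first
 *          selects rows `r0`, `r1`, and `r3`, and then averages them per segment, yielding
 *          `[(r0 + r1) / 2, r3]`.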
*/ def sparseSegmentMean(indices: Output, segmentIndices: Output, numSegments: Output = null): Output = { Math.sparseSegmentMean(output, indices, segmentIndices, numSegments) } /** $OpDocMathSparseSegmentSumSqrtN * * @group MathOps * * @param indices One-dimensional tensor with rank equal to that of `segmentIndices`. * @param segmentIndices Segment indices (must have data type of [[INT32]] or [[INT64]]). Values should be sorted * and can be repeated. * @param numSegments Optional `INT32` scalar indicating the size of the output tensor. * @return Result as a new tensor. */ def sparseSegmentSumSqrtN(indices: Output, segmentIndices: Output, numSegments: Output = null): Output = { Math.sparseSegmentSumSqrtN(output, indices, segmentIndices, numSegments) } //endregion Math Segment Ops //region Math Matrix Ops /** $OpDocMathDiag * * @group MathOps * * @return Result as a new tensor. */ def diag: Output = Math.diag(output) /** $OpDocMathDiagPart * * @group MathOps * * @return Result as a new tensor. */ def diagPart: Output = Math.diagPart(output) /** $OpDocMathMatrixDiag * * @group MathOps * * @return Result as a new tensor with rank equal to `K + 1` and shape equal to the shape of `diagonal`, with its * last dimension duplicated. */ def matrixDiag: Output = Math.matrixDiag(output) /** $OpDocMathMatrixSetDiag * * @group MathOps * * @param diagonal Rank-`K` tensor, where `K >= 1`. * @return Result as a new tensor with rank equal to `K + 1` and shape equal to the shape of `input`. */ def matrixSetDiag(diagonal: Output): Output = Math.matrixSetDiag(output, diagonal) /** $OpDocMathMatrixDiagPart * * @group MathOps * * @return Result as a new tensor containing the diagonal(s) and having shape equal to * `input.shape[:-2] + [min(input.shape[-2:])]`. */ def matrixDiagPart: Output = Math.matrixDiagPart(output) /** $OpDocMathMatrixBandPart * * @group MathOps * * @param numSubDiagonals Scalar `INT64` tensor that contains the number of sub-diagonals to keep. If negative, * the entire lower triangle is kept. * @param numSuperDiagonals Scalar `INT64` tensor that contains the number of super-diagonals to keep. If negative, * the entire upper triangle is kept. * @return Result as a new tensor containing the expected banded tensor and has rank `K` and same shape as `input`. */ def matrixBandPart(numSubDiagonals: Output, numSuperDiagonals: Output): Output = { Math.matrixBandPart(output, numSubDiagonals, numSuperDiagonals) } /** $OpDocMathTrace * * @group MathOps * * @return Result as a new tensor. */ def trace: Output = Math.trace(output) /** $OpDocMathMatmul * * @group MathOps * * @param other Output to multiply with, with data type one of: `BFLOAT16`, `FLOAT16`, `FLOAT32`, `FLOAT64`, * `INT32`, `COMPLEX64`, `COMPLEX128`. * @param transposeA If `true`, this tensor is transposed before the multiplication. * @param transposeB If `true`, `other` is transposed before the multiplication. * @param conjugateA If `true`, this tensor is conjugated before the multiplication. * @param conjugateB If `true`, `other` is conjugated before the multiplication. * @param aIsSparse If `true`, this tensor is treated as a sparse matrix (i.e., it is assumed it contains many * zeros). * @param bIsSparse If `true`, `other` is treated as a sparse matrix (i.e., it is assumed it contains many * zeros). * @return Result as a new tensor. 
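 * @example A minimal illustrative sketch (the shapes are assumptions for illustration, not taken from this
 *          file, and `a` and `b` are assumed to be `Output`s enriched with `MathOps`):
 *          {{{
 *            // Assuming `a` has shape [2, 3] and `b` has shape [4, 3]:
 *            val c = a.matmul(b, transposeB = true)  // Shape [2, 4]; `b` is transposed before multiplying.
 *          }}}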
*/ def matmul( other: Output, transposeA: Boolean = false, transposeB: Boolean = false, conjugateA: Boolean = false, conjugateB: Boolean = false, aIsSparse: Boolean = false, bIsSparse: Boolean = false): Output = { Math.matmul(output, other, transposeA, transposeB, conjugateA, conjugateB, aIsSparse, bIsSparse) } /** $OpDocMathCross * * @group MathOps * * @return Result as a new tensor. */ def cross(other: Output): Output = Math.cross(output, other) /** $OpDocMathTensorDot * * @group MathOps * @param other Tensor to contract with. * @param numAxes Number of axes to contract. * @return Created op output. */ def tensorDot(other: Output, numAxes: Int): Output = { Math.tensorDot(output, other, numAxes) } /** $OpDocMathTensorDot * * @group MathOps * @param other Tensor to contract with. * @param numAxes Number of axes to contract. * @param name Name for the created ops. * @return Created op output. */ def tensorDot(other: Output, numAxes: Int, name: String): Output = { Math.tensorDot(output, other, numAxes, name) } /** $OpDocMathTensorDot * * @group MathOps * @param other Tensor to contract with. * @param axes Axes to contract in this tensor. * @param axesOther Axes to contract in `other`. * @return Created op output. */ def tensorDot(other: Output, axes: Seq[Int], axesOther: Seq[Int]): Output = { Math.tensorDot(output, other, axes, axesOther) } /** $OpDocMathTensorDot * * @group MathOps * @param other Tensor to contract with. * @param axes Axes to contract in this tensor. * @param axesOther Axes to contract in `other`. * @param name Name for the created ops. * @return Created op output. */ def tensorDot(other: Output, axes: Seq[Int], axesOther: Seq[Int], name: String): Output = { Math.tensorDot(output, other, axes, axesOther, name) } /** Dynamic version (i.e., where `numAxes` may be a symbolic tensor) of the `tensorDot` op. * * $OpDocMathTensorDot * * @group MathOps * @param other Tensor to contract with. * @param numAxes Number of axes to contract. * @return Created op output. */ def tensorDotDynamic(other: Output, numAxes: Output): Output = { Math.tensorDotDynamic(output, other, numAxes) } /** Dynamic version (i.e., where `numAxes` may be a symbolic tensor) of the `tensorDot` op. * * $OpDocMathTensorDot * * @group MathOps * @param other Tensor to contract with. * @param numAxes Number of axes to contract. * @param name Name for the created ops. * @return Created op output. */ def tensorDotDynamic(other: Output, numAxes: Output, name: String): Output = { Math.tensorDotDynamic(output, other, numAxes, name) } /** Dynamic version (i.e., where `axes` and `axesOther` may be symbolic tensors) of the `tensorDot` op. * * $OpDocMathTensorDot * * @group MathOps * @param other Tensor to contract with. * @param axes Axes to contract in this tensor. * @param axesOther Axes to contract in `other`. * @return Created op output. */ def tensorDotDynamic(other: Output, axes: Output, axesOther: Output): Output = { Math.tensorDotDynamic(output, other, axes, axesOther) } /** Dynamic version (i.e., where `axes` and `axesOther` may be symbolic tensors) of the `tensorDot` op. * * $OpDocMathTensorDot * * @group MathOps * @param other Tensor to contract with. * @param axes Axes to contract in this tensor. * @param axesOther Axes to contract in `other`. * @param name Name for the created ops. * @return Created op output. 
*/ def tensorDotDynamic(other: Output, axes: Output, axesOther: Output, name: String): Output = { Math.tensorDotDynamic(output, other, axes, axesOther, name) } //endregion Math Matrix Ops //region Math Complex Ops /** $OpDocMathReal * * @group MathOps * * @return Result as a new tensor. */ def real: Output = Math.real(output) /** $OpDocMathImag * * @group MathOps * * @return Result as a new tensor. */ def imag: Output = Math.imag(output) /** $OpDocMathAngle * * @group MathOps * * @return Result as a new tensor. */ def angle: Output = Math.angle(output) /** $OpDocMathConjugate * * @group MathOps * * @return Result as a new tensor. */ def conjugate: Output = Math.conjugate(output) //endregion Math Complex Ops //region Math Quantization Ops // TODO: [OPS] quantization //endregion Math Quantization Ops //region Math Bucketization Ops /** $OpDocMathBucketize * * @group MathOps * * @param boundaries Sorted sequence of `Float`s specifying the boundaries of the buckets. * @return Result as a new tensor. */ def bucketize(boundaries: Seq[Float]): Output = Math.bucketize(output, boundaries) //endregion Math Bucketization Ops //region Math Other Ops /** $OpDocMathZerosFraction * * @group MathOps * * @return Result as a new tensor, with `FLOAT32` data type. */ def zerosFraction: Output = Math.zerosFraction(output) //endregion Math Other Ops } private[ops] object Gradients { GradientsRegistry.registerNonDifferentiable("Range") GradientsRegistry.registerNonDifferentiable("LinSpace") GradientsRegistry.registerNonDifferentiable("IsNan") GradientsRegistry.registerNonDifferentiable("IsInf") GradientsRegistry.registerNonDifferentiable("IsFinite") GradientsRegistry.registerNonDifferentiable("LogicalNot") GradientsRegistry.registerNonDifferentiable("LogicalAnd") GradientsRegistry.registerNonDifferentiable("LogicalOr") GradientsRegistry.registerNonDifferentiable("Equal") GradientsRegistry.registerNonDifferentiable("NotEqual") GradientsRegistry.registerNonDifferentiable("ApproximateEqual") GradientsRegistry.registerNonDifferentiable("Less") GradientsRegistry.registerNonDifferentiable("LessEqual") GradientsRegistry.registerNonDifferentiable("Greater") GradientsRegistry.registerNonDifferentiable("GreaterEqual") GradientsRegistry.register("Select", selectGradient) GradientsRegistry.register("Cast", castGradient) GradientsRegistry.register("AddN", addNGradient) GradientsRegistry.register("AccumulateNV2", accumulateNGradient) GradientsRegistry.register("Abs", absGradient) GradientsRegistry.register("ComplexAbs", complexAbsGradient) GradientsRegistry.register("Neg", negateGradient) GradientsRegistry.register("Reciprocal", reciprocalGradient) GradientsRegistry.register("ReciprocalGrad", reciprocalHessian) GradientsRegistry.register("Square", squareGradient) GradientsRegistry.register("Sqrt", sqrtGradient) GradientsRegistry.register("SqrtGrad", sqrtHessian) GradientsRegistry.register("Rsqrt", rsqrtGradient) GradientsRegistry.register("RsqrtGrad", rsqrtHessian) GradientsRegistry.register("Exp", expGradient) GradientsRegistry.register("Expm1", expm1Gradient) GradientsRegistry.register("Log", logGradient) GradientsRegistry.register("Log1p", log1pGradient) GradientsRegistry.register("Sin", sinGradient) GradientsRegistry.register("Cos", cosGradient) GradientsRegistry.register("Tan", tanGradient) GradientsRegistry.register("Asin", asinGradient) GradientsRegistry.register("Acos", acosGradient) GradientsRegistry.register("Atan", atanGradient) GradientsRegistry.register("Sinh", sinhGradient) GradientsRegistry.register("Cosh", 
coshGradient) GradientsRegistry.register("Tanh", tanhGradient) GradientsRegistry.register("TanhGrad", tanhHessian) GradientsRegistry.register("Asinh", asinhGradient) GradientsRegistry.register("Acosh", acoshGradient) GradientsRegistry.register("Atanh", atanhGradient) GradientsRegistry.register("Lgamma", lgammaGradient) GradientsRegistry.register("Digamma", digammaGradient) GradientsRegistry.register("Erf", erfGradient) GradientsRegistry.register("Erfc", erfcGradient) GradientsRegistry.register("Sigmoid", sigmoidGradient) GradientsRegistry.register("SigmoidGrad", sigmoidHessian) GradientsRegistry.register("Sign", signGradient) GradientsRegistry.register("Round", roundGradient) GradientsRegistry.register("Rint", rintGradient) GradientsRegistry.register("Floor", floorGradient) GradientsRegistry.register("Ceil", ceilGradient) GradientsRegistry.register("Add", addGradient) GradientsRegistry.register("Sub", subGradient) GradientsRegistry.register("Mul", mulGradient) GradientsRegistry.register("Div", divGradient) GradientsRegistry.register("FloorDiv", floorDivGradient) GradientsRegistry.register("TruncateDiv", truncateDivGradient) GradientsRegistry.register("RealDiv", realDivGradient) GradientsRegistry.register("SquaredDifference", squaredDifferenceGradient) GradientsRegistry.register("Pow", powGradient) GradientsRegistry.register("Igammac", igammacGradient) GradientsRegistry.register("Igamma", igammaGradient) GradientsRegistry.register("Zeta", zetaGradient) GradientsRegistry.register("Polygamma", polygammaGradient) GradientsRegistry.register("Atan2", atan2Gradient) GradientsRegistry.register("Minimum", minimumGradient) GradientsRegistry.register("Maximum", maximumGradient) GradientsRegistry.register("Betainc", betaIncGradient) GradientsRegistry.register("Sum", sumGradient) GradientsRegistry.register("Mean", meanGradient) GradientsRegistry.register("Prod", prodGradient) GradientsRegistry.register("Min", minOrMaxGradient) GradientsRegistry.register("Max", minOrMaxGradient) GradientsRegistry.register("Cumsum", cumsumGradient) GradientsRegistry.register("Cumprod", cumprodGradient) GradientsRegistry.register("SegmentSum", segmentSumGradient) GradientsRegistry.register("SegmentMean", segmentMeanGradient) GradientsRegistry.register("SegmentMin", segmentMinOrMaxGradient) GradientsRegistry.register("SegmentMax", segmentMinOrMaxGradient) GradientsRegistry.register("UnsortedSegmentSum", unsortedSegmentSumGradient) GradientsRegistry.register("UnsortedSegmentProd", unsortedSegmentProdGradient) GradientsRegistry.register("UnsortedSegmentMin", unsortedSegmentMinOrMaxGradient) GradientsRegistry.register("UnsortedSegmentMax", unsortedSegmentMinOrMaxGradient) GradientsRegistry.register("SparseSegmentSum", sparseSegmentSumGradient) GradientsRegistry.register("SparseSegmentSumWithNumSegments", sparseSegmentSumWithNumSegmentsGradient) GradientsRegistry.register("SparseSegmentMean", sparseSegmentMeanGradient) GradientsRegistry.register("SparseSegmentMeanWithNumSegments", sparseSegmentMeanWithNumSegmentsGradient) GradientsRegistry.register("SparseSegmentSqrtN", sparseSegmentSumSqrtNGradient) GradientsRegistry.register("SparseSegmentSqrtNWithNumSegments", sparseSegmentSumSqrtNWithNumSegmentsGradient) GradientsRegistry.register("Diag", diagGradient) GradientsRegistry.register("DiagPart", diagPartGradient) GradientsRegistry.register("MatrixDiag", matrixDiagGradient) GradientsRegistry.register("MatrixSetDiag", matrixSetDiagGradient) GradientsRegistry.register("MatrixDiagPart", matrixDiagPartGradient) 
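    // Each `GradientsRegistry.register` call above and below maps a graph op type name to a function with
    // signature `(Op, Seq[OutputLike]) => Seq[OutputLike]` that converts the gradients flowing into the op's
    // outputs into one gradient per op input (`null` marks inputs that receive no gradient). As a minimal
    // illustrative sketch (the names `fGradient` and `fPrime` are placeholders, not ops defined in this file),
    // a gradient function for a hypothetical element-wise op `y = f(x)` would apply the chain rule as:
    //
    //   private[this] def fGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = {
    //     val x = op.inputs(0)
    //     Seq(multiply(outputGradients.head.toOutput, fPrime(x)))  // dL/dx = dL/dy * f'(x).
    //   }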
GradientsRegistry.register("MatrixBandPart", matrixBandPartGradient) GradientsRegistry.register("BatchMatMul", batchMatMulGradient) GradientsRegistry.register("MatMul", matMulGradient) GradientsRegistry.register("SparseMatMul", sparseMatMulGradient) GradientsRegistry.register("Cross", crossGradient) GradientsRegistry.register("Complex", complexGradient) GradientsRegistry.register("Real", realGradient) GradientsRegistry.register("Imag", imagGradient) GradientsRegistry.register("Conj", conjGradient) private[this] def selectGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val grad = outputGradients.head val c = op.inputs(0) val x = op.inputs(1) val zeros = Basic.zerosLike(x) Seq[OutputLike](null, select(c, grad, zeros), select(c, zeros, grad)) } private[this] def castGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val supportedDataTypes = Seq(FLOAT16, FLOAT32, FLOAT64, BFLOAT16, COMPLEX64, COMPLEX128) val sourceDataType = op.inputs(0).dataType val destinationDataType = outputGradients.head.dataType if (supportedDataTypes.contains(sourceDataType) && supportedDataTypes.contains(destinationDataType)) Seq(cast(outputGradients.head, sourceDataType)) else Seq(null) } private[this] def addNGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { Seq.fill(op.numInputs)(outputGradients.head) } private[this] def accumulateNGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { Seq.fill(op.numInputs)(outputGradients.head) } private[this] def absGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { Seq(multiply(outputGradients.head.toOutput, sign(op.inputs(0)))) } private[this] def complexAbsGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val outputGradient = outputGradients.head.toOutput Seq(multiply(complex(outputGradient, Basic.zerosLike(outputGradient)), sign(op.inputs(0)))) } private[this] def negateGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { Seq(negate(outputGradients.head)) } private[this] def unaryGradientOp( y: Output, outputGradients: Seq[OutputLike], opType: String, name: String): Seq[OutputLike] = { val outputGradient = outputGradients.head val gradient = outputGradient match { case g: Output => Op.Builder(opType = opType, name = name) .addInput(y) .addInput(g) .build().outputs(0) case g: OutputIndexedSlices => val values = Op.Builder(opType = opType, name = name) .addInput(y) .addInput(g) .build().outputs(0) OutputIndexedSlices(indices = g.indices, values = values, denseShape = g.denseShape) case g: SparseOutput => val values = Op.Builder(opType = opType, name = name) .addInput(y) .addInput(g) .build().outputs(0) SparseOutput(indices = g.indices, values = values, denseShape = g.denseShape) } Seq(gradient) } private[this] def reciprocalGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { unaryGradientOp(op.outputs(0), outputGradients, opType = "ReciprocalGrad", name = "ReciprocalGradient") } private[this] def reciprocalHessian(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val a = op.inputs(0) val b = op.inputs(1) val outputGradient = outputGradients.head Op.createWith(controlDependencies = Set(outputGradient.op)) { val ca = conjugate(a) val cg = conjugate(outputGradient) val rg = unaryGradientOp(ca, outputGradients, opType = "ReciprocalGrad", name = "ReciprocalGradient") Seq(Basic.constant(-2, cg.dataType) * cg * b * ca, rg.head) } } private[this] def squareGradient(op: Op, outputGradients: Seq[OutputLike]): 
Seq[OutputLike] = { val x = op.inputs(0) val outputGradient = outputGradients.head // Using control dependencies to prevent 2*x from being computed too early. Op.createWith(controlDependencies = Set(outputGradient.op)) { Seq(outputGradient * (Basic.constant(2, x.dataType) * conjugate(x))) } } private[this] def sqrtGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { unaryGradientOp(op.outputs(0), outputGradients, opType = "SqrtGrad", name = "SqrtGradient") } private[this] def sqrtHessian(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val a = op.inputs(0) val y = op.outputs(0) val outputGradient = outputGradients.head.toOutput Op.createWith(controlDependencies = Set(outputGradient.op)) { val ga = divide(outputGradient, a) Seq(negate(conjugate(ga)) * y, Basic.constant(0.5, ga.dataType) * ga) } } private[this] def rsqrtGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { unaryGradientOp(op.outputs(0), outputGradients, opType = "RsqrtGrad", name = "RSqrtGradient") } private[this] def rsqrtHessian(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val a = op.inputs(0) val b = op.inputs(1) val outputGradient = outputGradients.head Op.createWith(controlDependencies = Set(outputGradient.op)) { val ca = conjugate(a) val cg = conjugate(outputGradient) val rg = unaryGradientOp(ca, outputGradients, opType = "RsqrtGrad", name = "RSqrtGradient") Seq(Basic.constant(-1.5, cg.dataType) * cg * b * square(ca), rg.head) } } private[this] def expGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val y = op.outputs(0) val outputGradient = outputGradients.head Op.createWith(controlDependencies = Set(outputGradient.op)) { Seq(outputGradient * conjugate(y)) } } private[this] def expm1Gradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val x = op.inputs(0) val outputGradient = outputGradients.head Op.createWith(controlDependencies = Set(outputGradient.op)) { Seq(outputGradient * exp(conjugate(x))) } } private[this] def logGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val x = op.inputs(0) val outputGradient = outputGradients.head Op.createWith(controlDependencies = Set(outputGradient.op)) { Seq(outputGradient * reciprocal(conjugate(x))) } } private[this] def log1pGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val x = op.inputs(0) val outputGradient = outputGradients.head Op.createWith(controlDependencies = Set(outputGradient.op)) { Seq(outputGradient * reciprocal(Basic.constant(1, x.dataType) + conjugate(x))) } } private[this] def sinGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val x = op.inputs(0) val outputGradient = outputGradients.head Op.createWith(controlDependencies = Set(outputGradient.op)) { Seq(outputGradient * cos(conjugate(x))) } } private[this] def cosGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val x = op.inputs(0) val outputGradient = outputGradients.head Op.createWith(controlDependencies = Set(outputGradient.op)) { Seq(negate(outputGradient) * sin(conjugate(x))) } } private[this] def tanGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val x = op.inputs(0) val outputGradient = outputGradients.head Op.createWith(controlDependencies = Set(outputGradient.op)) { Seq(outputGradient * square(reciprocal(cos(conjugate(x))))) } } private[this] def asinGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val x = op.inputs(0) val outputGradient = 
outputGradients.head Op.createWith(controlDependencies = Set(outputGradient.op)) { Seq(outputGradient * reciprocal(sqrt(Basic.constant(1, x.dataType) - square(conjugate(x))))) } } private[this] def acosGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val x = op.inputs(0) val outputGradient = outputGradients.head Op.createWith(controlDependencies = Set(outputGradient.op)) { Seq(negate(outputGradient) * reciprocal(sqrt(Basic.constant(1, x.dataType) - square(conjugate(x))))) } } private[this] def atanGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val x = op.inputs(0) val outputGradient = outputGradients.head Op.createWith(controlDependencies = Set(outputGradient.op)) { Seq(outputGradient * reciprocal(Basic.constant(1, x.dataType) + square(conjugate(x)))) } } private[this] def sinhGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val x = op.inputs(0) val outputGradient = outputGradients.head Op.createWith(controlDependencies = Set(outputGradient.op)) { Seq(outputGradient * cosh(conjugate(x))) } } private[this] def coshGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val x = op.inputs(0) val outputGradient = outputGradients.head Op.createWith(controlDependencies = Set(outputGradient.op)) { Seq(outputGradient * sinh(conjugate(x))) } } private[this] def tanhGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { var y = op.outputs(0) val outputGradient = outputGradients.head Op.createWith(controlDependencies = Set(outputGradient.op)) { y = conjugate(y) unaryGradientOp(y, outputGradients, opType = "TanhGrad", name = "TanhGradient") } } private[this] def tanhHessian(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val a = op.inputs(0) val b = op.inputs(1) val outputGradient = outputGradients.head Op.createWith(controlDependencies = Set(outputGradient.op)) { val ca = conjugate(a) val cb = conjugate(b) val rg = unaryGradientOp(ca, outputGradients, opType = "TanhGrad", name = "TanhGradient") Seq(Basic.constant(-2.0, outputGradient.dataType) * outputGradient * cb * ca, rg.head) } } private[this] def asinhGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val y = op.outputs(0) val outputGradient = outputGradients.head Op.createWith(controlDependencies = Set(outputGradient.op)) { Seq(outputGradient / cosh(conjugate(y))) } } private[this] def acoshGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val y = op.outputs(0) val outputGradient = outputGradients.head Op.createWith(controlDependencies = Set(outputGradient.op)) { Seq(outputGradient / sinh(conjugate(y))) } } private[this] def atanhGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val x = op.inputs(0) val outputGradient = outputGradients.head Op.createWith(controlDependencies = Set(outputGradient.op)) { Seq(outputGradient * reciprocal(Basic.constant(1, x.dataType) - square(conjugate(x)))) } } private[this] def lgammaGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val x = op.inputs(0) val outputGradient = outputGradients.head Op.createWith(controlDependencies = Set(outputGradient.op)) { Seq(outputGradient * digamma(conjugate(x))) } } private[this] def digammaGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val x = op.inputs(0) val outputGradient = outputGradients.head Op.createWith(controlDependencies = Set(outputGradient.op)) { Seq(outputGradient * polygamma(Basic.constant(1, x.dataType), conjugate(x))) } } 
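    // The element-wise gradient functions above and below share a common idiom: the op input is conjugated so
    // that the same formula remains correct for complex tensors, and the body is wrapped in
    // `Op.createWith(controlDependencies = Set(outputGradient.op))` so that the derivative subgraph is not
    // computed before the incoming gradient is available. For instance (an illustrative restatement of
    // existing behavior, not new behavior), `sinGradient` computes `dL/dx = dL/dy * cos(conj(x))`.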
private[this] def erfGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val x = op.inputs(0) val outputGradient = outputGradients.head val twoOverRootPi = Basic.constant(2.0 / math.sqrt(math.Pi), outputGradient.dataType) Op.createWith(controlDependencies = Set(outputGradient.op)) { Seq(outputGradient * twoOverRootPi * exp(negate(square(conjugate(x))))) } } private[this] def erfcGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val x = op.inputs(0) val outputGradient = outputGradients.head val minusTwoOverRootPi = Basic.constant(-2.0 / math.sqrt(math.Pi), outputGradient.dataType) Op.createWith(controlDependencies = Set(outputGradient.op)) { Seq(outputGradient * minusTwoOverRootPi * exp(negate(square(conjugate(x))))) } } private[this] def sigmoidGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { var y = op.outputs(0) val outputGradient = outputGradients.head Op.createWith(controlDependencies = Set(outputGradient.op)) { y = conjugate(y) unaryGradientOp(y, outputGradients, opType = "SigmoidGrad", name = "SigmoidGradient") } } private[this] def sigmoidHessian(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val a = op.inputs(0) val b = op.inputs(1) val outputGradient = outputGradients.head Op.createWith(controlDependencies = Set(outputGradient.op)) { val ca = conjugate(a) val cb = conjugate(b) val gb = outputGradient * cb val rg = unaryGradientOp(ca, outputGradients, opType = "SigmoidGrad", name = "SigmoidGradient") Seq(subtract(gb, Basic.constant(-2.0, outputGradient.dataType) * gb * ca), rg.head) } } private[this] def signGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { Seq(Basic.zerosLike(op.inputs(0))) } private[this] def roundGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { Seq(null) } private[this] def rintGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { Seq(null) } private[this] def floorGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { Seq(null) } private[this] def ceilGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { Seq(null) } /** Returns `true` if the shapes of `x`, `y`, and `gradient` are all fully specified (i.e., statically known) * and equal. 
*/ private[this] def shapeFullySpecifiedAndEqual(x: Output, y: Output, gradient: OutputLike): Boolean = { x.shape.isFullyDefined && y.shape.isFullyDefined && gradient.shape.isFullyDefined && x.shape == y.shape && x.shape == gradient.shape } private[this] def addGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val x = op.inputs(0) val y = op.inputs(1) val outputGradient = outputGradients.head.toOutput if (shapeFullySpecifiedAndEqual(x, y, outputGradient)) { Seq(outputGradient, outputGradient) } else { val xShape = Basic.shape(x) val yShape = Basic.shape(y) val (rx, ry) = Basic.broadcastGradientArguments(xShape, yShape) Seq( Basic.reshape(sum(outputGradient, rx), xShape), Basic.reshape(sum(outputGradient, ry), yShape)) } } private[this] def subGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val x = op.inputs(0) val y = op.inputs(1) val outputGradient = outputGradients.head.toOutput if (shapeFullySpecifiedAndEqual(x, y, outputGradient)) { Seq(outputGradient, -outputGradient) } else { val xShape = Basic.shape(x) val yShape = Basic.shape(y) val (rx, ry) = Basic.broadcastGradientArguments(xShape, yShape) Seq( Basic.reshape(sum(outputGradient, rx), xShape), Basic.reshape(-sum(outputGradient, ry), yShape)) } } private[this] def mulGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val x = conjugate(op.inputs(0)) val y = conjugate(op.inputs(1)) val outputGradient = outputGradients.head.toOutput if (shapeFullySpecifiedAndEqual(x, y, outputGradient) && (outputGradient.dataType == INT32 || outputGradient.dataType == FLOAT32)) { Seq(outputGradient * y, outputGradient * x) } else { val xShape = Basic.shape(x) val yShape = Basic.shape(y) val (rx, ry) = Basic.broadcastGradientArguments(xShape, yShape) Seq( Basic.reshape(sum(multiply(outputGradient, y), rx), xShape), Basic.reshape(sum(multiply(x, outputGradient), ry), yShape)) } } private[this] def divGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val x = conjugate(op.inputs(0)) val y = conjugate(op.inputs(1)) val xShape = Basic.shape(x) val yShape = Basic.shape(y) val (rx, ry) = Basic.broadcastGradientArguments(xShape, yShape) val outputGradient = outputGradients.head.toOutput Seq( Basic.reshape(sum(divide(outputGradient, y), rx), xShape), Basic.reshape(sum(multiply(outputGradient, divide(divide(negate(x), y), y)), ry), yShape)) } private[this] def floorDivGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { Seq(null, null) } private[this] def truncateDivGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { Seq(null, null) } private[this] def realDivGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val x = conjugate(op.inputs(0)) val y = conjugate(op.inputs(1)) val xShape = Basic.shape(x) val yShape = Basic.shape(y) val (rx, ry) = Basic.broadcastGradientArguments(xShape, yShape) val outputGradient = outputGradients.head.toOutput Seq( Basic.reshape(sum(realDivide(outputGradient, y), rx), xShape), Basic.reshape(sum(multiply(outputGradient, realDivide(realDivide(negate(x), y), y)), ry), yShape)) } private[this] def squaredDifferenceGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val x = op.inputs(0) val y = op.inputs(1) val xShape = Basic.shape(x) val yShape = Basic.shape(y) val (rx, ry) = Basic.broadcastGradientArguments(xShape, yShape) val outputGradient = outputGradients.head.toOutput val xGradient = Op.createWith(controlDependencies = Set(outputGradient.op)) { 
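      // For z = (x - y)^2, dz/dx = 2 * (x - y) and dz/dy = -2 * (x - y), so the term computed below is reused for
      // both inputs: it is reduced and reshaped according to the broadcasting arguments, and negated for `y`.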
      multiply(scalarMul(Basic.constant(2, outputGradient.dataType), outputGradient), subtract(x, y))
    }
    Seq(
      Basic.reshape(sum(xGradient, rx), xShape),
      negate(Basic.reshape(sum(xGradient, ry), yShape)))
  }

  private[this] def powGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = {
    val x = conjugate(op.inputs(0))
    val y = conjugate(op.inputs(1))
    val z = conjugate(op.outputs(0))
    val xShape = Basic.shape(x)
    val yShape = Basic.shape(y)
    val (rx, ry) = Basic.broadcastGradientArguments(xShape, yShape)
    val outputGradient = outputGradients.head.toOutput
    // Avoid false singularity at x = 0.
    val logX = {
      if (x.dataType.isComplex) {
        // real(x) < 0 is fine for the complex case.
        select(notEqual(x, Basic.constant(0, x.dataType)), log(x), Basic.zerosLike(x))
      } else {
        // There's no sensible real value to return if x < 0, so we return 0.
        select(greater(x, Basic.constant(0, x.dataType)), log(x), Basic.zerosLike(x))
      }
    }
    Seq(
      Basic.reshape(sum(outputGradient * y * pow(x, subtract(y, Basic.constant(1, y.dataType))), rx), xShape),
      Basic.reshape(sum(outputGradient * z * logX, ry), yShape))
  }

  private[this] def igammacGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = {
    Seq(null, negate(igammaGradient(op, outputGradients)(1)))
  }

  private[this] def igammaGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = {
    // TODO: [GRADIENTS] Mark the derivative w.r.t. a as not implemented somehow, or implement it.
    val a = op.inputs(0)
    val x = op.inputs(1)
    val aShape = Basic.shape(a)
    val xShape = Basic.shape(x)
    val (_, rx) = Basic.broadcastGradientArguments(aShape, xShape)
    val outputGradient = outputGradients.head.toOutput
    // Perform operations in log space before summing, because Gamma(a) and Gamma'(a) can grow large.
    val partialX = exp(negate(x) + multiply(subtract(a, Basic.constant(1, a.dataType)), log(x)) - logGamma(a))
    Seq(null, Basic.reshape(sum(multiply(partialX, outputGradient), rx), xShape))
  }

  private[this] def zetaGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = {
    // TODO: [GRADIENTS] Mark the derivative w.r.t. x as not implemented somehow, or implement it.
    val outputGradient = outputGradients.head.toOutput
    Op.createWith(controlDependencies = Set(outputGradient.op)) {
      val x = conjugate(op.inputs(0))
      val q = conjugate(op.inputs(1))
      val xShape = Basic.shape(x)
      val qShape = Basic.shape(q)
      val (_, rq) = Basic.broadcastGradientArguments(xShape, qShape)
      val partialQ = negate(x) * zeta(add(x, Basic.constant(1, x.dataType)), q)
      Seq(null, Basic.reshape(sum(multiply(partialQ, outputGradient), rq), qShape))
    }
  }

  private[this] def polygammaGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = {
    // TODO: [GRADIENTS] Mark the derivative w.r.t. n as not implemented somehow, or implement it.
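    // d/dx polygamma(n, x) = polygamma(n + 1, x); only this partial derivative (w.r.t. `x`) is provided below.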
    val outputGradient = outputGradients.head.toOutput
    Op.createWith(controlDependencies = Set(outputGradient.op)) {
      val n = conjugate(op.inputs(0))
      val x = conjugate(op.inputs(1))
      val nShape = Basic.shape(n)
      val xShape = Basic.shape(x)
      val (_, rx) = Basic.broadcastGradientArguments(nShape, xShape)
      val partialX = polygamma(add(n, Basic.constant(1, n.dataType)), x)
      Seq(null, Basic.reshape(sum(multiply(partialX, outputGradient), rx), xShape))
    }
  }

  private[this] def atan2Gradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = {
    val x = op.inputs(0)
    val y = op.inputs(1)
    val outputGradient = outputGradients.head.toOutput
    Op.createWith(controlDependencies = Set(outputGradient.op)) {
      // atan2(x, y) is the angle whose tangent is x / y, and so d/dx = y / (x^2 + y^2) and d/dy = -x / (x^2 + y^2).
      val gradientInverse = divide(outputGradient, add(square(x), square(y)))
      Seq(
        multiply(y, gradientInverse),
        multiply(negate(x), gradientInverse))
    }
  }

  private[this] def minimumGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = {
    val x = op.inputs(0)
    val y = op.inputs(1)
    val xShape = Basic.shape(x)
    val yShape = Basic.shape(y)
    val outputGradient = outputGradients.head.toOutput
    val zeros = Basic.zerosLike(outputGradient)
    val xMask = lessEqual(x, y)
    val (rx, ry) = Basic.broadcastGradientArguments(xShape, yShape)
    val xGradient = select(xMask, outputGradient, zeros)
    val yGradient = select(xMask, zeros, outputGradient)
    Seq(
      Basic.reshape(sum(xGradient, rx), xShape),
      Basic.reshape(sum(yGradient, ry), yShape))
  }

  private[this] def maximumGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = {
    val x = op.inputs(0)
    val y = op.inputs(1)
    val xShape = Basic.shape(x)
    val yShape = Basic.shape(y)
    val outputGradient = outputGradients.head.toOutput
    val zeros = Basic.zerosLike(outputGradient)
    val xMask = greaterEqual(x, y)
    val (rx, ry) = Basic.broadcastGradientArguments(xShape, yShape)
    val xGradient = select(xMask, outputGradient, zeros)
    val yGradient = select(xMask, zeros, outputGradient)
    Seq(
      Basic.reshape(sum(xGradient, rx), xShape),
      Basic.reshape(sum(yGradient, ry), yShape))
  }

  private[this] def betaIncGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = {
    // TODO: [GRADIENTS] Mark the derivative w.r.t. a and b as not implemented somehow, or implement it.
    val a = conjugate(op.inputs(0))
    val b = conjugate(op.inputs(1))
    val x = conjugate(op.inputs(2))
    val aShape = Basic.shape(a)
    val xShape = Basic.shape(x)
    val outputGradient = outputGradients.head.toOutput
    val (_, rx) = Basic.broadcastGradientArguments(aShape, xShape)
    // Perform operations in log space before summing, because terms can grow large.
    val logBeta = logGamma(a) + logGamma(b) - logGamma(a + b)
    val one = Basic.constant(1, b.dataType)
    val partialX = exp(((b - 1) * log(one - x)) + ((a - one) * log(x)) - logBeta)
    Seq(null, null, Basic.reshape(sum(multiply(partialX, outputGradient), rx), xShape))
  }

  /** Helper function for reduction ops that computes the reduction output shape, assuming `keepDims` is `true`.
    *
    * For example:
    * {{{
    *   // inputShape == [2, 3, 5, 7]
    *   // axes = [1, 2]
    *   reducedShape(inputShape, axes) ==> [2, 1, 1, 7]
    * }}}
    *
    * @param inputShape Shape of the tensor being reduced.
    * @param axes Reduction axes.
    * @return One-dimensional tensor representing the reduction output shape, assuming `keepDims` is `true`.
    */
  private[this] def reducedShape(inputShape: Output, axes: Output): Output = {
    // Cast needed for SparseOutput reductions.
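    // The reduced shape is obtained by scattering `1`s into the positions of the (normalized) reduction axes of
    // the input shape via `dynamicStitch`, e.g. inputShape = [2, 3, 5, 7] with axes = [1, 2] yields [2, 1, 1, 7].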
val intInputShape = cast(inputShape, INT32) val inputRank = Basic.size(intInputShape) val reshapedAxes = { if (axes.rank == 0) Basic.reshape(axes, Tensor(1)) else axes } val intAxes = floorMod(add(cast(reshapedAxes, INT32), inputRank), inputRank) val axesShape = Basic.shape(intAxes) DataFlow.dynamicStitch( Seq(range(Basic.constant(0), inputRank), intAxes), Seq(intInputShape, Basic.fill(shape = axesShape)(1))) } private[this] def safeShapeDiv(x: Output, y: Output): Output = { truncateDivide(x, maximum(y, Basic.constant(1, y.dataType))) } private[this] def sumGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val input = op.inputs(0) val axes = op.inputs(1) val rank = input.shape.rank // Fast path for when reducing to a scalar and rank is known, which adds only reshape and tile ops (and possibly a // shape op too). if (rank == 0) { Seq(outputGradients.head, null) } else if (rank != -1 && axes.op.opType == "Const" && Output.constantValue(axes).orNull[Tensor] == (0 until rank: Tensor).cast(axes.dataType)) { // In this case the reduction was over all dimensions. var outputGradient = outputGradients.head.toOutput outputGradient = Basic.reshape(outputGradient, Shape(Array.fill(rank)(1))) val inputShape = { // If the shape is not fully defined but the rank is, we use the shape op. if (input.shape.isFullyDefined) input.shape.toOutput() else Basic.shape(input) } Seq(Basic.tile(outputGradient, inputShape), null) } else { val inputShape = Basic.shape(input) val outputShapeKeptDimensions = reducedShape(inputShape, axes) val tileScaling = safeShapeDiv(inputShape, outputShapeKeptDimensions) var outputGradient = outputGradients.head.toOutput outputGradient = Basic.reshape(outputGradient, outputShapeKeptDimensions) Seq(Basic.tile(outputGradient, tileScaling), null) } } private[this] def meanGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val sumGrad = sumGradient(op, outputGradients).head.toOutput val factor = { val inputSize = op.inputs(0).size val outputSize = op.outputs(0).size if (inputSize != -1 && outputSize != -1) { Basic.constant(inputSize / scala.math.max(outputSize, 1), sumGrad.dataType) } else { val inputShape = Basic.shape(op.inputs(0)) val outputShape = Basic.shape(op.outputs(0)) safeShapeDiv(prod(inputShape), prod(outputShape)) } } Seq(divide(sumGrad, cast(factor, sumGrad.dataType)), null) } private[this] def prodGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { // The gradient can be expressed by dividing the product by each entry of the input tensor, but this approach // can't deal with zeros in the input. Here, we avoid this problem by composing the output as a product of two // cumulative product operations. val inputShape = Basic.shape(op.inputs(0)) // Expand the gradient to the full input shape val outputShapeKeptDims = reducedShape(inputShape, op.inputs(1)) val tileScaling = safeShapeDiv(inputShape, outputShapeKeptDims) var gradient = outputGradients.head.toOutput gradient = Basic.reshape(gradient, outputShapeKeptDims) gradient = Basic.tile(gradient, tileScaling) // Pack all reduced dimensions into a single one, so we can perform the cumulative product ops. If the reduction // dimensions list is empty, it defaults to FLOAT32 data type, so we need to cast here. We place all the // shape-related ops on the CPU to avoid copying back and forth, and since "listdiff" is a CPU-only op. 
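    // The two exclusive cumulative products computed below implement the usual "product of all other entries"
    // trick: for a reduced row [a, b, c], `left` is [1, a, ab] and `right` is [bc, c, 1], so their product
    // [bc, ac, ab] is exactly the partial derivative of a * b * c w.r.t. each entry, without dividing by the
    // (possibly zero) inputs.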
val (permutation, reducedNum, otherNum) = Op.createWith(device = "/cpu:0") { val rank = Basic.rank(op.inputs(0)) // Reshape the reduction indices for the case where the parameters is a scalar. val reductionIndices = floorMod(add(Basic.reshape(op.inputs(1), -1), rank), rank) val reduced = cast(reductionIndices, INT32) val indices = range(Basic.constant(0), rank) val (other, _) = Basic.listDiff(indices, reduced) (Basic.concatenate(Seq(reduced, other), 0), prod(Basic.gather(inputShape, reduced)), prod(Basic.gather(inputShape, other))) } val permuted = Basic.transpose(op.inputs(0), permutation) val permutedShape = Basic.shape(permuted) val reshaped = Basic.reshape(permuted, Basic.concatenate(Seq(reducedNum, otherNum))) // Calculate the product, leaving out the current entry. val left = cumprod(reshaped, axis = 0, exclusive = true) val right = cumprod(reshaped, axis = 0, exclusive = true, reverse = true) val y = Basic.reshape(multiply(left, right), permutedShape) // Invert the transpose and reshape operations. val output = multiply(gradient, Basic.transpose(y, Basic.invertPermutation(permutation))) // Make sure to set the statically known shape information through a reshape. Seq(Basic.reshape(output, inputShape), null) } private[this] def minOrMaxGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val inputShape = Basic.shape(op.inputs(0)) val outputShapeKeptDims = reducedShape(inputShape, op.inputs(1)) val y = Basic.reshape(op.outputs(0), outputShapeKeptDims) var gradient = outputGradients.head.toOutput gradient = Basic.reshape(gradient, outputShapeKeptDims) // Compute the number of selected (maximum or minimum) elements in each reduction dimension. If there are multiple // minimum or maximum elements then the gradient will be divided among them. val indicators = cast(equal(y, op.inputs(0)), gradient.dataType) val numberOfSelected = Basic.reshape(sum(indicators, op.inputs(1)), outputShapeKeptDims) Seq(multiply(divide(indicators, numberOfSelected), gradient), null) } private[this] def cumsumGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val axis = op.inputs(1) val exclusive = op.booleanAttribute("exclusive") val reverse = op.booleanAttribute("reverse") val outputGradient = outputGradients.head Seq(cumsum(outputGradient, axis, exclusive = exclusive, reverse = !reverse), null) } private[this] def cumprodGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val x = op.inputs(0) val axis = op.inputs(1) val exclusive = op.booleanAttribute("exclusive") val reverse = op.booleanAttribute("reverse") val outputGradient = outputGradients.head // TODO: [GRADIENTS] !!! This fails when x contains 0 and should be fixed. 
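    // For an inclusive, non-reversed cumulative product y = cumprod(x), dL/dx_i = (\sum_{j >= i} dL/dy_j * y_j) / x_i,
    // which is what the reversed cumulative sum below computes (hence the division by `x` and the zero-input caveat
    // noted in the TODO above).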
    val product = cumprod(x, axis, exclusive = exclusive, reverse = reverse)
    val result = cumsum(product * outputGradient, axis, exclusive = exclusive, reverse = !reverse)
    Seq(divide(result, x), null)
  }

  private[this] def segmentSumGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = {
    val outputGradient = outputGradients.head.toOutput
    Seq(Basic.gather(outputGradient, op.inputs(1)), null)
  }

  private[this] def segmentMeanGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = {
    val outputGradient = outputGradients.head.toOutput
    val inputRank = Basic.rank(op.inputs(0))
    val onesShape = Basic.concatenate(Seq(
      Basic.shape(op.inputs(1)),
      Basic.fill(
        shape = Basic.expandDims(subtract(inputRank, Basic.constant(1, inputRank.dataType)), 0))(
        Basic.constant(1, inputRank.dataType))))
    val ones = Basic.fill(shape = onesShape)(Basic.constant(1, outputGradient.dataType))
    val scaledGradient = divide(outputGradient, segmentSum(ones, op.inputs(1)))
    Seq(Basic.gather(scaledGradient, op.inputs(1)), null)
  }

  private[this] def segmentMinOrMaxGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = {
    val outputGradient = outputGradients.head.toOutput
    // Get the number of selected (minimum or maximum) elements in each segment.
    val gatheredOutputs = Basic.gather(op.outputs(0), op.inputs(1))
    val isSelected = equal(op.inputs(0), gatheredOutputs)
    val numSelected = segmentSum(cast(isSelected, outputGradient.dataType), op.inputs(1))
    // Compute the gradient for each segment. The gradient for the ith segment is divided evenly among the selected
    // elements in that segment.
    val weightedGradients = divide(outputGradient, numSelected)
    val gatheredGradients = Basic.gather(weightedGradients, op.inputs(1))
    val zeros = Basic.zerosLike(gatheredGradients)
    Seq(select(isSelected, gatheredGradients, zeros), null)
  }

  private[this] def gatherDropNegatives(
      parameters: Output,
      indices: Output,
      zeroClippedIndices: Output = null,
      isPositive: Output = null
  ): (Output, Output, Output) = {
    val computedZeroClippedIndices = {
      if (zeroClippedIndices != null) zeroClippedIndices else Math.maximum(indices, Basic.zerosLike(indices))
    }
    val gathered = Basic.gather(parameters, computedZeroClippedIndices)
    val computedIsPositive = {
      if (isPositive != null) {
        isPositive
      } else {
        var isPositive = Math.greaterEqual(indices, 0)
        // `select` requires that the condition has the same shape as the other two arguments.
        val minusOne = Basic.constant(-1)
        (0 until (gathered.rank - isPositive.rank)).foreach(_ => {
          isPositive = Basic.expandDims(isPositive, minusOne)
        })
        Math.logicalAnd(isPositive, Basic.onesLike(gathered, dataType = BOOLEAN))
      }
    }
    (Math.select(computedIsPositive, gathered, Basic.zerosLike(gathered)), computedZeroClippedIndices, computedIsPositive)
  }

  private[this] def unsortedSegmentSumGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = {
    val outputGradient = outputGradients.head.toOutput
    Seq(gatherDropNegatives(outputGradient, op.inputs(1))._1, null, null)
  }

  private[this] def unsortedSegmentProdGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = {
    // This gradient can be expressed for each segment by dividing the segment's product by each element of the
    // segment input tensor, but this approach cannot deal with zeros in the input. Unlike `prod` we cannot use the
    // cumulative sum op here, as individual segments may have a different number of elements. Therefore, we consider
    // three cases:
    //
    // 1) A segment input contains no zeros and can safely be divided by the input tensor.
// 2) A segment contains exactly one zero. In this case, the gradient of each input of the segment is zero, // except for the 0-input. There the gradient is the product of the remaining segment entries. // 3) A segment contains at least two zeros. In this case, the gradient is zero for all segment inputs. var outputGradient = outputGradients.head.toOutput // Note that `unsortedSegmentSum` will filter out the negative indices, and so we do not need to do a `logicalAnd` // with `isPositive` here. val isZero = Math.equal(op.inputs(0), 0) val numZeros = Math.unsortedSegmentSum(cast(isZero, INT32), op.inputs(1), op.inputs(2)) // Handle case 3 and set the gradient to 0 for segments with more than one 0 as input. outputGradient = Math.select(Math.greater(numZeros, 1), Basic.zerosLike(outputGradient), outputGradient) // Replace all zeros with ones and compute the `unsortedSegmentProd`. val nonZeroData = Math.select(isZero, Basic.onesLike(op.inputs(0)), op.inputs(0)) val nonZeroProd = Math.unsortedSegmentProd(nonZeroData, op.inputs(1), op.inputs(2)) // Clip the indices for the gather to be positive. val zeroClippedIndices = Math.maximum(op.inputs(1), Basic.zerosLike(op.inputs(1))) val gatheredProd = Basic.gather(op.outputs(0), zeroClippedIndices) val gatheredNonZeroProd = Basic.gather(nonZeroProd, zeroClippedIndices) // The following may contain NaN/Inf. val gatheredProdDivided = gatheredProd / op.inputs(0) // Now fetch the individual results for segments containing zero and those that do not. `isZero` will also fetch // results for entries with negative indices, but the following `gatherDropNegatives` sets the corresponding entry // in the gradient to zero for these. val partialDerivative = Math.select(isZero, gatheredNonZeroProd, gatheredProdDivided) val gatheredGradient = gatherDropNegatives(outputGradient, op.inputs(1), zeroClippedIndices)._1 Seq(gatheredGradient * partialDerivative, null, null) } private[this] def unsortedSegmentMinOrMaxGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val outputGradient = outputGradients.head.toOutput // Get the number of selected (minimum or maximum) elements in each segment. val (gatheredOutputs, zeroClippedIndices, isPositive) = gatherDropNegatives(op.outputs(0), op.inputs(1)) val isSelected = Math.logicalAnd(Math.equal(op.inputs(0), gatheredOutputs), isPositive) val numSelected = unsortedSegmentSum(cast(isSelected, outputGradient.dataType), op.inputs(1), op.inputs(2)) // Compute the gradient for each segment. The gradient for the ith segment is divided evenly among the selected // elements in that segment. 
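    // For example, if a segment is [3, 5, 5] and its maximum is 5, the two tied entries each receive half of the
    // incoming gradient for that segment, while the remaining entry receives zero.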
val weightedGradients = divide(outputGradient, numSelected) val (gatheredGradients, _, _) = gatherDropNegatives(weightedGradients, null, zeroClippedIndices, isPositive) val zeros = Basic.zerosLike(gatheredGradients) Seq(select(isSelected, gatheredGradients, zeros), null, null) } private[this] def sparseSegmentSumGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val outputGradient = outputGradients.head.toOutput val inputRows = Basic.shape(op.inputs(0))(0) Seq(unsortedSegmentSum(Basic.gather(outputGradient, op.inputs(2)), op.inputs(1), inputRows), null, null) } private[this] def sparseSegmentSumWithNumSegmentsGradient( op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { sparseSegmentSumGradient(op, outputGradients) :+ null } private[this] def sparseSegmentMeanGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val outputGradient = outputGradients.head.toOutput val inputRows = Basic.shape(op.inputs(0))(0) val gradient = Op.Builder(opType = "SparseSegmentMeanGrad", name = "SparseSegmentMeanGrad") .addInput(outputGradient) .addInput(op.inputs(1)) .addInput(op.inputs(2)) .addInput(inputRows) .build().outputs(0) Seq(gradient, null, null) } private[this] def sparseSegmentMeanWithNumSegmentsGradient( op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { sparseSegmentMeanGradient(op, outputGradients) :+ null } private[this] def sparseSegmentSumSqrtNGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val outputGradient = outputGradients.head.toOutput val inputRows = Basic.shape(op.inputs(0))(0) val gradient = Op.Builder(opType = "SparseSegmentSqrtNGrad", name = "SparseSegmentSumSqrtNGrad") .addInput(outputGradient) .addInput(op.inputs(1)) .addInput(op.inputs(2)) .addInput(inputRows) .build().outputs(0) Seq(gradient, null, null) } private[this] def sparseSegmentSumSqrtNWithNumSegmentsGradient( op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { sparseSegmentSumSqrtNGradient(op, outputGradients) :+ null } private[this] def diagGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { Seq(diagPart(outputGradients.head)) } private[this] def diagPartGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { Seq(diag(outputGradients.head)) } private[this] def matrixDiagGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { Seq(matrixDiagPart(outputGradients.head)) } private[this] def matrixSetDiagGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val gradient = outputGradients.head val inputShape = op.inputs(0).shape.mergeWith(gradient.shape) val batchShape = inputShape(0 :: -2).mergeWith(op.inputs(1).shape(0 :: -1)) val matrixShape = inputShape(-2 ::) val diagShape = { if (batchShape.isFullyDefined && matrixShape.isFullyDefined) { Basic.constant(Tensor((batchShape.asArray :+ matrixShape.asArray.min).map(Tensor(_)))) } else { Op.colocateWith(Set(gradient.op)) { val gradShape = Basic.shape(gradient) val gradRank = Basic.rank(gradient) val batchShape = Basic.slice(gradShape, 0, gradRank - 2) val matrixShape = Basic.slice(gradShape, gradRank - 2, 2) val minDim = min(matrixShape) Basic.concatenate(Seq(batchShape, minDim), 0) } } } val gradInput = matrixSetDiag(gradient, Basic.fill(shape = diagShape)(Tensor(gradient.dataType, 0))) val gradDiag = matrixDiagPart(gradient) Seq(gradInput, gradDiag) } private[this] def matrixDiagPartGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val matrixShape = op.inputs(0).shape(-2 ::) if 
(matrixShape.isFullyDefined && matrixShape(0) == matrixShape(1)) Seq(matrixDiag(outputGradients.head)) else Seq(matrixSetDiag(Basic.zerosLike(op.inputs(0)), outputGradients.head)) } private[this] def matrixBandPartGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { Seq(matrixBandPart(outputGradients.head, op.inputs(1), op.inputs(2)), null, null) } private[this] def batchMatMulGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val x = op.inputs(0) val y = op.inputs(1) val adjointX = op.booleanAttribute("adj_x") val adjointY = op.booleanAttribute("adj_y") val outputGradient = outputGradients.head.toOutput (adjointX, adjointY) match { case (false, false) => Seq[OutputLike]( matmul(outputGradient, y, transposeA = false, transposeB = true, conjugateA = false, conjugateB = true), matmul(x, outputGradient, transposeA = true, transposeB = false, conjugateA = true, conjugateB = false)) case (false, true) => Seq[OutputLike]( matmul(outputGradient, y, transposeA = false, transposeB = false, conjugateA = false, conjugateB = false), matmul(outputGradient, x, transposeA = true, transposeB = false, conjugateA = true, conjugateB = false)) case (true, false) => Seq[OutputLike]( matmul(y, outputGradient, transposeA = false, transposeB = true, conjugateA = false, conjugateB = true), matmul(x, outputGradient, transposeA = false, transposeB = false, conjugateA = false, conjugateB = false)) case (true, true) => Seq[OutputLike]( matmul(y, outputGradient, transposeA = true, transposeB = true, conjugateA = true, conjugateB = true), matmul(outputGradient, x, transposeA = true, transposeB = true, conjugateA = true, conjugateB = true)) } } private[this] def matMulGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val a = conjugate(op.inputs(0)) val b = conjugate(op.inputs(1)) val transposeA = op.booleanAttribute("transpose_a") val transposeB = op.booleanAttribute("transpose_b") val outputGradient = outputGradients.head.toOutput (transposeA, transposeB) match { case (false, false) => Seq[OutputLike]( matmul(outputGradient, b, transposeA = false, transposeB = true, conjugateA = false, conjugateB = false), matmul(a, outputGradient, transposeA = true, transposeB = false, conjugateA = false, conjugateB = false)) case (false, true) => Seq[OutputLike]( matmul(outputGradient, b, transposeA = false, transposeB = false, conjugateA = false, conjugateB = false), matmul(outputGradient, a, transposeA = true, transposeB = false, conjugateA = false, conjugateB = false)) case (true, false) => Seq[OutputLike]( matmul(b, outputGradient, transposeA = false, transposeB = true, conjugateA = false, conjugateB = false), matmul(a, outputGradient, transposeA = false, transposeB = false, conjugateA = false, conjugateB = false)) case (true, true) => Seq[OutputLike]( matmul(b, outputGradient, transposeA = true, transposeB = true, conjugateA = false, conjugateB = false), matmul(outputGradient, a, transposeA = true, transposeB = true, conjugateA = false, conjugateB = false)) } } private[this] def sparseMatMulGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val a = op.inputs(0) val b = op.inputs(1) val transposeA = op.booleanAttribute("transpose_a") val transposeB = op.booleanAttribute("transpose_b") val outputGradient = outputGradients.head.toOutput val aIsSparse = op.booleanAttribute("a_is_sparse") val bIsSparse = op.booleanAttribute("b_is_sparse") // Use heuristic to figure out if the gradient may be sparse. 
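    // A gradient produced directly by a "ReluGrad" op is zero wherever the corresponding forward activation was
    // non-positive, so it is treated as sparse when choosing between the sparse and dense matmul paths below.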
val gradIsSparse = outputGradient.op.opType == "ReluGrad" def helper( a: Output, b: Output, dataType: DataType, tA: Boolean = false, tB: Boolean = false, sA: Boolean = false, sB: Boolean = false): Output = { cast(matmul( a = a, b = if (tB) Basic.transpose(b) else b, transposeA = tA, transposeB = false, conjugateA = false, conjugateB = false, aIsSparse = sA, bIsSparse = sB), dataType) } (transposeA, transposeB) match { case (false, false) => Seq[OutputLike]( helper(outputGradient, b, a.dataType, tA = false, tB = true, sA = gradIsSparse, sB = bIsSparse), helper(a, outputGradient, b.dataType, tA = true, tB = false, sA = aIsSparse, sB = gradIsSparse)) case (false, true) => Seq[OutputLike]( helper(outputGradient, b, a.dataType, tA = false, tB = false, sA = gradIsSparse, sB = bIsSparse), helper(outputGradient, a, b.dataType, tA = true, tB = false, sA = gradIsSparse, sB = aIsSparse)) case (true, false) => Seq[OutputLike]( helper(b, outputGradient, a.dataType, tA = false, tB = true, sA = bIsSparse, sB = gradIsSparse), helper(a, outputGradient, b.dataType, tA = false, tB = false, sA = aIsSparse, sB = gradIsSparse)) case (true, true) => Seq[OutputLike]( helper(b, outputGradient, a.dataType, tA = true, tB = true, sA = bIsSparse, sB = gradIsSparse), helper(outputGradient, a, b.dataType, tA = true, tB = true, sA = gradIsSparse, sB = aIsSparse)) } } private[this] def crossGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val u = op.inputs(0) val v = op.inputs(1) val outputGradient = outputGradients.head.toOutput Seq(cross(v, outputGradient), cross(outputGradient, u)) } private[this] def complexGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val x = op.inputs(0) val y = op.inputs(1) val xShape = Basic.shape(x) val yShape = Basic.shape(y) val outputGradient = outputGradients.head.toOutput val (rx, ry) = Basic.broadcastGradientArguments(xShape, yShape) Seq( Basic.reshape(sum(real(outputGradient), rx), xShape), Basic.reshape(sum(imag(outputGradient), ry), yShape)) } private[this] def realGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val outputGradient = outputGradients.head.toOutput Seq(complex(outputGradient, Basic.constant(0, outputGradient.dataType))) } private[this] def imagGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { val outputGradient = outputGradients.head.toOutput Seq(complex(Basic.constant(0, outputGradient.dataType), outputGradient)) } private[this] def conjGradient(op: Op, outputGradients: Seq[OutputLike]): Seq[OutputLike] = { Seq(conjugate(outputGradients.head)) } } /** @define OpDocMathSelect * The `select` op selects elements from `x` or `y`, depending on `condition`. * * The `x`, and `y` tensors must have the same shape. The output tensor will also have the same shape. * * The `condition` tensor must be a scalar if `x` and `y` are scalars. If `x` and `y` are vectors or higher rank, * then `condition` must be either a scalar, or a vector with size matching the first dimension of `x`, or it must * have the same shape as `x`. * * The `condition` tensor acts as a mask that chooses, based on the value at each element, whether the * corresponding element / row in the output should be taken from `x` (if true) or `y` (if false). * * If `condition` is a vector and `x` and `y` are higher rank matrices, then it chooses which row (outer dimension) * to copy from `x` and `y`. If `condition` has the same shape as `x` and `y`, then it chooses which element to * copy from `x` and `y`. 
* * For example: * {{{ * // 'condition' tensor is [[true, false], [false, true]] * // 'x' is [[1, 2], [3, 4]] * // 'y' is [[5, 6], [7, 8]] * select(condition, x, y) ==> [[1, 6], [7, 4]] * * // 'condition' tensor is [true, false] * // 'x' is [[1, 2], [3, 4]] * // 'y' is [[5, 6], [7, 8]] * select(condition, x, y) ==> [[1, 2], [7, 8]] * }}} * * @define OpDocMathRange * The `range` op constructs a sequence of numbers. * * The op creates a sequence of numbers that begins at `start` and extends by increments of `delta` up to but not * including `limit`. The data type of the resulting tensor is inferred from the inputs unless it is provided * explicitly. * * For example: * {{{ * // 'start' is 3 * // 'limit' is 18 * // 'delta' is 3 * range(start, limit, delta) ==> [3, 6, 9, 12, 15] * * // 'start' is 3 * // 'limit' is 1 * // 'delta' is -0.5 * range(start, limit, delta) ==> [3.0, 2.5, 2.0, 1.5] * }}} * * @define OpDocMathLinspace * The `linspace` op generates values in an interval. * * The op generates a sequence of `numberOfValues` evenly-spaced values beginning at `start`. If * `numberOfValues > 1`, the values in the sequence increase by `(stop - start) / (numberOfValues - 1)`, so that the * last value is exactly equal to `stop`. * * For example: * {{{ * linspace(10.0, 12.0, 3) ==> [10.0 11.0 12.0] * }}} * * @define OpDocMathCast * The `cast` op casts a tensor to a new data type. * * The op casts `x` to the provided data type. * * For example: * {{{ * // `a` is a tensor with values [1.8, 2.2], and data type FLOAT32 * cast(a, INT32) ==> [1, 2] // with data type INT32 * }}} * * **NOTE**: Only a smaller number of types are supported by the `cast` op. The exact casting rule is TBD. The * current implementation uses C++ static cast rules for numeric types, which may be changed in the future. * * @define OpDocMathBitcast * The `bitcast` op bitcasts a tensor from one type to another without copying data. * * Given a tensor `input`, the op returns a tensor that has the same buffer data as `input`, but with data type * `dataType`. If the input data type `T` is larger (in terms of number of bytes), then the output data type * `dataType`, then the shape changes from `[...]` to `[..., sizeof(T)/sizeof(dataType)]`. If `T` is smaller than * `dataType`, then the op requires that the rightmost dimension be equal to `sizeof(dataType)/sizeof(T)`. The * shape then changes from `[..., sizeof(type)/sizeof(T)]` to `[...]`. * * *NOTE*: Bitcast is implemented as a low-level cast, so machines with different endian orderings will give * different results. * * @define OpDocMathAddN * The `addN` op adds all input tensors element-wise. * * @define OpDocMathAccumulateN * The `accumulateN` op adds all input tensors element-wise. * * This op performs the same operation as the `addN` op, but it does not wait for all of its inputs to be ready * before beginning to sum. This can save memory if the inputs become available at different times, since the * minimum temporary storage is proportional to the output size, rather than the inputs size. * * @define OpDocMathAbs * The `abs` op computes the absolute value of a tensor. * * Given a tensor `x` of real numbers, the op returns a tensor containing the absolute value of each element in * `x`. For example, if `x` is an input element and `y` is an output element, the op computes `y = |x|`. * * Given a tensor `x` of complex numbers, the op returns a tensor of type `FLOAT32` or `FLOAT64` that is the * magnitude value of each element in `x`. 
All elements in `x` must be complex numbers of the form `a + bj`. The * magnitude is computed as `\sqrt{a^2 + b^2}`. For example: * {{{ * // Tensor 'x' is [[-2.25 + 4.75j], [-3.25 + 5.75j]] * abs(x) ==> [5.25594902, 6.60492229] * }}} * * @define OpDocMathNegate * The `negate` op computes the numerical negative value of a tensor element-wise. I.e., `y = -x`. * * @define OpDocMathReciprocal * The `reciprocal` op computes the reciprocal value of a tensor element-wise. I.e., `y = 1 / x`. * * @define OpDocMathSquare * The `square` op computes the square of a tensor element-wise. I.e., `y = x * x = x^2`. * * @define OpDocMathSqrt * The `sqrt` op computes the square root of a tensor element-wise. I.e., `y = \sqrt{x} = x^{1/2}`. * * @define OpDocMathRsqrt * The `rsqrt` op computes the reciprocal of the square root of a tensor element-wise. I.e., * `y = 1 / \sqrt{x} = 1 / x^{1/2}`. * * @define OpDocMathExp * The `exp` op computes the exponential of a tensor element-wise. I.e., `y = \exp{x} = e^x`. * * @define OpDocMathExpm1 * The `expm1` op computes the exponential of a tensor minus `1` element-wise. I.e., `y = \exp{x} - 1`. * * @define OpDocMathLog * The `log` op computes the logarithm of a tensor element-wise. I.e., `y = \log{x}`. * * @define OpDocMathLog1p * The `log1p` op computes the logarithm of a tensor plus `1` element-wise. I.e., `y = \log{1 + x}`. * * @define OpDocMathSin * The `sin` op computes the sine of a tensor element-wise. I.e., `y = \sin{x}`. * * @define OpDocMathCos * The `cos` op computes the cosine of a tensor element-wise. I.e., `y = \cos{x}`. * * @define OpDocMathTan * The `tan` op computes the tangent of a tensor element-wise. I.e., `y = \tan{x}`. * * @define OpDocMathAsin * The `asin` op computes the inverse sine of a tensor element-wise. I.e., `y = \asin{x}`. * * @define OpDocMathAcos * The `acos` op computes the inverse cosine of a tensor element-wise. I.e., `y = \acos{x}`. * * @define OpDocMathAtan * The `atan` op computes the inverse tangent of a tensor element-wise. I.e., `y = \atan{x}`. * * @define OpDocMathSinh * The `sinh` op computes the hyperbolic sine of a tensor element-wise. I.e., `y = \sinh{x}`. * * @define OpDocMathCosh * The `cosh` op computes the hyperbolic cosine of a tensor element-wise. I.e., `y = \cosh{x}`. * * @define OpDocMathTanh * The `tanh` op computes the hyperbolic tangent of a tensor element-wise. I.e., `y = \tanh{x}`. * * @define OpDocMathAsinh * The `asinh` op computes the inverse hyperbolic sine of a tensor element-wise. I.e., `y = \asinh{x}`. * * @define OpDocMathAcosh * The `acosh` op computes the inverse hyperbolic cosine of a tensor element-wise. I.e., `y = \acosh{x}`. * * @define OpDocMathAtanh * The `atanh` op computes the inverse hyperbolic tangent of a tensor element-wise. I.e., `y = \atanh{x}`. * * @define OpDocMathLogGamma * The `logGamma` op computes the logarithm of the absolute value of the Gamma function applied element-wise on a * tensor. I.e., `y = \log{|\Gamma{x}|}`. * * @define OpDocMathDigamma * The `digamma` op computes the derivative of the logarithm of the absolute value of the Gamma function applied * element-wise on a tensor (i.e., the digamma or Psi function). I.e., `y = \partial\log{|\Gamma{x}|}`. * * @define OpDocMathErf * The `erf` op computes the Gaussian error function element-wise on a tensor. * * @define OpDocMathErfc * The `erfc` op computes the complementary Gaussian error function element-wise on a tensor. * * @define OpDocMathSigmoid * The `sigmoid` op computes the sigmoid function element-wise on a tensor. 
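  * For reference, the sigmoid function is `y = 1 / (1 + \exp{-x})`, so its outputs lie in the interval `(0, 1)`.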
  *
  * @define OpDocMathSign
  * The `sign` op computes an element-wise indication of the sign of a tensor.
  *
  * I.e., `y = sign(x) = -1` if `x < 0`; `0` if `x == 0`; `1` if `x > 0`.
  *
  * Zero is returned for `NaN` inputs.
  *
  * For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.
  *
  * @define OpDocMathRound
  * The `round` op computes the round value of a tensor element-wise.
  *
  * Rounds half to even. Also known as bankers' rounding. If you want to round according to the current system
  * rounding mode, use the [[roundInt]] op instead.
  *
  * For example:
  * {{{
  *   // 'a' is [0.9, 2.5, 2.3, 1.5, -4.5]
  *   round(a) ==> [1.0, 2.0, 2.0, 2.0, -4.0]
  * }}}
  *
  * @define OpDocMathRoundInt
  * The `roundInt` op computes the round value of a tensor element-wise.
  *
  * If the result is midway between two representable values, the even representable is chosen.
  *
  * For example:
  * {{{
  *   roundInt(-1.5) ==> -2.0
  *   roundInt(0.5000001) ==> 1.0
  *   roundInt([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.]
  * }}}
  *
  * @define OpDocMathFloor
  * The `floor` op computes the largest integer not greater than the current value of a tensor, element-wise.
  *
  * @define OpDocMathCeil
  * The `ceil` op computes the smallest integer not less than the current value of a tensor, element-wise.
  *
  * @define OpDocMathIsNaN
  * The `isNaN` op returns a boolean tensor indicating which elements of a tensor are NaN-valued.
  *
  * @define OpDocMathIsInf
  * The `isInf` op returns a boolean tensor indicating which elements of a tensor are Inf-valued.
  *
  * @define OpDocMathIsFinite
  * The `isFinite` op returns a boolean tensor indicating which elements of a tensor are finite-valued.
  *
  * @define OpDocMathAdd
  * The `add` op adds two tensors element-wise. I.e., `z = x + y`.
  *
  * NOTE: This op supports broadcasting. More information about broadcasting can be found
  * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
  *
  * @define OpDocMathSubtract
  * The `subtract` op subtracts two tensors element-wise. I.e., `z = x - y`.
  *
  * NOTE: This op supports broadcasting. More information about broadcasting can be found
  * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
  *
  * @define OpDocMathMultiply
  * The `multiply` op multiplies two tensors element-wise. I.e., `z = x * y`.
  *
  * NOTE: This op supports broadcasting. More information about broadcasting can be found
  * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
  *
  * @define OpDocMathDivide
  * The `divide` op divides two tensors element-wise. I.e., `z = x / y`.
  *
  * NOTE: This op supports broadcasting. More information about broadcasting can be found
  * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
  *
  * @define OpDocMathFloorDivide
  * The `floorDivide` op floor-divides two tensors element-wise. I.e., `z = x // y`.
  *
  * NOTE: This op supports broadcasting. More information about broadcasting can be found
  * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
  *
  * @define OpDocMathTruncateDivide
  * The `truncateDivide` op truncate-divides two tensors element-wise.
  *
  * Truncation designates that negative numbers will round fractional quantities toward zero. I.e. `-7 / 5 = -1`.
  * This matches C semantics, but it is different from Python semantics. See `floorDivide` for a division function
  * that matches Python semantics.
  *
  * I.e., `z = x / y`, for `x` and `y` being integer tensors.
  *
  * NOTE: This op supports broadcasting.
  * More information about broadcasting can be found
  * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
  *
  * @define OpDocMathRealDivide
  * The `realDivide` op divides two real tensors element-wise.
  *
  * If `x` and `y` are real-valued tensors, the op will return the floating-point division.
  *
  * I.e., `z = x / y`, for `x` and `y` being real tensors.
  *
  * NOTE: This op supports broadcasting. More information about broadcasting can be found
  * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
  *
  * @define OpDocMathSquaredDifference
  * The `squaredDifference` op computes the squared difference between two tensors element-wise.
  * I.e., `z = (x - y) * (x - y)`.
  *
  * NOTE: This op supports broadcasting. More information about broadcasting can be found
  * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
  *
  * @define OpDocMathMod
  * The `mod` op computes the remainder of the division between two tensors element-wise.
  *
  * The op emulates C semantics in that the result is consistent with a truncating divide.
  * E.g., `truncate(x / y) * y + truncateMod(x, y) = x`.
  *
  * NOTE: This op supports broadcasting. More information about broadcasting can be found
  * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
  *
  * @define OpDocMathFloorMod
  * The `floorMod` op computes the remainder of the division between two tensors element-wise.
  *
  * When `x < 0` xor `y < 0` is true, the op follows Python semantics in that the result here is
  * consistent with a flooring divide. E.g., `floor(x / y) * y + mod(x, y) = x`.
  *
  * NOTE: This op supports broadcasting. More information about broadcasting can be found
  * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
  *
  * @define OpDocMathTruncateMod
  * The `truncateMod` op computes the remainder of the division between two tensors element-wise.
  *
  * The op emulates C semantics in that the result here is consistent with a truncating divide.
  * E.g., `truncate(x / y) * y + truncateMod(x, y) = x`.
  *
  * NOTE: This op supports broadcasting. More information about broadcasting can be found
  * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
  *
  * @define OpDocMathPow
  * The `pow` op computes the power of one tensor raised to another, element-wise.
  *
  * Given a tensor `x` and a tensor `y`, the op computes `x^y` for the corresponding elements in `x`
  * and `y`.
  *
  * For example:
  * {{{
  *   // Tensor 'x' is [[2, 2], [3, 3]]
  *   // Tensor 'y' is [[8, 16], [2, 3]]
  *   pow(x, y) ==> [[256, 65536], [9, 27]]
  * }}}
  *
  * @define OpDocMathIgammac
  * The `igammac` op computes the upper regularized incomplete Gamma function `Q(a, x)`.
  *
  * The upper regularized incomplete Gamma function is defined as:
  *
  * `Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)`, where:
  *
  * `Gamma(a, x) = \int_{x}^{\infty} t^{a-1} exp(-t) dt`
  *
  * is the upper incomplete Gamma function.
  *
  * Note that, above, `P(a, x)` (`Igamma`) is the lower regularized incomplete Gamma function.
  *
  * @define OpDocMathIgamma
  * The `igamma` op computes the lower regularized incomplete Gamma function `P(a, x)`.
  *
  * The lower regularized incomplete Gamma function is defined as:
  *
  * `P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)`, where:
  *
  * `gamma(a, x) = \int_{0}^{x} t^{a-1} exp(-t) dt`
  *
  * is the lower incomplete Gamma function.
  *
  * Note that, above, `Q(a, x)` (`Igammac`) is the upper regularized incomplete Gamma function.
  *
  * @define OpDocMathZeta
  * The `zeta` op computes the Hurwitz zeta function `\zeta(x, q)`.
* * The Hurwitz zeta function is defined as: * * `\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}`. * * @define OpDocMathPolygamma * The `polygamma` op computes the polygamma function `\psi^{(n)}(x)`. * * The polygamma function is defined as: * * `\psi^{(n)}(x) = \frac{d^n}{dx^n} \psi(x)`, where `\psi(x)` is the digamma function. * * @define OpDocMathAtan2 * The `atan2` op computes the inverse tangent of `x / y` element-wise, respecting signs of the arguments. * * The op computes the angle `\theta \in [-\pi, \pi]` such that `y = r \cos(\theta)` and * `x = r \sin(\theta)`, where `r = \sqrt(x^2 + y^2)`. * * @define OpDocMathMinimum * The `minimum` op returns the element-wise minimum between two tensors. I.e., `z = x < y ? x : y`. * * NOTE: This op supports broadcasting. More information about broadcasting can be found * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). * * @define OpDocMathMaximum * The `maximum` op returns the element-wise maximum between two tensors. I.e., `z = x > y ? x : y`. * * NOTE: This op supports broadcasting. More information about broadcasting can be found * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). * * @define OpDocMathIncompleteBeta * The `incompleteBeta` op computes the regularized incomplete beta integral `I_x(a, b)`. * * The regularized incomplete beta integral is defined as: * * `I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}`, where: * * `B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt` * * is the incomplete beta function and `B(a, b)` is the *complete* beta function. * * @define OpDocMathLogicalNot * The `logicalNot` op computes the truth value of `!x` element-wise. * * @define OpDocMathLogicalAnd * The `logicalAnd` op computes the truth value of `x && y` element-wise. * * NOTE: This op supports broadcasting. More information about broadcasting can be found * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). * * @define OpDocMathLogicalOr * The `logicalOr` op computes the truth value of `x || y` element-wise. * * NOTE: This op supports broadcasting. More information about broadcasting can be found * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). * * @define OpDocMathLogicalXOr * The `logicalXOr` op computes the truth value of `(x || y) && !(x && y)` element-wise. * * NOTE: This op supports broadcasting. More information about broadcasting can be found * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). * * @define OpDocMathEqual * The `equal` op computes the truth value of `x == y` element-wise. * * NOTE: This op supports broadcasting. More information about broadcasting can be found * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). * * @define OpDocMathNotEqual * The `notEqual` op computes the truth value of `x != y` element-wise. * * NOTE: This op supports broadcasting. More information about broadcasting can be found * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). * * @define OpDocMathApproximatelyEqual * The `approximatelyEqual` op computes the truth value of `abs(x - y) < tolerance` element-wise. * * NOTE: This op supports broadcasting. More information about broadcasting can be found * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). * * @define OpDocMathLess * The `less` op computes the truth value of `x < y` element-wise. * * NOTE: This op supports broadcasting. 
More information about broadcasting can be found * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). * * @define OpDocMathLessEqual * The `lessEqual` op computes the truth value of `x <= y` element-wise. * * NOTE: This op supports broadcasting. More information about broadcasting can be found * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). * * @define OpDocMathGreater * The `greater` op computes the truth value of `x > y` element-wise. * * NOTE: This op supports broadcasting. More information about broadcasting can be found * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). * * @define OpDocMathGreaterEqual * The `greaterEqual` op computes the truth value of `x >= y` element-wise. * * NOTE: This op supports broadcasting. More information about broadcasting can be found * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). * * @define OpDocMathSum * The `sum` op computes the sum of elements across axes of a tensor. * * Reduces `input` along the axes given in `axes`. Unless `keepDims` is `true`, the rank of the tensor is reduced * by 1 for each entry in `axes`. If `keepDims` is `true`, the reduced axes are retained with size 1. * * If `axes` is `null`, then all axes are reduced, and a tensor with a single element is returned. * * For example: * {{{ * // 'x' is [[1, 1, 1]], [1, 1, 1]] * sum(x) ==> 6 * sum(x, 0) ==> [2, 2, 2] * sum(x, 1) ==> [3, 3] * sum(x, 1, keepDims = true) ==> [[3], [3]] * sum(x, [0, 1]) ==> 6 * }}} * * @define OpDocMathMean * The `mean` op computes the mean of elements across axes of a tensor. * * Reduces `input` along the axes given in `axes`. Unless `keepDims` is `true`, the rank of the tensor is reduced * by 1 for each entry in `axes`. If `keepDims` is `true`, the reduced axes are retained with size 1. * * If `axes` is `null`, then all axes are reduced, and a tensor with a single element is returned. * * For example: * {{{ * // 'x' is [[1.0, 1.0], [2.0, 2.0]] * mean(x) ==> 1.5 * mean(x, 0) ==> [1.5, 1.5] * mean(x, 1) ==> [1.0, 2.0] * }}} * * @define OpDocMathProd * The `prod` op computes the product of elements across axes of a tensor. * * Reduces `input` along the axes given in `axes`. Unless `keepDims` is `true`, the rank of the tensor is reduced * by 1 for each entry in `axes`. If `keepDims` is `true`, the reduced axes are retained with size 1. * * If `axes` is `null`, then all axes are reduced, and a tensor with a single element is returned. * * For example: * {{{ * // 'x' is [[1, 1, 1]], [1, 1, 1]] * prod(x) ==> 1 * prod(x, 0) ==> [1, 1, 1] * prod(x, 1) ==> [1, 1] * prod(x, 1, keepDims = true) ==> [[1], [1]] * prod(x, [0, 1]) ==> 1 * }}} * * @define OpDocMathMin * The `min` op computes the minimum of elements across axes of a tensor. * * Reduces `input` along the axes given in `axes`. Unless `keepDims` is `true`, the rank of the tensor is reduced * by 1 for each entry in `axes`. If `keepDims` is `true`, the reduced axes are retained with size 1. * * If `axes` is `null`, then all axes are reduced, and a tensor with a single element is returned. * * For example: * {{{ * // 'x' is [[1.0, 1.0], [2.0, 2.0]] * min(x) ==> 1.0 * min(x, 0) ==> [1.0, 1.0] * min(x, 1) ==> [1.0, 2.0] * }}} * * @define OpDocMathMax * The `max` op computes the maximum of elements across axes of a tensor. * * Reduces `input` along the axes given in `axes`. Unless `keepDims` is `true`, the rank of the tensor is reduced * by 1 for each entry in `axes`. 
If `keepDims` is `true`, the reduced axes are retained with size 1. * * If `axes` is `null`, then all axes are reduced, and a tensor with a single element is returned. * * For example: * {{{ * // 'x' is [[1.0, 1.0], [2.0, 2.0]] * max(x) ==> 2.0 * max(x, 0) ==> [2.0, 2.0] * max(x, 1) ==> [1.0, 2.0] * }}} * * @define OpDocMathAll * The `all` op computes the logical AND of elements across axes of a tensor. * * Reduces `input` along the axes given in `axes`. Unless `keepDims` is `true`, the rank of the tensor is reduced * by 1 for each entry in `axes`. If `keepDims` is `true`, the reduced axes are retained with size 1. * * If `axes` is `null`, then all axes are reduced, and a tensor with a single element is returned. * * For example: * {{{ * // 'x' is [[true, true], [false, false]] * all(x) ==> false * all(x, 0) ==> [false, false] * all(x, 1) ==> [true, false] * }}} * * @define OpDocMathAny * The `any` op computes the logical OR of elements across axes of a tensor. * * Reduces `input` along the axes given in `axes`. Unless `keepDims` is `true`, the rank of the tensor is reduced * by 1 for each entry in `axes`. If `keepDims` is `true`, the reduced axes are retained with size 1. * * If `axes` is `null`, then all axes are reduced, and a tensor with a single element is returned. * * For example: * {{{ * // 'x' is [[true, true], [false, false]] * any(x) ==> true * any(x, 0) ==> [true, true] * any(x, 1) ==> [true, false] * }}} * * @define OpDocMathLogSumExp * The `logSumExp` op computes the log-sum-exp of elements across axes of a tensor. * * Reduces `input` along the axes given in `axes`. Unless `keepDims` is `true`, the rank of the tensor is reduced * by 1 for each entry in `axes`. If `keepDims` is `true`, the reduced axes are retained with size 1. * * If `axes` is `null`, then all axes are reduced, and a tensor with a single element is returned. * * For example: * {{{ * // 'x' is [[0, 0, 0], [0, 0, 0]] * logSumExp(x) ==> log(6) * logSumExp(x, 0) ==> [log(2), log(2), log(2)] * logSumExp(x, 1) ==> [log(3), log(3)] * logSumExp(x, 1, keepDims = true) ==> [[log(3)], [log(3)]] * logSumExp(x, [0, 1]) ==> log(6) * }}} * * @define OpDocMathCountNonZero * The `countNonZero` op computes the number of non-zero elements across axes of a tensor. * * Reduces `input` along the axes given in `axes`. Unless `keepDims` is `true`, the rank of the tensor is reduced * by 1 for each entry in `axes`. If `keepDims` is `true`, the reduced axes are retained with size 1. * * If `axes` is `null`, then all axes are reduced, and a tensor with a single element is returned. * * IMPORTANT NOTE: Floating point comparison to zero is done by exact floating point equality check. Small values * are '''not''' rounded to zero for the purposes of the non-zero check. * * For example: * {{{ * // 'x' is [[0, 1, 0], [1, 1, 0]] * countNonZero(x) ==> 3 * countNonZero(x, 0) ==> [1, 2, 0] * countNonZero(x, 1) ==> [1, 2] * countNonZero(x, 1, keepDims = true) ==> [[1], [2]] * countNonZero(x, [0, 1]) ==> 3 * }}} * * @define OpDocMathArgmax * The `argmax` op returns the indices with the largest value across axes of a tensor. * * Note that in case of ties the identity of the return value is not guaranteed. * * @define OpDocMathArgmin * The `argmin` op returns the indices with the smallest value across axes of a tensor. * * Note that in case of ties the identity of the return value is not guaranteed. * * @define OpDocMathBinCount * The `binCount` op counts the number of occurrences of each value in an integer tensor. 
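  *
  * For example (illustrative values, not taken verbatim from the library documentation):
  * {{{
  *   // 'input' is [1, 1, 2, 5]
  *   binCount(input) ==> [0, 2, 1, 0, 0, 1]
  * }}}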
* * If `minLength` and `maxLength` are not provided, the op returns a vector with length `max(input) + 1`, if * `input` is non-empty, and length `0` otherwise. * * If `weights` is not `null`, then index `i` of the output stores the sum of the value in `weights` at each * index where the corresponding value in `input` is equal to `i`. * * @define OpDocMathCumsum * The `cumsum` op computes the cumulative sum of the tensor along an axis. * * By default, the op performs an inclusive cumulative sum, which means that the first element of the input is * identical to the first element of the output: * {{{ * cumsum([a, b, c]) ==> [a, a + b, a + b + c] * }}} * * By setting the `exclusive` argument to `true`, an exclusive cumulative sum is performed instead: * {{{ * cumsum([a, b, c], exclusive = true) ==> [0, a, a + b] * }}} * * By setting the `reverse` argument to `true`, the cumulative sum is performed in the opposite direction: * {{{ * cumsum([a, b, c], reverse = true) ==> [a + b + c, b + c, c] * }}} * * This is more efficient than using separate [[Basic.reverse]] ops. * * The `reverse` and `exclusive` arguments can also be combined: * {{{ * cumsum([a, b, c], exclusive = true, reverse = true) ==> [b + c, c, 0] * }}} * * @define OpDocMathCumprod * The `cumprod` op computes the cumulative product of the tensor along an axis. * * By default, the op performs an inclusive cumulative product, which means that the first element of the input * is identical to the first element of the output: * {{{ * cumprod([a, b, c]) ==> [a, a * b, a * b * c] * }}} * * By setting the `exclusive` argument to `true`, an exclusive cumulative product is performed instead: * {{{ * cumprod([a, b, c], exclusive = true) ==> [1, a, a * b] * }}} * * By setting the `reverse` argument to `true`, the cumulative product is performed in the opposite direction: * {{{ * cumprod([a, b, c], reverse = true) ==> [a * b * c, b * c, c] * }}} * * This is more efficient than using separate [[Basic.reverse]] ops. * * The `reverse` and `exclusive` arguments can also be combined: * {{{ * cumprod([a, b, c], exclusive = true, reverse = true) ==> [b * c, c, 1] * }}} * * @define OpDocMathSegmentSum * The `segmentSum` op computes the sum along segments of a tensor. * * The op computes a tensor such that `output(i) = \sum_{j...} data(j,...)` where the sum is over all `j` such * that `segmentIndices(j) == i`. Unlike `unsortedSegmentSum`, `segmentIndices` need to be sorted. * * If the sum is empty for a given segment index `i`, `output(i)` is set to `0`. * * The result tensor has the same data type as `data`, but its first dimension size is equal to the number of * distinct segment indices. * * @define OpDocMathSegmentMean * The `segmentMean` op computes the mean along segments of a tensor. * * The op computes a tensor such that `output(i) = \frac{\sum_{j...} data(j,...)}{N}` where the sum is over * all `j` such that `segmentIndices(j) == i` and `N` is the total number of values being summed. Unlike * `unsortedSegmentMean`, `segmentIndices` need to be sorted. * * If the sum is empty for a given segment index `i`, `output(i)` is set to `0`. * * The result tensor has the same data type as `data`, but its first dimension size is equal to the number of * distinct segment indices. * * @define OpDocMathSegmentProd * The `segmentProd` op computes the product along segments of a tensor. * * The op computes a tensor such that `output(i) = \prod_{j...} data(j,...)` where the product is over all `j` * such that `segmentIndices(j) == i`.
Unlike `unsortedSegmentProd`, `segmentIndices` need to be sorted. * * If the product is empty for a given segment index `i`, `output(i)` is set to `1`. * * The result tensor has the same data type as `data`, but its first dimension size is equal to the number of * distinct segment indices. * * @define OpDocMathSegmentMin * The `segmentMin` op computes the min along segments of a tensor. * * The op computes a tensor such that `output(i) = \min_{j...} data(j,...)` where the min is over all `j` * such that `segmentIndices(j) == i`. Unlike `unsortedSegmentMin`, `segmentIndices` need to be sorted. * * If the min is empty for a given segment index `i`, `output(i)` is set to `0`. * * The result tensor has the same data type as `data`, but its first dimension size is equal to the number of * distinct segment indices. * * @define OpDocMathSegmentMax * The `segmentMax` op computes the max along segments of a tensor. * * The op computes a tensor such that `output(i) = \max_{j...} data(j,...)` where the max is over all `j` * such that `segmentIndices(j) == i`. Unlike `unsortedSegmentMax`, `segmentIndices` need to be sorted. * * If the max is empty for a given segment index `i`, `output(i)` is set to `0`. * * The result tensor has the same data type as `data`, but its first dimension size is equal to the number of * distinct segment indices. * * @define OpDocMathUnsortedSegmentSum * The `unsortedSegmentSum` op computes the sum along segments of a tensor. * * The op computes a tensor such that `output(i) = \sum_{j...} data(j...)` where the sum is over all `j` * such that `segmentIndices(j) == i`. Unlike `segmentSum`, `segmentIndices` need not be sorted and need not * cover all values in the full range of valid values. * * If the sum is empty for a given segment index `i`, `output(i)` is set to `0`. * * `segmentsNumber` should equal the number of distinct segment indices. * * The result tensor has the same data type as `data`, but its first dimension size is equal to the number of * distinct segment indices. * * @define OpDocMathUnsortedSegmentMean * The `unsortedSegmentMean` op computes the mean along segments of a tensor. * * The op computes a tensor such that `output(i) = \frac{\sum_{j...} data(j...)}{N}` where the sum is over * all `j` such that `segmentIndices(j) == i` and `N` is the total number of values being summed. Unlike * `segmentMean`, `segmentIndices` need not be sorted and need not cover all values in the full range of valid * values. * * If the sum is empty for a given segment index `i`, `output(i)` is set to `0`. * * `segmentsNumber` should equal the number of distinct segment indices. * * The result tensor has the same data type as `data`, but its first dimension size is equal to the number of * distinct segment indices. * * @define OpDocMathUnsortedSegmentProd * The `unsortedSegmentProd` op computes the product along segments of a tensor. * * The op computes a tensor such that `output(i) = \prod_{j...} data(j...)` where the product is over all `j` * such that `segmentIndices(j) == i`. Unlike `segmentProd`, `segmentIndices` need not be sorted and need not * cover all values in the full range of valid values. * * If the product is empty for a given segment index `i`, `output(i)` is set to `1`. * * `segmentsNumber` should equal the number of distinct segment indices. * * The result tensor has the same data type as `data`, but its first dimension size is equal to the number of * distinct segment indices.
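 * (Editor's illustrative example, not part of the original Scaladoc: assuming a rank-1 `data` tensor and the
 * segment indices shown below, the sorted and unsorted segment sums documented above behave as follows.)
 * {{{
 *   // 'data' is [5, 1, 7, 2, 3, 4]
 *   // 'segmentIndices' is [0, 0, 0, 1, 2, 2]          (sorted, covers segments 0 through 2)
 *   segmentSum(data, segmentIndices) ==> [13, 2, 7]
 *
 *   // 'unsortedIndices' is [2, 0, 2, 1, 0, 2]          (need not be sorted; segment 3 is empty)
 *   unsortedSegmentSum(data, unsortedIndices, segmentsNumber = 4) ==> [4, 2, 16, 0]
 * }}}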
* * @define OpDocMathUnsortedSegmentMin * The `unsortedSegmentMin` op computes the min along segments of a tensor. * * The op computes a tensor such that `output(i) = \min_{j...} data(j...)` where the min is over all `j` * such that `segmentIndices(j) == i`. Unlike `segmentMin`, `segmentIndices` need not be sorted and need not * cover all values in the full range of valid values. * * If the min is empty for a given segment index `i`, `output(i)` is set to `0`. * * `segmentsNumber` should equal the number of distinct segment indices. * * The result tensor has the same data type as `data`, but its first dimension size is equal to the number of * distinct segment indices. * * @define OpDocMathUnsortedSegmentMax * The `unsortedSegmentMax` op computes the max along segments of a tensor. * * The op computes a tensor such that `output(i) = \max_{j...} data(j...)` where the max is over all `j` * such that `segmentIndices(j) == i`. Unlike `segmentMax`, `segmentIndices` need not be sorted and need not * cover all values in the full range of valid values. * * If the max is empty for a given segment index `i`, `output(i)` is set to `0`. * * `segmentsNumber` should equal the number of distinct segment indices. * * The result tensor has the same data type as `data`, but its first dimension size is equal to the number of * distinct segment indices. * * @define OpDocMathUnsortedSegmentSqrtN * The `unsortedSegmentSqrtN` op computes the sum along segments of a tensor, divided by the square root of * the number of elements being summed. * * The op computes a tensor such that `output(i) = \frac{\sum_{j...} data(j...)}{\sqrt{N}}` where the sum is * over all `j` such that `segmentIndices(j) == i` and `N` is the total number of values being summed. * * If the sum is empty for a given segment index `i`, `output(i)` is set to `0`. * * `segmentsNumber` should equal the number of distinct segment indices. * * The result tensor has the same data type as `data`, but its first dimension size is equal to the number of * distinct segment indices. * * @define OpDocMathSparseSegmentSum * The `sparseSegmentSum` op computes the sum along sparse segments of a tensor. * * The op is similar to that of [[segmentSum]], with the difference that `segmentIndices` can have rank less * than `data`'s first dimension, selecting a subset of dimension `0`, specified by `indices`. `segmentIndices` is * allowed to have missing indices, in which case the output will be zeros at those indices. In those cases, * `numSegments` is used to determine the size of the output. * * For example: * {{{ * // 'c' is [[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]] * * // Select two rows, one segment. * sparseSegmentSum(c, Tensor(0, 1), Tensor(0, 0)) ==> [[0, 0, 0, 0]] * * // Select two rows, two segments. * sparseSegmentSum(c, Tensor(0, 1), Tensor(0, 1)) ==> [[1, 2, 3, 4], [-1, -2, -3, -4]] * * // Select all rows, two segments. * sparseSegmentSum(c, Tensor(0, 1, 2), Tensor(0, 0, 1)) ==> [[0, 0, 0, 0], [5, 6, 7, 8]] * // which is equivalent to: * segmentSum(c, Tensor(0, 0, 1)) * }}} * * The result tensor has the same data type as `data`, but its first dimension size is equal to the number of * distinct segment indices. * * @define OpDocMathSparseSegmentMean * The `sparseSegmentMean` op computes the mean along sparse segments of a tensor. * * The op is similar to that of [[segmentMean]], with the difference that `segmentIndices` can have rank less * than `data`'s first dimension, selecting a subset of dimension `0`, specified by `indices`.
`segmentIndices` is * allowed to have missing indices, in which case the output will be zeros at those indices. In those cases, * `numSegments` is used to determine the size of the output. * * For example: * {{{ * // 'c' is [[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]] * * // Select two rows, one segment. * sparseSegmentMean(c, Tensor(0, 1), Tensor(0, 0)) ==> [[0, 0, 0, 0]] * * // Select two rows, two segments. * sparseSegmentMean(c, Tensor(0, 1), Tensor(0, 1)) ==> [[1, 2, 3, 4], [-1, -2, -3, -4]] * * // Select all rows, two segments. * sparseSegmentMean(c, Tensor(0, 1, 2), Tensor(0, 0, 1)) ==> [[0, 0, 0, 0], [5, 6, 7, 8]] * // which is equivalent to: * segmentMean(c, Tensor(0, 0, 1)) * }}} * * The result tensor has the same data type as `data`, but its first dimension size is equal to the number of * distinct segment indices. * * @define OpDocMathSparseSegmentSumSqrtN * The `sparseSegmentSumSqrtN` op computes the sum along sparse segments of a tensor, divided by the square * root of the number of elements being summed. `segmentIndices` is allowed to have missing indices, in which case * the output will be zeros at those indices. In those cases, `numSegments` is used to determine the size of the * output. * * Similar to [[sparseSegmentSum]]. * * The result tensor has the same data type as `data`, but its first dimension size is equal to the number of * distinct segment indices. * * @define OpDocMathDiag * The `diag` op constructs a diagonal tensor using the provided diagonal values. * * Given a `diagonal`, the op returns a tensor with that `diagonal` and everything else padded with zeros. The * diagonal is computed as follows: * * Assume that `diagonal` has shape `[D1,..., DK]`. Then the output tensor, `output`, is a rank-`2K` tensor with * shape `[D1, ..., DK, D1, ..., DK]`, where `output(i1, ..., iK, i1, ..., iK) = diagonal(i1, ..., iK)` and `0` * everywhere else. * * For example: * {{{ * // 'diagonal' is [1, 2, 3, 4] * diag(diagonal) ==> [[1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]] * }}} * * This op is the inverse of [[diagPart]]. * * @define OpDocMathDiagPart * The `diagPart` op returns the diagonal part of a tensor. * * The op returns a tensor with the `diagonal` part of the `input`. The `diagonal` part is computed as follows: * * Assume `input` has shape `[D1, ..., DK, D1, ..., DK]`. Then the output is a rank-`K` tensor with shape * `[D1,..., DK]`, where `diagonal(i1, ..., iK) = output(i1, ..., iK, i1, ..., iK)`. * * For example: * {{{ * // 'input' is [[1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]] * diagPart(input) ==> [1, 2, 3, 4] * }}} * * This op is the inverse of [[diag]]. * * @define OpDocMathMatrixDiag * The `matrixDiag` op returns a batched diagonal tensor with the provided batched diagonal values. * * Given a `diagonal`, the op returns a tensor with that `diagonal` and everything else padded with zeros. Assuming * that `diagonal` has `k` dimensions `[I, J, K, ..., N]`, the output is a tensor of rank `k + 1` with dimensions * `[I, J, K, ..., N, N]`, where: `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`. * * For example: * {{{ * // 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]] (shape = [2, 4]) * matrixDiag(diagonal) ==> [[[1, 0, 0, 0] * [0, 2, 0, 0] * [0, 0, 3, 0] * [0, 0, 0, 4]], * [[5, 0, 0, 0] * [0, 6, 0, 0] * [0, 0, 7, 0] * [0, 0, 0, 8]]] // with shape [2, 4, 4] * }}} * * @define OpDocMathMatrixSetDiag * The `matrixSetDiag` op returns a batched matrix tensor with new batched diagonal values. 
* * Given `input` and `diagonal`, the op returns a tensor with the same shape and values as `input`, except for the * main diagonal of its innermost matrices. These diagonals will be overwritten by the values in `diagonal`. * Assuming that `input` has `k + 1` dimensions, `[I, J, K, ..., M, N]`, and `diagonal` has `k` dimensions, * `[I, J, K, ..., min(M, N)]`, then the output is a tensor of rank `k + 1` with dimensions `[I, J, K, ..., M, N]`, * where: * - `output[i, j, k, ..., m, n] == diagonal[i, j, k, ..., n]`, for `m == n`, and * - `output[i, j, k, ..., m, n] == input[i, j, k, ..., m, n]`, for `m != n`. * * @define OpDocMathMatrixDiagPart * The `matrixDiagPart` op returns the batched diagonal part of a batched tensor. * * The op returns a tensor with the `diagonal` part of the batched `input`. Assuming that `input` has `k` * dimensions, `[I, J, K, ..., M, N]`, then the output is a tensor of rank `k - 1` with dimensions * `[I, J, K, ..., min(M, N)]`, where `diagonal[i, j, k, ..., n] == input[i, j, k, ..., n, n]`. * * Note that `input` must have rank of at least `2`. * * For example: * {{{ * // 'input' is: * // [[[1, 0, 0, 0] * // [0, 2, 0, 0] * // [0, 0, 3, 0] * // [0, 0, 0, 4]], * // [[5, 0, 0, 0] * // [0, 6, 0, 0] * // [0, 0, 7, 0] * // [0, 0, 0, 8]]] with shape [2, 4, 4] * matrixDiagPart(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]] // with shape [2, 4] * }}} * * @define OpDocMathMatrixBandPart * The `matrixBandPart` op copies a tensor, setting everything outside a central band in each innermost * matrix of the tensor to zero. * * Assuming that `input` has `k` dimensions, `[I, J, K, ..., M, N]`, the output is a tensor with the same shape, * where `band[i, j, k, ..., m, n] == indicatorBand(m, n) * input[i, j, k, ..., m, n]`. The indicator function is * defined as: * {{{ * indicatorBand(m, n) = (numSubDiagonals < 0 || m - n <= numSubDiagonals) && * (numSuperDiagonals < 0 || n - m <= numSuperDiagonals) * }}} * * For example: * {{{ * // 'input' is: * // [[ 0, 1, 2, 3] * // [-1, 0, 1, 2] * // [-2, -1, 0, 1] * // [-3, -2, -1, 0]] * matrixBandPart(input, 1, -1) ==> [[ 0, 1, 2, 3] * [-1, 0, 1, 2] * [ 0, -1, 0, 1] * [ 0, 0, -1, 0]] * matrixBandPart(input, 2, 1) ==> [[ 0, 1, 0, 0] * [-1, 0, 1, 0] * [-2, -1, 0, 1] * [ 0, -2, -1, 0]] * }}} * * Useful special cases: * {{{ * matrixBandPart(input, 0, -1) ==> Upper triangular part * matrixBandPart(input, -1, 0) ==> Lower triangular part * matrixBandPart(input, 0, 0) ==> Diagonal * }}} * * @define OpDocMathTrace * The `trace` op computes the trace of a tensor. * * The trace of a tensor is defined as the sum along the main diagonal of each inner-most matrix in it. * If the tensor is of rank `k` with shape `[I, J, K, ..., L, M, N]`, then the output is a tensor of rank * `k - 2` with dimensions `[I, J, K, ..., L]` where: * `output[i, j, k, ..., l] = trace(x[i, j, k, ..., l, :, :])`. * * For example: * {{{ * // 'x' is [[1, 2], [3, 4]] * trace(x) ==> 5 * * // 'x' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]] * trace(x) ==> 15 * * // 'x' is [[[ 1, 2, 3], * // [ 4, 5, 6], * // [ 7, 8, 9]], * // [[-1, -2, -3], * // [-4, -5, -6], * // [-7, -8, -9]]] * trace(x) ==> [15, -15] * }}} * * @define OpDocMathScalarMul * The `scalarMul` op multiplies a scalar tensor with another, potentially sparse, tensor. * * This function is intended for use in gradient code which might deal with [[OutputIndexedSlices]] objects, * which are easy to multiply by a scalar but more expensive to multiply with arbitrary tensors. * * @define OpDocMathMatmul * The `matmul` op multiplies two matrices.
* * The inputs must, following any transpositions, be tensors of rank >= 2, where the inner 2 dimensions specify * valid matrix multiplication arguments and any further outer dimensions match. * * Note that this op corresponds to a matrix product and not an element-wise product. For example: * `output[..., i, j] = sum_k (a[..., i, k] * b[..., k, j])`, for all indices `i` and `j`. * * Both matrices must be of the same data type. The supported types are: `BFLOAT16`, `FLOAT16`, `FLOAT32`, * `FLOAT64`, `INT32`, `COMPLEX64`, and `COMPLEX128`. * * Either matrix can be transposed and/or conjugated on the fly by setting one of the corresponding flags to * `true`. These are set to `false` by default. * * If one or both of the matrices contain a lot of zeros, a more efficient multiplication algorithm can be used * by setting the corresponding `aIsSparse` or `bIsSparse` flag to `true`. These are also set to `false` by * default. This optimization is only available for plain matrices (i.e., rank-2 tensors) with data type * `BFLOAT16` or `FLOAT32`. The break-even for using this versus a dense matrix multiply on one platform was * 30% zero values in the sparse matrix. The gradient computation of the sparse op will only take advantage of * sparsity in the input gradient when that gradient comes from a ReLU. * * For example: * {{{ * // 2-D tensor 'a' is [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]] * * // 2-D tensor 'b' is [[7.0, 8.0], [9.0, 10.0], [11.0, 12.0]] * * matmul(a, b) ==> [[58.0, 64.0], [139.0, 154.0]] * * // 3-D tensor 'a' is [[[ 1.0, 2.0, 3.0], * // [ 4.0, 5.0, 6.0]], * // [[ 7.0, 8.0, 9.0], * // [10.0, 11.0, 12.0]]] * * // 3-D tensor 'b' is [[[13.0, 14.0], * // [15.0, 16.0], * // [17.0, 18.0]], * // [[19.0, 20.0], * // [21.0, 22.0], * // [23.0, 24.0]]] * * matmul(a, b) ==> [[[ 94.0, 100.0], [229.0, 244.0]], * [[508.0, 532.0], [697.0, 730.0]]] * }}} * * @define OpDocMathCross * The `cross` op computes the pairwise cross product between two tensors. * * `a` and `b` must have the same shape; they can either be simple 3-element vectors, or have any shape * where the innermost dimension size is 3. In the latter case, each pair of corresponding 3-element vectors * is cross-multiplied independently. * * @define OpDocMathTensorDot * The `tensorDot` op computes the tensor contraction of two tensors along the specified axes. * * A tensor contraction sums the product of elements from `a` and `b` over the indices specified by `axesA` and * `axesB`. The axis `axesA(i)` of `a` must have the same dimension as the axis `axesB(i)` of `b` for all `i` in * `[0, aAxes.size)`. The tensors/sequences (depending on whether the dynamic version of the op is being used) * `axesA` and `axesB` must have identical length and consist of unique integers that specify valid axes for each * of the tensors. This operation corresponds to `numpy.tensordot(a, b, axes)` in Python. * * If `numAxes` is provided instead of `axesA` and `axesB`, then the contraction is performed over the last * `numAxes` axes of `a` and the first `numAxes` axes of `b`, in order. * * Example 1: When `a` and `b` are matrices (rank 2), the case `numAxes = 1` is equivalent to matrix * multiplication. * Example 2: When `a` and `b` are matrices (rank 2), the case `axesA = [1]` and `axesB = [0]` is equivalent to * matrix multiplication. * Example 3: Suppose that `a_{ijk}` and `b_{lmn}` represent two tensors of rank 3. 
Then, the case `axesA = [0]` * and `axesB = [2]` results in the rank 4 tensor `c_{jklm}` whose entry corresponding to the indices * `(j, k, l, m)` is given by: `c_{jklm} = \sum_i a_{ijk} b_{lmi}`. In general, * `rank(result) = rank(a) + rank(b) - 2 * axesA.size`. * * @define OpDocMathComplex * The `complex` op converts two real tensors to a complex tensor. * * Given a tensor `real` representing the real part of a complex number, and a tensor `imag` representing the * imaginary part of a complex number, the op returns complex numbers element-wise of the form `a + bj`, where *a* * represents the `real` part and *b* represents the `imag` part. The input tensors `real` and `imag` must have the * same shape and data type. * * For example: * {{{ * // 'real' is [2.25, 3.25] * // 'imag' is [4.75, 5.75] * complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] * }}} * * @define OpDocMathReal * The `real` op returns the real part of a complex number. * * Given a tensor `input` of potentially complex numbers, the op returns a tensor of type `FLOAT32` or `FLOAT64` * that is the real part of each element in `input`. If `input` contains complex numbers of the form `a + bj`, * *a* is the real part returned by the op and *b* is the imaginary part. * * For example: * {{{ * // 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * real(input) ==> [-2.25, 3.25] * }}} * * Note that, if `input` is already real-valued, then it is returned unchanged. * * @define OpDocMathImag * The `imag` op returns the real part of a complex number. * * Given a tensor `input` of complex numbers, the op returns a tensor of type `FLOAT32` or `FLOAT64` that is the * imaginary part of each element in `input`. If `input` contains complex numbers of the form `a + bj`, *a* is the * real part and *b* is the imaginary part returned by the op. * * For example: * {{{ * // 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * real(input) ==> [4.75, 5.75] * }}} * * @define OpDocMathAngle * The `angle` op returns the element-wise complex argument of a tensor. * * Given a numeric tensor `input`, the op returns a tensor with numbers that are the complex angle of each element * in `input`. If the numbers in `input` are of the form `a + bj`, where *a* is the real part and *b* is the * imaginary part, then the complex angle returned by this operation is of the form `atan2(b, a)`. * * For example: * {{{ * // 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * angle(input) ==> [2.0132, 1.056] * }}} * * If `input` is real-valued, then a tensor containing zeros is returned. * * @define OpDocMathConjugate * The `conjugate` op returns the element-wise complex conjugate of a tensor. * * Given a numeric tensor `input`, the op returns a tensor with numbers that are the complex conjugate of each * element in `input`. If the numbers in `input` are of the form `a + bj`, where *a* is the real part and *b* is * the imaginary part, then the complex conjugate returned by this operation is of the form `a - bj`. * * For example: * {{{ * // 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * conjugate(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j] * }}} * * If `input` is real-valued, then it is returned unchanged. * * @define OpDocMathBucketize * The `bucketize` op bucketizes a tensor based on the provided boundaries. * * For example: * {{{ * // 'input' is [[-5, 10000], [150, 10], [5, 100]] * // 'boundaries' are [0, 10, 100] * bucketize(input, boundaries) ==> [[0, 3], [3, 2], [1, 3]] * }}} * * @define OpDocMathZerosFraction * The `zerosFraction` op computes the fraction of zeros in `input`. 
* * If `input` is empty, the result is `NaN`. * * This is useful in summaries to measure and report sparsity. */ private[ops] trait Documentation }
eaplatanios/tensorflow
tensorflow/scala/api/src/main/scala/org/platanios/tensorflow/api/ops/Math.scala
Scala
apache-2.0
246,188
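The reduction and log-sum-exp semantics documented in the Math ops above can be sketched in a few lines of plain Scala. This is only an illustrative model of the documented behaviour, operating on a Seq[Double] rather than on tensors, and it is not the library's own API; the shift by the maximum mirrors the numerically stable computation that `logSumExp` describes.

object ReductionSemanticsSketch {
  // Plain-Scala model of the documented reductions over a 1-D "tensor".
  def sum(xs: Seq[Double]): Double  = xs.sum
  def mean(xs: Seq[Double]): Double = xs.sum / xs.size

  // logSumExp(x) == log(sum(exp(x))), computed stably by subtracting the maximum first.
  def logSumExp(xs: Seq[Double]): Double = {
    val m = xs.max
    m + math.log(xs.map(x => math.exp(x - m)).sum)
  }

  def main(args: Array[String]): Unit = {
    val x = Seq(0.0, 0.0, 0.0, 0.0, 0.0, 0.0) // the flattened [[0, 0, 0], [0, 0, 0]] example from the docs
    println(sum(x))       // 0.0
    println(logSumExp(x)) // log(6) ~= 1.7917594692280554
  }
}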
package com.datastax.spark.connector.mapper import java.lang.reflect.Method import com.datastax.driver.core.ProtocolVersion import com.datastax.spark.connector.ColumnRef import com.datastax.spark.connector.cql.TableDef import scala.reflect.ClassTag class JavaBeanColumnMapper[T : ClassTag](columnNameOverride: Map[String, String] = Map.empty) extends ReflectionColumnMapper[T] { import com.datastax.spark.connector.mapper.JavaBeanColumnMapper._ private def propertyName(accessorName: String) = { val AccessorRegex(_, strippedName) = accessorName val fieldName = strippedName(0).toLower + strippedName.substring(1) // For Java Beans, we need to figure out if there is // an equivalent name on the annotation if it has one annotationForFieldName(fieldName) getOrElse fieldName } override protected def isGetter(method: Method): Boolean = GetterRegex.findFirstMatchIn(method.getName).isDefined && method.getParameterTypes.isEmpty && method.getReturnType != Void.TYPE override protected def isSetter(method: Method): Boolean = SetterRegex.findFirstMatchIn(method.getName).isDefined && method.getParameterTypes.size == 1 && method.getReturnType == Void.TYPE private def resolve(name: String, columns: Map[String, ColumnRef]): Option[ColumnRef] = { val overridenName = columnNameOverride.getOrElse(name, name) ColumnMapperConvention.columnForProperty(overridenName, columns) } override protected def getterToColumnName(getterName: String, columns: Map[String, ColumnRef]) = { val p = propertyName(getterName) resolve(p, columns) } override protected def setterToColumnName(setterName: String, columns: Map[String, ColumnRef]) = { val p = propertyName(setterName) resolve(p, columns) } override protected def constructorParamToColumnName( paramName: String, columns: Map[String, ColumnRef]) = { resolve(paramName, columns) } /** Java Beans allow nulls in property values */ override protected def allowsNull = true // TODO: Implement override def newTable( keyspaceName: String, tableName: String, protocolVersion: ProtocolVersion = ProtocolVersion.NEWEST_SUPPORTED): TableDef = ??? } object JavaBeanColumnMapper { val GetterRegex = "^(get|is)(.+)$".r val SetterRegex = "^(set)(.+)$".r val AccessorRegex = "^(get|is|set)(.+)$".r }
shashwat7/spark-cassandra-connector
spark-cassandra-connector/src/main/scala/com/datastax/spark/connector/mapper/JavaBeanColumnMapper.scala
Scala
apache-2.0
2,376
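The property-name derivation performed by JavaBeanColumnMapper above can be illustrated with a small standalone sketch. The regex is copied from the mapper's companion object, but the sketch leaves out the column-convention lookup and annotation handling, so it is only a simplified model of the accessor-to-property mapping, not the connector's actual class.

// Minimal sketch of the accessor-name handling used by JavaBeanColumnMapper.
object BeanAccessorNameSketch {
  val AccessorRegex = "^(get|is|set)(.+)$".r

  // "getFirstName" -> "firstName", "isActive" -> "active", "setUserId" -> "userId"
  def propertyName(accessorName: String): String = {
    val AccessorRegex(_, strippedName) = accessorName
    strippedName.head.toLower + strippedName.tail
  }

  def main(args: Array[String]): Unit = {
    Seq("getFirstName", "isActive", "setUserId").foreach { name =>
      println(s"$name -> ${propertyName(name)}")
    }
  }
}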
package vggames.shared.vraptor import java.io.{ FileInputStream, File } import java.util.Properties import br.com.caelum.vraptor.ioc.{ Component, ApplicationScoped } @Component @ApplicationScoped class Secrets { val awsSecrets = readSecrets("aws_secrets") def awsAccessKey = key(awsSecrets.getProperty("access.key")) def awsSecretKey = key(awsSecrets.getProperty("secret.key")) private def key(f : => String) : Option[String] = Option(f) private def readSecrets(secretName : String) = { val secrets = new Properties() val file = new File(System.getProperty("user.home") + "/.vgGames/" + secretName) if (file.exists) secrets.load(new FileInputStream(file)) secrets } }
vidageek/games
web/src/main/scala/vggames/shared/vraptor/Secrets.scala
Scala
gpl-3.0
707
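The `key(f: => String): Option[String]` helper in Secrets relies on `Option(...)` turning the `null` returned by `Properties.getProperty` for a missing key into `None`. A minimal standalone sketch of that pattern follows; the property names are placeholders, not the real contents of the secrets file.

import java.util.Properties

object SecretsPatternSketch {
  private def key(f: => String): Option[String] = Option(f)

  def main(args: Array[String]): Unit = {
    val props = new Properties()
    props.setProperty("access.key", "EXAMPLE-ACCESS-KEY")
    println(key(props.getProperty("access.key"))) // Some(EXAMPLE-ACCESS-KEY)
    println(key(props.getProperty("secret.key"))) // None: a missing key yields null, wrapped to None
  }
}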
package grasshopper.geocoder.api.geocode trait ParallelismFactor { val numCores = Runtime.getRuntime.availableProcessors() }
cfpb/grasshopper
geocoder/src/main/scala/grasshopper/geocoder/api/geocode/ParallelismFactor.scala
Scala
cc0-1.0
128
package is.hail.annotations import is.hail.asm4s._ import is.hail.types.physical.PType object UnsafeUtils { def arrayElementSize(t: PType): Long = roundUpAlignment(t.byteSize, t.alignment) def roundUpAlignment(offset: Long, alignment: Long): Long = { assert(alignment > 0) assert((alignment & (alignment - 1)) == 0) // power of 2 (offset + (alignment - 1)) & ~(alignment - 1) } def roundUpAlignment(offset: Code[Long], alignment: Long): Code[Long] = { assert(alignment > 0) assert((alignment & (alignment - 1)) == 0) // power of 2 (offset + (alignment - 1)) & ~(alignment - 1) } def packBitsToBytes(nBits: Int): Int = (nBits + 7) >>> 3 def packBitsToBytes(nBits: Code[Int]): Code[Int] = (nBits + 7) >>> 3 }
danking/hail
hail/src/main/scala/is/hail/annotations/UnsafeUtils.scala
Scala
mit
759
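The alignment arithmetic in UnsafeUtils uses the standard power-of-two bit trick. A small standalone check, in plain Scala and independent of Hail's Code[_] machinery, makes the rounding and bit-packing behaviour concrete.

object AlignmentSketch {
  // Rounds `offset` up to the next multiple of `alignment`, which must be a power of two.
  def roundUpAlignment(offset: Long, alignment: Long): Long =
    (offset + (alignment - 1)) & ~(alignment - 1)

  // Number of bytes needed to store `nBits` single-bit flags.
  def packBitsToBytes(nBits: Int): Int = (nBits + 7) >>> 3

  def main(args: Array[String]): Unit = {
    println(roundUpAlignment(13L, 8L)) // 16
    println(roundUpAlignment(16L, 8L)) // 16 (already aligned)
    println(packBitsToBytes(9))        // 2
    println(packBitsToBytes(8))        // 1
  }
}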
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark import java.io._ import java.net.URI import java.util.{Arrays, Locale, Properties, ServiceLoader, UUID} import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap} import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference} import scala.collection.JavaConverters._ import scala.collection.Map import scala.collection.immutable import scala.collection.mutable.HashMap import scala.language.implicitConversions import scala.reflect.{classTag, ClassTag} import scala.util.control.NonFatal import com.google.common.collect.MapMaker import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.{FileSystem, Path} import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable} import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, TextInputFormat} import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob} import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat} import org.apache.spark.annotation.DeveloperApi import org.apache.spark.broadcast.Broadcast import org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil} import org.apache.spark.executor.{ExecutorMetrics, ExecutorMetricsSource} import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat, WholeTextFileInputFormat} import org.apache.spark.internal.Logging import org.apache.spark.internal.config._ import org.apache.spark.internal.config.Tests._ import org.apache.spark.internal.config.UI._ import org.apache.spark.internal.plugin.PluginContainer import org.apache.spark.io.CompressionCodec import org.apache.spark.metrics.source.JVMCPUSource import org.apache.spark.partial.{ApproximateEvaluator, PartialResult} import org.apache.spark.rdd._ import org.apache.spark.resource._ import org.apache.spark.resource.ResourceUtils._ import org.apache.spark.rpc.RpcEndpointRef import org.apache.spark.scheduler._ import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackend import org.apache.spark.scheduler.local.LocalSchedulerBackend import org.apache.spark.shuffle.ShuffleDataIOUtils import org.apache.spark.shuffle.api.ShuffleDriverComponents import org.apache.spark.status.{AppStatusSource, AppStatusStore} import org.apache.spark.status.api.v1.ThreadStackTrace import org.apache.spark.storage._ import org.apache.spark.storage.BlockManagerMessages.TriggerThreadDump import org.apache.spark.ui.{ConsoleProgressBar, SparkUI} import org.apache.spark.util._ import org.apache.spark.util.logging.DriverLogger /** * Main entry point for Spark functionality. 
A SparkContext represents the connection to a Spark * cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster. * * @note Only one `SparkContext` should be active per JVM. You must `stop()` the * active `SparkContext` before creating a new one. * @param config a Spark Config object describing the application configuration. Any settings in * this config overrides the default configs as well as system properties. */ class SparkContext(config: SparkConf) extends Logging { // The call site where this SparkContext was constructed. private val creationSite: CallSite = Utils.getCallSite() if (!config.get(EXECUTOR_ALLOW_SPARK_CONTEXT)) { // In order to prevent SparkContext from being created in executors. SparkContext.assertOnDriver() } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having started construction. // NOTE: this must be placed at the beginning of the SparkContext constructor. SparkContext.markPartiallyConstructed(this) val startTime = System.currentTimeMillis() private[spark] val stopped: AtomicBoolean = new AtomicBoolean(false) private[spark] def assertNotStopped(): Unit = { if (stopped.get()) { val activeContext = SparkContext.activeContext.get() val activeCreationSite = if (activeContext == null) { "(No active SparkContext.)" } else { activeContext.creationSite.longForm } throw new IllegalStateException( s"""Cannot call methods on a stopped SparkContext. |This stopped SparkContext was created at: | |${creationSite.longForm} | |The currently active SparkContext was created at: | |$activeCreationSite """.stripMargin) } } /** * Create a SparkContext that loads settings from system properties (for instance, when * launching with ./bin/spark-submit). */ def this() = this(new SparkConf()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI * @param conf a [[org.apache.spark.SparkConf]] object specifying other Spark parameters */ def this(master: String, appName: String, conf: SparkConf) = this(SparkContext.updatedConf(conf, master, appName)) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. * @param environment Environment variables to set on worker nodes. */ def this( master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()) = { this(SparkContext.updatedConf(new SparkConf(), master, appName, sparkHome, jars, environment)) } // The following constructors are required when Java code accesses SparkContext directly. // Please see SI-4278 /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. 
*/ private[spark] def this(master: String, appName: String) = this(master, appName, null, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. */ private[spark] def this(master: String, appName: String, sparkHome: String) = this(master, appName, sparkHome, Nil, Map()) /** * Alternative constructor that allows setting common Spark properties directly * * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). * @param appName A name for your application, to display on the cluster web UI. * @param sparkHome Location where Spark is installed on cluster nodes. * @param jars Collection of JARs to send to the cluster. These can be paths on the local file * system or HDFS, HTTP, HTTPS, or FTP URLs. */ private[spark] def this(master: String, appName: String, sparkHome: String, jars: Seq[String]) = this(master, appName, sparkHome, jars, Map()) // log out Spark Version in Spark driver log logInfo(s"Running Spark version $SPARK_VERSION") /* ------------------------------------------------------------------------------------- * | Private variables. These variables keep the internal state of the context, and are | | not accessible by the outside world. They're mutable since we want to initialize all | | of them to some neutral value ahead of time, so that calling "stop()" while the | | constructor is still running is safe. | * ------------------------------------------------------------------------------------- */ private var _conf: SparkConf = _ private var _eventLogDir: Option[URI] = None private var _eventLogCodec: Option[String] = None private var _listenerBus: LiveListenerBus = _ private var _env: SparkEnv = _ private var _statusTracker: SparkStatusTracker = _ private var _progressBar: Option[ConsoleProgressBar] = None private var _ui: Option[SparkUI] = None private var _hadoopConfiguration: Configuration = _ private var _executorMemory: Int = _ private var _schedulerBackend: SchedulerBackend = _ private var _taskScheduler: TaskScheduler = _ private var _heartbeatReceiver: RpcEndpointRef = _ @volatile private var _dagScheduler: DAGScheduler = _ private var _applicationId: String = _ private var _applicationAttemptId: Option[String] = None private var _eventLogger: Option[EventLoggingListener] = None private var _driverLogger: Option[DriverLogger] = None private var _executorAllocationManager: Option[ExecutorAllocationManager] = None private var _cleaner: Option[ContextCleaner] = None private var _listenerBusStarted: Boolean = false private var _jars: Seq[String] = _ private var _files: Seq[String] = _ private var _shutdownHookRef: AnyRef = _ private var _statusStore: AppStatusStore = _ private var _heartbeater: Heartbeater = _ private var _resources: immutable.Map[String, ResourceInformation] = _ private var _shuffleDriverComponents: ShuffleDriverComponents = _ private var _plugins: Option[PluginContainer] = None private var _resourceProfileManager: ResourceProfileManager = _ /* ------------------------------------------------------------------------------------- * | Accessors and public fields. These provide access to the internal state of the | | context. 
| * ------------------------------------------------------------------------------------- */ private[spark] def conf: SparkConf = _conf /** * Return a copy of this SparkContext's configuration. The configuration ''cannot'' be * changed at runtime. */ def getConf: SparkConf = conf.clone() def resources: Map[String, ResourceInformation] = _resources def jars: Seq[String] = _jars def files: Seq[String] = _files def master: String = _conf.get("spark.master") def deployMode: String = _conf.get(SUBMIT_DEPLOY_MODE) def appName: String = _conf.get("spark.app.name") private[spark] def isEventLogEnabled: Boolean = _conf.get(EVENT_LOG_ENABLED) private[spark] def eventLogDir: Option[URI] = _eventLogDir private[spark] def eventLogCodec: Option[String] = _eventLogCodec def isLocal: Boolean = Utils.isLocalMaster(_conf) /** * @return true if context is stopped or in the midst of stopping. */ def isStopped: Boolean = stopped.get() private[spark] def statusStore: AppStatusStore = _statusStore // An asynchronous listener bus for Spark events private[spark] def listenerBus: LiveListenerBus = _listenerBus // This function allows components created by SparkEnv to be mocked in unit tests: private[spark] def createSparkEnv( conf: SparkConf, isLocal: Boolean, listenerBus: LiveListenerBus): SparkEnv = { SparkEnv.createDriverEnv(conf, isLocal, listenerBus, SparkContext.numDriverCores(master, conf)) } private[spark] def env: SparkEnv = _env // Used to store a URL for each static file/jar together with the file's local timestamp private[spark] val addedFiles = new ConcurrentHashMap[String, Long]().asScala private[spark] val addedJars = new ConcurrentHashMap[String, Long]().asScala // Keeps track of all persisted RDDs private[spark] val persistentRdds = { val map: ConcurrentMap[Int, RDD[_]] = new MapMaker().weakValues().makeMap[Int, RDD[_]]() map.asScala } def statusTracker: SparkStatusTracker = _statusTracker private[spark] def progressBar: Option[ConsoleProgressBar] = _progressBar private[spark] def ui: Option[SparkUI] = _ui def uiWebUrl: Option[String] = _ui.map(_.webUrl) /** * A default Hadoop Configuration for the Hadoop code (e.g. file systems) that we reuse. * * @note As it will be reused in all Hadoop RDDs, it's better not to modify it unless you * plan to set some global configurations for all Hadoop RDDs. */ def hadoopConfiguration: Configuration = _hadoopConfiguration private[spark] def executorMemory: Int = _executorMemory // Environment variables to pass to our executors. private[spark] val executorEnvs = HashMap[String, String]() // Set SPARK_USER for user who is running SparkContext. val sparkUser = Utils.getCurrentUserName() private[spark] def schedulerBackend: SchedulerBackend = _schedulerBackend private[spark] def taskScheduler: TaskScheduler = _taskScheduler private[spark] def taskScheduler_=(ts: TaskScheduler): Unit = { _taskScheduler = ts } private[spark] def dagScheduler: DAGScheduler = _dagScheduler private[spark] def dagScheduler_=(ds: DAGScheduler): Unit = { _dagScheduler = ds } private[spark] def shuffleDriverComponents: ShuffleDriverComponents = _shuffleDriverComponents /** * A unique identifier for the Spark application. * Its format depends on the scheduler implementation. * (i.e. 
* in case of local spark app something like 'local-1433865536131' * in case of YARN something like 'application_1433865536131_34483' * in case of MESOS something like 'driver-20170926223339-0001' * ) */ def applicationId: String = _applicationId def applicationAttemptId: Option[String] = _applicationAttemptId private[spark] def eventLogger: Option[EventLoggingListener] = _eventLogger private[spark] def executorAllocationManager: Option[ExecutorAllocationManager] = _executorAllocationManager private[spark] def resourceProfileManager: ResourceProfileManager = _resourceProfileManager private[spark] def cleaner: Option[ContextCleaner] = _cleaner private[spark] var checkpointDir: Option[String] = None // Thread Local variable that can be used by users to pass information down the stack protected[spark] val localProperties = new InheritableThreadLocal[Properties] { override protected def childValue(parent: Properties): Properties = { // Note: make a clone such that changes in the parent properties aren't reflected in // the those of the children threads, which has confusing semantics (SPARK-10563). Utils.cloneProperties(parent) } override protected def initialValue(): Properties = new Properties() } /* ------------------------------------------------------------------------------------- * | Initialization. This code initializes the context in a manner that is exception-safe. | | All internal fields holding state are initialized here, and any error prompts the | | stop() method to be called. | * ------------------------------------------------------------------------------------- */ private def warnSparkMem(value: String): String = { logWarning("Using SPARK_MEM to set amount of memory to use per executor process is " + "deprecated, please use spark.executor.memory instead.") value } /** Control our logLevel. This overrides any user-defined log settings. * @param logLevel The desired log level as a string. * Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN */ def setLogLevel(logLevel: String): Unit = { // let's allow lowercase or mixed case too val upperCased = logLevel.toUpperCase(Locale.ROOT) require(SparkContext.VALID_LOG_LEVELS.contains(upperCased), s"Supplied level $logLevel did not match one of:" + s" ${SparkContext.VALID_LOG_LEVELS.mkString(",")}") Utils.setLogLevel(org.apache.log4j.Level.toLevel(upperCased)) } try { _conf = config.clone() _conf.validateSettings() _conf.set("spark.app.startTime", startTime.toString) if (!_conf.contains("spark.master")) { throw new SparkException("A master URL must be set in your configuration") } if (!_conf.contains("spark.app.name")) { throw new SparkException("An application name must be set in your configuration") } _driverLogger = DriverLogger(_conf) val resourcesFileOpt = conf.get(DRIVER_RESOURCES_FILE) _resources = getOrDiscoverAllResources(_conf, SPARK_DRIVER_PREFIX, resourcesFileOpt) logResourceInfo(SPARK_DRIVER_PREFIX, _resources) // log out spark.app.name in the Spark driver logs logInfo(s"Submitted application: $appName") // System property spark.yarn.app.id must be set if user code ran by AM on a YARN cluster if (master == "yarn" && deployMode == "cluster" && !_conf.contains("spark.yarn.app.id")) { throw new SparkException("Detected yarn cluster mode, but isn't running on a cluster. " + "Deployment to YARN is not supported directly by SparkContext. 
Please use spark-submit.") } if (_conf.getBoolean("spark.logConf", false)) { logInfo("Spark configuration:\n" + _conf.toDebugString) } // Set Spark driver host and port system properties. This explicitly sets the configuration // instead of relying on the default value of the config constant. _conf.set(DRIVER_HOST_ADDRESS, _conf.get(DRIVER_HOST_ADDRESS)) _conf.setIfMissing(DRIVER_PORT, 0) _conf.set(EXECUTOR_ID, SparkContext.DRIVER_IDENTIFIER) _jars = Utils.getUserJars(_conf) _files = _conf.getOption(FILES.key).map(_.split(",")).map(_.filter(_.nonEmpty)) .toSeq.flatten _eventLogDir = if (isEventLogEnabled) { val unresolvedDir = conf.get(EVENT_LOG_DIR).stripSuffix("/") Some(Utils.resolveURI(unresolvedDir)) } else { None } _eventLogCodec = { val compress = _conf.get(EVENT_LOG_COMPRESS) if (compress && isEventLogEnabled) { Some(_conf.get(EVENT_LOG_COMPRESSION_CODEC)).map(CompressionCodec.getShortName) } else { None } } _listenerBus = new LiveListenerBus(_conf) _resourceProfileManager = new ResourceProfileManager(_conf, _listenerBus) // Initialize the app status store and listener before SparkEnv is created so that it gets // all events. val appStatusSource = AppStatusSource.createSource(conf) _statusStore = AppStatusStore.createLiveStore(conf, appStatusSource) listenerBus.addToStatusQueue(_statusStore.listener.get) // Create the Spark execution environment (cache, map output tracker, etc) _env = createSparkEnv(_conf, isLocal, listenerBus) SparkEnv.set(_env) // If running the REPL, register the repl's output dir with the file server. _conf.getOption("spark.repl.class.outputDir").foreach { path => val replUri = _env.rpcEnv.fileServer.addDirectory("/classes", new File(path)) _conf.set("spark.repl.class.uri", replUri) } _statusTracker = new SparkStatusTracker(this, _statusStore) _progressBar = if (_conf.get(UI_SHOW_CONSOLE_PROGRESS)) { Some(new ConsoleProgressBar(this)) } else { None } _ui = if (conf.get(UI_ENABLED)) { Some(SparkUI.create(Some(this), _statusStore, _conf, _env.securityManager, appName, "", startTime)) } else { // For tests, do not enable the UI None } // Bind the UI before starting the task scheduler to communicate // the bound port to the cluster manager properly _ui.foreach(_.bind()) _hadoopConfiguration = SparkHadoopUtil.get.newConfiguration(_conf) // Performance optimization: this dummy call to .size() triggers eager evaluation of // Configuration's internal `properties` field, guaranteeing that it will be computed and // cached before SessionState.newHadoopConf() uses `sc.hadoopConfiguration` to create // a new per-session Configuration. If `properties` has not been computed by that time // then each newly-created Configuration will perform its own expensive IO and XML // parsing to load configuration defaults and populate its own properties. By ensuring // that we've pre-computed the parent's properties, the child Configuration will simply // clone the parent's properties. 
_hadoopConfiguration.size() // Add each JAR given through the constructor if (jars != null) { jars.foreach(jar => addJar(jar, true)) if (addedJars.nonEmpty) { _conf.set("spark.app.initial.jar.urls", addedJars.keys.toSeq.mkString(",")) } } if (files != null) { files.foreach(file => addFile(file, false, true)) if (addedFiles.nonEmpty) { _conf.set("spark.app.initial.file.urls", addedFiles.keys.toSeq.mkString(",")) } } _executorMemory = _conf.getOption(EXECUTOR_MEMORY.key) .orElse(Option(System.getenv("SPARK_EXECUTOR_MEMORY"))) .orElse(Option(System.getenv("SPARK_MEM")) .map(warnSparkMem)) .map(Utils.memoryStringToMb) .getOrElse(1024) // Convert java options to env vars as a work around // since we can't set env vars directly in sbt. for { (envKey, propKey) <- Seq(("SPARK_TESTING", IS_TESTING.key)) value <- Option(System.getenv(envKey)).orElse(Option(System.getProperty(propKey)))} { executorEnvs(envKey) = value } Option(System.getenv("SPARK_PREPEND_CLASSES")).foreach { v => executorEnvs("SPARK_PREPEND_CLASSES") = v } // The Mesos scheduler backend relies on this environment variable to set executor memory. // TODO: Set this only in the Mesos scheduler. executorEnvs("SPARK_EXECUTOR_MEMORY") = executorMemory + "m" executorEnvs ++= _conf.getExecutorEnv executorEnvs("SPARK_USER") = sparkUser _shuffleDriverComponents = ShuffleDataIOUtils.loadShuffleDataIO(config).driver() _shuffleDriverComponents.initializeApplication().asScala.foreach { case (k, v) => _conf.set(ShuffleDataIOUtils.SHUFFLE_SPARK_CONF_PREFIX + k, v) } // We need to register "HeartbeatReceiver" before "createTaskScheduler" because Executor will // retrieve "HeartbeatReceiver" in the constructor. (SPARK-6640) _heartbeatReceiver = env.rpcEnv.setupEndpoint( HeartbeatReceiver.ENDPOINT_NAME, new HeartbeatReceiver(this)) // Initialize any plugins before the task scheduler is initialized. _plugins = PluginContainer(this, _resources.asJava) // Create and start the scheduler val (sched, ts) = SparkContext.createTaskScheduler(this, master, deployMode) _schedulerBackend = sched _taskScheduler = ts _dagScheduler = new DAGScheduler(this) _heartbeatReceiver.ask[Boolean](TaskSchedulerIsSet) val _executorMetricsSource = if (_conf.get(METRICS_EXECUTORMETRICS_SOURCE_ENABLED)) { Some(new ExecutorMetricsSource) } else { None } // create and start the heartbeater for collecting memory metrics _heartbeater = new Heartbeater( () => SparkContext.this.reportHeartBeat(_executorMetricsSource), "driver-heartbeater", conf.get(EXECUTOR_HEARTBEAT_INTERVAL)) _heartbeater.start() // start TaskScheduler after taskScheduler sets DAGScheduler reference in DAGScheduler's // constructor _taskScheduler.start() _applicationId = _taskScheduler.applicationId() _applicationAttemptId = _taskScheduler.applicationAttemptId() _conf.set("spark.app.id", _applicationId) if (_conf.get(UI_REVERSE_PROXY)) { System.setProperty("spark.ui.proxyBase", "/proxy/" + _applicationId) } _ui.foreach(_.setAppId(_applicationId)) _env.blockManager.initialize(_applicationId) // The metrics system for Driver need to be set spark.app.id to app ID. // So it should start after we get app ID from the task scheduler and set spark.app.id. _env.metricsSystem.start(_conf.get(METRICS_STATIC_SOURCES_ENABLED)) // Attach the driver metrics servlet handler to the web ui after the metrics system is started. 
_env.metricsSystem.getServletHandlers.foreach(handler => ui.foreach(_.attachHandler(handler))) _eventLogger = if (isEventLogEnabled) { val logger = new EventLoggingListener(_applicationId, _applicationAttemptId, _eventLogDir.get, _conf, _hadoopConfiguration) logger.start() listenerBus.addToEventLogQueue(logger) Some(logger) } else { None } _cleaner = if (_conf.get(CLEANER_REFERENCE_TRACKING)) { Some(new ContextCleaner(this, _shuffleDriverComponents)) } else { None } _cleaner.foreach(_.start()) val dynamicAllocationEnabled = Utils.isDynamicAllocationEnabled(_conf) _executorAllocationManager = if (dynamicAllocationEnabled) { schedulerBackend match { case b: ExecutorAllocationClient => Some(new ExecutorAllocationManager( schedulerBackend.asInstanceOf[ExecutorAllocationClient], listenerBus, _conf, cleaner = cleaner, resourceProfileManager = resourceProfileManager)) case _ => None } } else { None } _executorAllocationManager.foreach(_.start()) setupAndStartListenerBus() postEnvironmentUpdate() postApplicationStart() // Post init _taskScheduler.postStartHook() _env.metricsSystem.registerSource(_dagScheduler.metricsSource) _env.metricsSystem.registerSource(new BlockManagerSource(_env.blockManager)) _env.metricsSystem.registerSource(new JVMCPUSource()) _executorMetricsSource.foreach(_.register(_env.metricsSystem)) _executorAllocationManager.foreach { e => _env.metricsSystem.registerSource(e.executorAllocationManagerSource) } appStatusSource.foreach(_env.metricsSystem.registerSource(_)) _plugins.foreach(_.registerMetrics(applicationId)) // Make sure the context is stopped if the user forgets about it. This avoids leaving // unfinished event logs around after the JVM exits cleanly. It doesn't help if the JVM // is killed, though. logDebug("Adding shutdown hook") // force eager creation of logger _shutdownHookRef = ShutdownHookManager.addShutdownHook( ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY) { () => logInfo("Invoking stop() from shutdown hook") try { stop() } catch { case e: Throwable => logWarning("Ignoring Exception while stopping SparkContext from shutdown hook", e) } } } catch { case NonFatal(e) => logError("Error initializing SparkContext.", e) try { stop() } catch { case NonFatal(inner) => logError("Error stopping SparkContext after init error.", inner) } finally { throw e } } /** * Called by the web UI to obtain executor thread dumps. This method may be expensive. * Logs an error and returns None if we failed to obtain a thread dump, which could occur due * to an executor being dead or unresponsive or due to network issues while sending the thread * dump message back to the driver. */ private[spark] def getExecutorThreadDump(executorId: String): Option[Array[ThreadStackTrace]] = { try { if (executorId == SparkContext.DRIVER_IDENTIFIER) { Some(Utils.getThreadDump()) } else { val endpointRef = env.blockManager.master.getExecutorEndpointRef(executorId).get Some(endpointRef.askSync[Array[ThreadStackTrace]](TriggerThreadDump)) } } catch { case e: Exception => logError(s"Exception getting thread dump from executor $executorId", e) None } } private[spark] def getLocalProperties: Properties = localProperties.get() private[spark] def setLocalProperties(props: Properties): Unit = { localProperties.set(props) } /** * Set a local property that affects jobs submitted from this thread, such as the Spark fair * scheduler pool. User-defined properties may also be set here. 
These properties are propagated * through to worker tasks and can be accessed there via * [[org.apache.spark.TaskContext#getLocalProperty]]. * * These properties are inherited by child threads spawned from this thread. This * may have unexpected consequences when working with thread pools. The standard java * implementation of thread pools have worker threads spawn other worker threads. * As a result, local properties may propagate unpredictably. */ def setLocalProperty(key: String, value: String): Unit = { if (value == null) { localProperties.get.remove(key) } else { localProperties.get.setProperty(key, value) } } /** * Get a local property set in this thread, or null if it is missing. See * `org.apache.spark.SparkContext.setLocalProperty`. */ def getLocalProperty(key: String): String = Option(localProperties.get).map(_.getProperty(key)).orNull /** Set a human readable description of the current job. */ def setJobDescription(value: String): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, value) } /** * Assigns a group ID to all the jobs started by this thread until the group ID is set to a * different value or cleared. * * Often, a unit of execution in an application consists of multiple Spark actions or jobs. * Application programmers can use this method to group all those jobs together and give a * group description. Once set, the Spark web UI will associate such jobs with this group. * * The application can also use `org.apache.spark.SparkContext.cancelJobGroup` to cancel all * running jobs in this group. For example, * {{{ * // In the main thread: * sc.setJobGroup("some_job_to_cancel", "some job description") * sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count() * * // In a separate thread: * sc.cancelJobGroup("some_job_to_cancel") * }}} * * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()` * being called on the job's executor threads. This is useful to help ensure that the tasks * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS * may respond to Thread.interrupt() by marking nodes as dead. */ def setJobGroup(groupId: String, description: String, interruptOnCancel: Boolean = false): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, description) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, groupId) // Note: Specifying interruptOnCancel in setJobGroup (rather than cancelJobGroup) avoids // changing several public APIs and allows Spark cancellations outside of the cancelJobGroup // APIs to also take advantage of this property (e.g., internal job failures or canceling from // JobProgressTab UI) on a per-job basis. setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString) } /** Clear the current thread's job group ID and its description. */ def clearJobGroup(): Unit = { setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, null) setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, null) setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null) } /** * Execute a block of code in a scope such that all new RDDs created in this body will * be part of the same scope. For more detail, see {{org.apache.spark.rdd.RDDOperationScope}}. * * @note Return statements are NOT allowed in the given body. */ private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](this)(body) // Methods for creating RDDs /** Distribute a local Scala collection to form an RDD. * * @note Parallelize acts lazily. 
If `seq` is a mutable collection and is altered after the call * to parallelize and before the first action on the RDD, the resultant RDD will reflect the * modified collection. Pass a copy of the argument to avoid this. * @note avoid using `parallelize(Seq())` to create an empty `RDD`. Consider `emptyRDD` for an * RDD with no partitions, or `parallelize(Seq[T]())` for an RDD of `T` with empty partitions. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def parallelize[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { assertNotStopped() new ParallelCollectionRDD[T](this, seq, numSlices, Map[Int, Seq[String]]()) } /** * Creates a new RDD[Long] containing elements from `start` to `end`(exclusive), increased by * `step` every element. * * @note if we need to cache this RDD, we should make sure each partition does not exceed limit. * * @param start the start value. * @param end the end value. * @param step the incremental step * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed range */ def range( start: Long, end: Long, step: Long = 1, numSlices: Int = defaultParallelism): RDD[Long] = withScope { assertNotStopped() // when step is 0, range will run infinitely require(step != 0, "step cannot be 0") val numElements: BigInt = { val safeStart = BigInt(start) val safeEnd = BigInt(end) if ((safeEnd - safeStart) % step == 0 || (safeEnd > safeStart) != (step > 0)) { (safeEnd - safeStart) / step } else { // the remainder has the same sign with range, could add 1 more (safeEnd - safeStart) / step + 1 } } parallelize(0 until numSlices, numSlices).mapPartitionsWithIndex { (i, _) => val partitionStart = (i * numElements) / numSlices * step + start val partitionEnd = (((i + 1) * numElements) / numSlices) * step + start def getSafeMargin(bi: BigInt): Long = if (bi.isValidLong) { bi.toLong } else if (bi > 0) { Long.MaxValue } else { Long.MinValue } val safePartitionStart = getSafeMargin(partitionStart) val safePartitionEnd = getSafeMargin(partitionEnd) new Iterator[Long] { private[this] var number: Long = safePartitionStart private[this] var overflow: Boolean = false override def hasNext = if (!overflow) { if (step > 0) { number < safePartitionEnd } else { number > safePartitionEnd } } else false override def next() = { val ret = number number += step if (number < ret ^ step < 0) { // we have Long.MaxValue + Long.MaxValue < Long.MaxValue // and Long.MinValue + Long.MinValue > Long.MinValue, so iff the step causes a step // back, we are pretty sure that we have an overflow. overflow = true } ret } } } } /** Distribute a local Scala collection to form an RDD. * * This method is identical to `parallelize`. * @param seq Scala collection to distribute * @param numSlices number of partitions to divide the collection into * @return RDD representing distributed collection */ def makeRDD[T: ClassTag]( seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T] = withScope { parallelize(seq, numSlices) } /** * Distribute a local Scala collection to form an RDD, with one or more * location preferences (hostnames of Spark nodes) for each object. * Create a new partition for each collection item. 
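   *
   * For example (a sketch; the hostnames are illustrative):
   * {{{
   * val rdd = sc.makeRDD(Seq(
   *   ("record-1", Seq("host1.example.com")),
   *   ("record-2", Seq("host2.example.com"))))
   * }}}
   *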
   * @param seq list of tuples of data and location preferences (hostnames of Spark nodes)
   * @return RDD representing data partitioned according to location preferences
   */
  def makeRDD[T: ClassTag](seq: Seq[(T, Seq[String])]): RDD[T] = withScope {
    assertNotStopped()
    val indexToPrefs = seq.zipWithIndex.map(t => (t._2, t._1._2)).toMap
    new ParallelCollectionRDD[T](this, seq.map(_._1), math.max(seq.size, 1), indexToPrefs)
  }

  /**
   * Read a text file from HDFS, a local file system (available on all nodes), or any
   * Hadoop-supported file system URI, and return it as an RDD of Strings.
   * The text files must be encoded as UTF-8.
   *
   * @param path path to the text file on a supported file system
   * @param minPartitions suggested minimum number of partitions for the resulting RDD
   * @return RDD of lines of the text file
   */
  def textFile(
      path: String,
      minPartitions: Int = defaultMinPartitions): RDD[String] = withScope {
    assertNotStopped()
    hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text],
      minPartitions).map(pair => pair._2.toString).setName(path)
  }

  /**
   * Read a directory of text files from HDFS, a local file system (available on all nodes), or any
   * Hadoop-supported file system URI. Each file is read as a single record and returned in a
   * key-value pair, where the key is the path of each file, the value is the content of each file.
   * The text files must be encoded as UTF-8.
   *
   * <p> For example, if you have the following files:
   * {{{
   *   hdfs://a-hdfs-path/part-00000
   *   hdfs://a-hdfs-path/part-00001
   *   ...
   *   hdfs://a-hdfs-path/part-nnnnn
   * }}}
   *
   * Do `val rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")`,
   *
   * <p> then `rdd` contains
   * {{{
   *   (a-hdfs-path/part-00000, its content)
   *   (a-hdfs-path/part-00001, its content)
   *   ...
   *   (a-hdfs-path/part-nnnnn, its content)
   * }}}
   *
   * @note Small files are preferred; large files are also allowed, but may cause poor performance.
   * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files
   *       in a directory rather than `.../path/` or `.../path`
   * @note Partitioning is determined by data locality. This may result in too few partitions
   *       by default.
   *
   * @param path Directory to the input data files, the path can be comma separated paths as the
   *             list of inputs.
   * @param minPartitions A suggestion value of the minimal splitting number for input data.
   * @return RDD representing tuples of file path and the corresponding file content
   */
  def wholeTextFiles(
      path: String,
      minPartitions: Int = defaultMinPartitions): RDD[(String, String)] = withScope {
    assertNotStopped()
    val job = NewHadoopJob.getInstance(hadoopConfiguration)
    // Use setInputPaths so that wholeTextFiles aligns with hadoopFile/textFile in taking
    // comma separated files as input. (see SPARK-7155)
    NewFileInputFormat.setInputPaths(job, path)
    val updateConf = job.getConfiguration
    new WholeTextFileRDD(
      this,
      classOf[WholeTextFileInputFormat],
      classOf[Text],
      classOf[Text],
      updateConf,
      minPartitions).map(record => (record._1.toString, record._2.toString)).setName(path)
  }

  /**
   * Get an RDD for a Hadoop-readable dataset as PortableDataStream for each file
   * (useful for binary data)
   *
   * For example, if you have the following files:
   * {{{
   *   hdfs://a-hdfs-path/part-00000
   *   hdfs://a-hdfs-path/part-00001
   *   ...
   *   hdfs://a-hdfs-path/part-nnnnn
   * }}}
   *
   * Do
   * `val rdd = sparkContext.binaryFiles("hdfs://a-hdfs-path")`,
   *
   * then `rdd` contains
   * {{{
   *   (a-hdfs-path/part-00000, its content)
   *   (a-hdfs-path/part-00001, its content)
   *   ...
* (a-hdfs-path/part-nnnnn, its content) * }}} * * @note Small files are preferred; very large files may cause bad performance. * @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files * in a directory rather than `.../path/` or `.../path` * @note Partitioning is determined by data locality. This may result in too few partitions * by default. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param minPartitions A suggestion value of the minimal splitting number for input data. * @return RDD representing tuples of file path and corresponding file content */ def binaryFiles( path: String, minPartitions: Int = defaultMinPartitions): RDD[(String, PortableDataStream)] = withScope { assertNotStopped() val job = NewHadoopJob.getInstance(hadoopConfiguration) // Use setInputPaths so that binaryFiles aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updateConf = job.getConfiguration new BinaryFileRDD( this, classOf[StreamInputFormat], classOf[String], classOf[PortableDataStream], updateConf, minPartitions).setName(path) } /** * Load data from a flat binary file, assuming the length of each record is constant. * * @note We ensure that the byte array for each record in the resulting RDD * has the provided record length. * * @param path Directory to the input data files, the path can be comma separated paths as the * list of inputs. * @param recordLength The length at which to split the records * @param conf Configuration for setting up the dataset. * * @return An RDD of data with values, represented as byte arrays */ def binaryRecords( path: String, recordLength: Int, conf: Configuration = hadoopConfiguration): RDD[Array[Byte]] = withScope { assertNotStopped() conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength) val br = newAPIHadoopFile[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](path, classOf[FixedLengthBinaryInputFormat], classOf[LongWritable], classOf[BytesWritable], conf = conf) br.map { case (k, v) => val bytes = v.copyBytes() assert(bytes.length == recordLength, "Byte array does not have correct length") bytes } } /** * Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf given its InputFormat and other * necessary info (e.g. file name for a filesystem-based dataset, table name for HyperTable), * using the older MapReduce API (`org.apache.hadoop.mapred`). * * @param conf JobConf for setting up the dataset. Note: This will be put into a Broadcast. * Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions Minimum number of Hadoop Splits to generate. * @return RDD of tuples of key and corresponding value * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
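   *
   * A minimal usage sketch, assuming text data read with the old-API `TextInputFormat`
   * (the path below is illustrative):
   * {{{
   * import org.apache.hadoop.io.{LongWritable, Text}
   * import org.apache.hadoop.mapred.{FileInputFormat, JobConf, TextInputFormat}
   *
   * val jobConf = new JobConf()
   * FileInputFormat.setInputPaths(jobConf, "hdfs://nn/path/to/data")
   * val lines = sc.hadoopRDD(jobConf, classOf[TextInputFormat],
   *   classOf[LongWritable], classOf[Text]).map(_._2.toString)
   * }}}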
*/ def hadoopRDD[K, V]( conf: JobConf, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf before broadcasting it. SparkHadoopUtil.get.addCredentials(conf) new HadoopRDD(this, conf, inputFormatClass, keyClass, valueClass, minPartitions) } /** Get an RDD for a Hadoop file with an arbitrary InputFormat * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param inputFormatClass storage format of the data to be read * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def hadoopFile[K, V]( path: String, inputFormatClass: Class[_ <: InputFormat[K, V]], keyClass: Class[K], valueClass: Class[V], minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // A Hadoop configuration can be about 10 KiB, which is pretty big, so broadcast it. val confBroadcast = broadcast(new SerializableConfiguration(hadoopConfiguration)) val setInputPathsFunc = (jobConf: JobConf) => FileInputFormat.setInputPaths(jobConf, path) new HadoopRDD( this, confBroadcast, Some(setInputPathsFunc), inputFormatClass, keyClass, valueClass, minPartitions).setName(path) } /** * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, * values and the InputFormat so that users don't need to pass them directly. Instead, callers * can just write, for example, * {{{ * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path, minPartitions) * }}} * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
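   *
   * For example, a sketch of the copy described above, applied before caching:
   * {{{
   * val safe = sc.hadoopFile[LongWritable, Text, TextInputFormat](path, minPartitions)
   *   .map { case (k, v) => (k.get, v.toString) }  // copy data out of the reused Writables
   *   .cache()
   * }}}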
   * @param path directory to the input data files, the path can be comma separated paths
   * as a list of inputs
   * @param minPartitions suggested minimum number of partitions for the resulting RDD
   * @return RDD of tuples of key and corresponding value
   */
  def hadoopFile[K, V, F <: InputFormat[K, V]]
      (path: String, minPartitions: Int)
      (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope {
    hadoopFile(path,
      fm.runtimeClass.asInstanceOf[Class[F]],
      km.runtimeClass.asInstanceOf[Class[K]],
      vm.runtimeClass.asInstanceOf[Class[V]],
      minPartitions)
  }

  /**
   * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys,
   * values and the InputFormat so that users don't need to pass them directly. Instead, callers
   * can just write, for example,
   * {{{
   * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path)
   * }}}
   *
   * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
   * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
   * operation will create many references to the same object.
   * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
   * copy them using a `map` function.
   * @param path directory to the input data files, the path can be comma separated paths as
   * a list of inputs
   * @return RDD of tuples of key and corresponding value
   */
  def hadoopFile[K, V, F <: InputFormat[K, V]](path: String)
      (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope {
    hadoopFile[K, V, F](path, defaultMinPartitions)
  }

  /**
   * Smarter version of `newAPIHadoopFile` that uses class tags to figure out the classes of keys,
   * values and the `org.apache.hadoop.mapreduce.InputFormat` (new MapReduce API) so that users
   * don't need to pass them directly. Instead, callers can just write, for example:
   * {{{
   * val file = sparkContext.newAPIHadoopFile[LongWritable, Text, TextInputFormat](path)
   * }}}
   *
   * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
   * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
   * operation will create many references to the same object.
   * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
   * copy them using a `map` function.
   * @param path directory to the input data files, the path can be comma separated paths
   * as a list of inputs
   * @return RDD of tuples of key and corresponding value
   */
  def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]]
      (path: String)
      (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope {
    newAPIHadoopFile(
      path,
      fm.runtimeClass.asInstanceOf[Class[F]],
      km.runtimeClass.asInstanceOf[Class[K]],
      vm.runtimeClass.asInstanceOf[Class[V]])
  }

  /**
   * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat
   * and extra configuration options to pass to the input format.
   *
   * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
   * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
   * operation will create many references to the same object.
   * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
   * copy them using a `map` function.
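   *
   * For example, reading text files with the new-API `TextInputFormat` (a sketch; the path
   * is illustrative):
   * {{{
   * import org.apache.hadoop.io.{LongWritable, Text}
   * import org.apache.hadoop.mapreduce.lib.input.TextInputFormat
   *
   * val rdd = sc.newAPIHadoopFile("hdfs://nn/path/to/data",
   *   classOf[TextInputFormat], classOf[LongWritable], classOf[Text])
   * }}}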
* @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * @param conf Hadoop configuration * @return RDD of tuples of key and corresponding value */ def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]]( path: String, fClass: Class[F], kClass: Class[K], vClass: Class[V], conf: Configuration = hadoopConfiguration): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(hadoopConfiguration) // The call to NewHadoopJob automatically adds security credentials to conf, // so we don't need to explicitly add them ourselves val job = NewHadoopJob.getInstance(conf) // Use setInputPaths so that newAPIHadoopFile aligns with hadoopFile/textFile in taking // comma separated files as input. (see SPARK-7155) NewFileInputFormat.setInputPaths(job, path) val updatedConf = job.getConfiguration new NewHadoopRDD(this, fClass, kClass, vClass, updatedConf).setName(path) } /** * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat * and extra configuration options to pass to the input format. * * @param conf Configuration for setting up the dataset. Note: This will be put into a Broadcast. * Therefore if you plan to reuse this conf to create multiple RDDs, you need to make * sure you won't modify the conf. A safe approach is always creating a new conf for * a new RDD. * @param fClass storage format of the data to be read * @param kClass `Class` of the key associated with the `fClass` parameter * @param vClass `Class` of the value associated with the `fClass` parameter * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. */ def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]]( conf: Configuration = hadoopConfiguration, fClass: Class[F], kClass: Class[K], vClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() // This is a hack to enforce loading hdfs-site.xml. // See SPARK-11227 for details. FileSystem.getLocal(conf) // Add necessary security credentials to the JobConf. Required to access secure HDFS. val jconf = new JobConf(conf) SparkHadoopUtil.get.addCredentials(jconf) new NewHadoopRDD(this, fClass, kClass, vClass, jconf) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
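   *
   * For example (a sketch; the key/value types and path are illustrative):
   * {{{
   * import org.apache.hadoop.io.{IntWritable, Text}
   *
   * val pairs = sc.sequenceFile("hdfs://nn/path/to/seqfile",
   *   classOf[Text], classOf[IntWritable], 8)
   * }}}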
* @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V](path: String, keyClass: Class[K], valueClass: Class[V], minPartitions: Int ): RDD[(K, V)] = withScope { assertNotStopped() val inputFormatClass = classOf[SequenceFileInputFormat[K, V]] hadoopFile(path, inputFormatClass, keyClass, valueClass, minPartitions) } /** * Get an RDD for a Hadoop SequenceFile with given key and value types. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param keyClass `Class` of the key associated with `SequenceFileInputFormat` * @param valueClass `Class` of the value associated with `SequenceFileInputFormat` * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V]( path: String, keyClass: Class[K], valueClass: Class[V]): RDD[(K, V)] = withScope { assertNotStopped() sequenceFile(path, keyClass, valueClass, defaultMinPartitions) } /** * Version of sequenceFile() for types implicitly convertible to Writables through a * WritableConverter. For example, to access a SequenceFile where the keys are Text and the * values are IntWritable, you could simply write * {{{ * sparkContext.sequenceFile[String, Int](path, ...) * }}} * * WritableConverters are provided in a somewhat strange way (by an implicit function) to support * both subclasses of Writable and types for which we define a converter (e.g. Int to * IntWritable). The most natural thing would've been to have implicit objects for the * converters, but then we couldn't have an object for every subclass of Writable (you can't * have a parameterized singleton object). We use functions instead to create a new converter * for the appropriate type. In addition, we pass the converter a ClassTag of its type to * allow it to figure out the Writable class to use in the subclass case. * * @note Because Hadoop's RecordReader class re-uses the same Writable object for each * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle * operation will create many references to the same object. * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first * copy them using a `map` function. 
* @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD of tuples of key and corresponding value */ def sequenceFile[K, V] (path: String, minPartitions: Int = defaultMinPartitions) (implicit km: ClassTag[K], vm: ClassTag[V], kcf: () => WritableConverter[K], vcf: () => WritableConverter[V]): RDD[(K, V)] = { withScope { assertNotStopped() val kc = clean(kcf)() val vc = clean(vcf)() val format = classOf[SequenceFileInputFormat[Writable, Writable]] val writables = hadoopFile(path, format, kc.writableClass(km).asInstanceOf[Class[Writable]], vc.writableClass(vm).asInstanceOf[Class[Writable]], minPartitions) writables.map { case (k, v) => (kc.convert(k), vc.convert(v)) } } } /** * Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and * BytesWritable values that contain a serialized partition. This is still an experimental * storage format and may not be supported exactly as is in future Spark releases. It will also * be pretty slow if you use the default serializer (Java serialization), * though the nice thing about it is that there's very little effort required to save arbitrary * objects. * * @param path directory to the input data files, the path can be comma separated paths * as a list of inputs * @param minPartitions suggested minimum number of partitions for the resulting RDD * @return RDD representing deserialized data from the file(s) */ def objectFile[T: ClassTag]( path: String, minPartitions: Int = defaultMinPartitions): RDD[T] = withScope { assertNotStopped() sequenceFile(path, classOf[NullWritable], classOf[BytesWritable], minPartitions) .flatMap(x => Utils.deserialize[Array[T]](x._2.getBytes, Utils.getContextOrSparkClassLoader)) } protected[spark] def checkpointFile[T: ClassTag](path: String): RDD[T] = withScope { new ReliableCheckpointRDD[T](this, path) } /** Build the union of a list of RDDs. */ def union[T: ClassTag](rdds: Seq[RDD[T]]): RDD[T] = withScope { val nonEmptyRdds = rdds.filter(!_.partitions.isEmpty) val partitioners = nonEmptyRdds.flatMap(_.partitioner).toSet if (nonEmptyRdds.forall(_.partitioner.isDefined) && partitioners.size == 1) { new PartitionerAwareUnionRDD(this, nonEmptyRdds) } else { new UnionRDD(this, nonEmptyRdds) } } /** Build the union of a list of RDDs passed as variable-length arguments. */ def union[T: ClassTag](first: RDD[T], rest: RDD[T]*): RDD[T] = withScope { union(Seq(first) ++ rest) } /** Get an RDD that has no partitions or elements. */ def emptyRDD[T: ClassTag]: RDD[T] = new EmptyRDD[T](this) // Methods for creating shared variables /** * Register the given accumulator. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _]): Unit = { acc.register(this) } /** * Register the given accumulator with given name. * * @note Accumulators must be registered before use, or it will throw exception. */ def register(acc: AccumulatorV2[_, _], name: String): Unit = { acc.register(this, name = Option(name)) } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. */ def longAccumulator: LongAccumulator = { val acc = new LongAccumulator register(acc) acc } /** * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`. 
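   *
   * A short usage sketch (the accumulator name is illustrative):
   * {{{
   * val errorCount = sc.longAccumulator("errorCount")
   * sc.parallelize(1 to 100).foreach { i => if (i % 10 == 0) errorCount.add(1) }
   * // errorCount.value is 10 on the driver once the action completes
   * }}}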
*/ def longAccumulator(name: String): LongAccumulator = { val acc = new LongAccumulator register(acc, name) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator: DoubleAccumulator = { val acc = new DoubleAccumulator register(acc) acc } /** * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`. */ def doubleAccumulator(name: String): DoubleAccumulator = { val acc = new DoubleAccumulator register(acc, name) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T]: CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc) acc } /** * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates * inputs by adding them into the list. */ def collectionAccumulator[T](name: String): CollectionAccumulator[T] = { val acc = new CollectionAccumulator[T] register(acc, name) acc } /** * Broadcast a read-only variable to the cluster, returning a * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions. * The variable will be sent to each cluster only once. * * @param value value to broadcast to the Spark nodes * @return `Broadcast` object, a read-only variable cached on each machine */ def broadcast[T: ClassTag](value: T): Broadcast[T] = { assertNotStopped() require(!classOf[RDD[_]].isAssignableFrom(classTag[T].runtimeClass), "Can not directly broadcast RDDs; instead, call collect() and broadcast the result.") val bc = env.broadcastManager.newBroadcast[T](value, isLocal) val callSite = getCallSite logInfo("Created broadcast " + bc.id + " from " + callSite.shortForm) cleaner.foreach(_.registerBroadcastForCleanup(bc)) bc } /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * * @note A path can be added only once. Subsequent additions of the same path are ignored. */ def addFile(path: String): Unit = { addFile(path, false, false) } /** * Returns a list of file paths that are added to resources. */ def listFiles(): Seq[String] = addedFiles.keySet.toSeq /** * Add a file to be downloaded with this Spark job on every node. * * If a file is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, * use `SparkFiles.get(fileName)` to find its download location. * @param recursive if true, a directory can be given in `path`. Currently directories are * only supported for Hadoop-supported filesystems. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
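   *
   * For example (a sketch; the file path is illustrative):
   * {{{
   * sc.addFile("hdfs://nn/config/lookup.txt")
   * sc.parallelize(1 to 4).foreach { _ =>
   *   val localPath = org.apache.spark.SparkFiles.get("lookup.txt")
   *   // read the locally downloaded copy of the file here
   * }
   * }}}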
*/ def addFile(path: String, recursive: Boolean): Unit = { addFile(path, recursive, false) } private def addFile(path: String, recursive: Boolean, addedOnSubmit: Boolean): Unit = { val uri = new Path(path).toUri val schemeCorrectedURI = uri.getScheme match { case null => new File(path).getCanonicalFile.toURI case "local" => logWarning("File with 'local' scheme is not supported to add to file server, since " + "it is already available on every node.") return case _ => uri } val hadoopPath = new Path(schemeCorrectedURI) val scheme = schemeCorrectedURI.getScheme if (!Array("http", "https", "ftp").contains(scheme)) { val fs = hadoopPath.getFileSystem(hadoopConfiguration) val isDir = fs.getFileStatus(hadoopPath).isDirectory if (!isLocal && scheme == "file" && isDir) { throw new SparkException(s"addFile does not support local directories when not running " + "local mode.") } if (!recursive && isDir) { throw new SparkException(s"Added file $hadoopPath is a directory and recursive is not " + "turned on.") } } else { // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) } val key = if (!isLocal && scheme == "file") { env.rpcEnv.fileServer.addFile(new File(uri.getPath)) } else { if (uri.getScheme == null) { schemeCorrectedURI.toString } else { path } } val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis if (addedFiles.putIfAbsent(key, timestamp).isEmpty) { logInfo(s"Added file $path at $key with timestamp $timestamp") // Fetch the file locally so that closures which are run on the driver can still use the // SparkFiles API to access files. Utils.fetchFile(uri.toString, new File(SparkFiles.getRootDirectory()), conf, env.securityManager, hadoopConfiguration, timestamp, useCache = false) postEnvironmentUpdate() } else { logWarning(s"The path $path has been added already. Overwriting of added paths " + "is not supported in the current version.") } } /** * :: DeveloperApi :: * Register a listener to receive up-calls from events that happen during execution. */ @DeveloperApi def addSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.addToSharedQueue(listener) } /** * :: DeveloperApi :: * Deregister the listener from Spark's listener bus. */ @DeveloperApi def removeSparkListener(listener: SparkListenerInterface): Unit = { listenerBus.removeListener(listener) } private[spark] def getExecutorIds(): Seq[String] = { schedulerBackend match { case b: ExecutorAllocationClient => b.getExecutorIds() case _ => logWarning("Requesting executors is not supported by current scheduler.") Nil } } /** * Get the max number of tasks that can be concurrent launched based on the ResourceProfile * could be used, even if some of them are being used at the moment. * Note that please don't cache the value returned by this method, because the number can change * due to add/remove executors. * * @param rp ResourceProfile which to use to calculate max concurrent tasks. * @return The max number of tasks that can be concurrent launched currently. */ private[spark] def maxNumConcurrentTasks(rp: ResourceProfile): Int = { schedulerBackend.maxNumConcurrentTasks(rp) } /** * Update the cluster manager on our scheduling needs. Three bits of information are included * to help it make decisions. This applies to the default ResourceProfile. * @param numExecutors The total number of executors we'd like to have. 
   *                     The cluster manager shouldn't kill any running executor to reach this
   *                     number, but, if all existing executors were to die, this is the number
   *                     of executors we'd want to be allocated.
   * @param localityAwareTasks The number of tasks in all active stages that have locality
   *                           preferences. This includes running, pending, and completed tasks.
   * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages
   *                             that would like to run on that host.
   *                             This includes running, pending, and completed tasks.
   * @return whether the request is acknowledged by the cluster manager.
   */
  @DeveloperApi
  def requestTotalExecutors(
      numExecutors: Int,
      localityAwareTasks: Int,
      hostToLocalTaskCount: immutable.Map[String, Int]
    ): Boolean = {
    schedulerBackend match {
      case b: ExecutorAllocationClient =>
        // this is being applied to the default resource profile, would need to add api to support
        // others
        val defaultProfId = resourceProfileManager.defaultResourceProfile.id
        b.requestTotalExecutors(immutable.Map(defaultProfId-> numExecutors),
          immutable.Map(localityAwareTasks -> defaultProfId),
          immutable.Map(defaultProfId -> hostToLocalTaskCount))
      case _ =>
        logWarning("Requesting executors is not supported by current scheduler.")
        false
    }
  }

  /**
   * :: DeveloperApi ::
   * Request an additional number of executors from the cluster manager.
   * @return whether the request is received.
   */
  @DeveloperApi
  def requestExecutors(numAdditionalExecutors: Int): Boolean = {
    schedulerBackend match {
      case b: ExecutorAllocationClient =>
        b.requestExecutors(numAdditionalExecutors)
      case _ =>
        logWarning("Requesting executors is not supported by current scheduler.")
        false
    }
  }

  /**
   * :: DeveloperApi ::
   * Request that the cluster manager kill the specified executors.
   *
   * This is not supported when dynamic allocation is turned on.
   *
   * @note This is an indication to the cluster manager that the application wishes to adjust
   * its resource usage downwards. If the application wishes to replace the executors it kills
   * through this method with new ones, it should follow up explicitly with a call to
   * [[SparkContext#requestExecutors]].
   *
   * @return whether the request is received.
   */
  @DeveloperApi
  def killExecutors(executorIds: Seq[String]): Boolean = {
    schedulerBackend match {
      case b: ExecutorAllocationClient =>
        require(executorAllocationManager.isEmpty,
          "killExecutors() unsupported with Dynamic Allocation turned on")
        b.killExecutors(executorIds, adjustTargetNumExecutors = true, countFailures = false,
          force = true).nonEmpty
      case _ =>
        logWarning("Killing executors is not supported by current scheduler.")
        false
    }
  }

  /**
   * :: DeveloperApi ::
   * Request that the cluster manager kill the specified executor.
   *
   * @note This is an indication to the cluster manager that the application wishes to adjust
   * its resource usage downwards. If the application wishes to replace the executor it kills
   * through this method with a new one, it should follow up explicitly with a call to
   * [[SparkContext#requestExecutors]].
   *
   * @return whether the request is received.
   */
  @DeveloperApi
  def killExecutor(executorId: String): Boolean = killExecutors(Seq(executorId))

  /**
   * Request that the cluster manager kill the specified executor without adjusting the
   * application resource requirements.
   *
   * The effect is that a new executor will be launched in place of the one killed by
   * this request. This assumes the cluster manager will automatically and eventually
   * fulfill all missing application resource requests.
* * @note The replace is by no means guaranteed; another application on the same cluster * can steal the window of opportunity and acquire this application's resources in the * mean time. * * @return whether the request is received. */ private[spark] def killAndReplaceExecutor(executorId: String): Boolean = { schedulerBackend match { case b: ExecutorAllocationClient => b.killExecutors(Seq(executorId), adjustTargetNumExecutors = false, countFailures = true, force = true).nonEmpty case _ => logWarning("Killing executors is not supported by current scheduler.") false } } /** The version of Spark on which this application is running. */ def version: String = SPARK_VERSION /** * Return a map from the block manager to the max memory available for caching and the remaining * memory available for caching. */ def getExecutorMemoryStatus: Map[String, (Long, Long)] = { assertNotStopped() env.blockManager.master.getMemoryStatus.map { case(blockManagerId, mem) => (blockManagerId.host + ":" + blockManagerId.port, mem) } } /** * :: DeveloperApi :: * Return information about what RDDs are cached, if they are in mem or on disk, how much space * they take, etc. */ @DeveloperApi def getRDDStorageInfo: Array[RDDInfo] = { getRDDStorageInfo(_ => true) } private[spark] def getRDDStorageInfo(filter: RDD[_] => Boolean): Array[RDDInfo] = { assertNotStopped() val rddInfos = persistentRdds.values.filter(filter).map(RDDInfo.fromRdd).toArray rddInfos.foreach { rddInfo => val rddId = rddInfo.id val rddStorageInfo = statusStore.asOption(statusStore.rdd(rddId)) rddInfo.numCachedPartitions = rddStorageInfo.map(_.numCachedPartitions).getOrElse(0) rddInfo.memSize = rddStorageInfo.map(_.memoryUsed).getOrElse(0L) rddInfo.diskSize = rddStorageInfo.map(_.diskUsed).getOrElse(0L) } rddInfos.filter(_.isCached) } /** * Returns an immutable map of RDDs that have marked themselves as persistent via cache() call. * * @note This does not necessarily mean the caching or computation was successful. 
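   *
   * For example (a sketch):
   * {{{
   * val rdd = sc.parallelize(1 to 10).cache()
   * sc.getPersistentRDDs.contains(rdd.id)  // true once the RDD has been marked persistent
   * }}}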
*/ def getPersistentRDDs: Map[Int, RDD[_]] = persistentRdds.toMap /** * :: DeveloperApi :: * Return pools for fair scheduler */ @DeveloperApi def getAllPools: Seq[Schedulable] = { assertNotStopped() // TODO(xiajunluan): We should take nested pools into account taskScheduler.rootPool.schedulableQueue.asScala.toSeq } /** * :: DeveloperApi :: * Return the pool associated with the given name, if one exists */ @DeveloperApi def getPoolForName(pool: String): Option[Schedulable] = { assertNotStopped() Option(taskScheduler.rootPool.schedulableNameToSchedulable.get(pool)) } /** * Return current scheduling mode */ def getSchedulingMode: SchedulingMode.SchedulingMode = { assertNotStopped() taskScheduler.schedulingMode } /** * Gets the locality information associated with the partition in a particular rdd * @param rdd of interest * @param partition to be looked up for locality * @return list of preferred locations for the partition */ private [spark] def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = { dagScheduler.getPreferredLocs(rdd, partition) } /** * Register an RDD to be persisted in memory and/or disk storage */ private[spark] def persistRDD(rdd: RDD[_]): Unit = { persistentRdds(rdd.id) = rdd } /** * Unpersist an RDD from memory and/or disk storage */ private[spark] def unpersistRDD(rddId: Int, blocking: Boolean): Unit = { env.blockManager.master.removeRdd(rddId, blocking) persistentRdds.remove(rddId) listenerBus.post(SparkListenerUnpersistRDD(rddId)) } /** * Adds a JAR dependency for all tasks to be executed on this `SparkContext` in the future. * * If a jar is added during execution, it will not be available until the next TaskSet starts. * * @param path can be either a local file, a file in HDFS (or other Hadoop-supported filesystems), * an HTTP, HTTPS or FTP URI, or local:/path for a file on every worker node. * * @note A path can be added only once. Subsequent additions of the same path are ignored. 
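   *
   * For example (the jar path is illustrative):
   * {{{
   * sc.addJar("/path/to/my-udfs.jar")
   * }}}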
*/ def addJar(path: String): Unit = { addJar(path, false) } private def addJar(path: String, addedOnSubmit: Boolean): Unit = { def addLocalJarFile(file: File): String = { try { if (!file.exists()) { throw new FileNotFoundException(s"Jar ${file.getAbsolutePath} not found") } if (file.isDirectory) { throw new IllegalArgumentException( s"Directory ${file.getAbsoluteFile} is not allowed for addJar") } env.rpcEnv.fileServer.addJar(file) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) null } } def checkRemoteJarFile(path: String): String = { val hadoopPath = new Path(path) val scheme = hadoopPath.toUri.getScheme if (!Array("http", "https", "ftp").contains(scheme)) { try { val fs = hadoopPath.getFileSystem(hadoopConfiguration) if (!fs.exists(hadoopPath)) { throw new FileNotFoundException(s"Jar ${path} not found") } if (fs.isDirectory(hadoopPath)) { throw new IllegalArgumentException( s"Directory ${path} is not allowed for addJar") } path } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) null } } else { path } } if (path == null || path.isEmpty) { logWarning("null or empty path specified as parameter to addJar") } else { val key = if (path.contains("\\")) { // For local paths with backslashes on Windows, URI throws an exception addLocalJarFile(new File(path)) } else { val uri = new Path(path).toUri // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) uri.getScheme match { // A JAR file which exists only on the driver node case null => // SPARK-22585 path without schema is not url encoded addLocalJarFile(new File(uri.getPath)) // A JAR file which exists only on the driver node case "file" => addLocalJarFile(new File(uri.getPath)) // A JAR file which exists locally on every worker node case "local" => "file:" + uri.getPath case _ => checkRemoteJarFile(path) } } if (key != null) { val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis if (addedJars.putIfAbsent(key, timestamp).isEmpty) { logInfo(s"Added JAR $path at $key with timestamp $timestamp") postEnvironmentUpdate() } else { logWarning(s"The jar $path has been added already. Overwriting of added jars " + "is not supported in the current version.") } } } } /** * Returns a list of jar files that are added to resources. */ def listJars(): Seq[String] = addedJars.keySet.toSeq /** * When stopping SparkContext inside Spark components, it's easy to cause dead-lock since Spark * may wait for some internal threads to finish. It's better to use this method to stop * SparkContext instead. */ private[spark] def stopInNewThread(): Unit = { new Thread("stop-spark-context") { setDaemon(true) override def run(): Unit = { try { SparkContext.this.stop() } catch { case e: Throwable => logError(e.getMessage, e) throw e } } }.start() } /** * Shut down the SparkContext. */ def stop(): Unit = { if (LiveListenerBus.withinListenerThread.value) { throw new SparkException(s"Cannot stop SparkContext within listener bus thread.") } // Use the stopping variable to ensure no contention for the stop scenario. // Still track the stopped variable for use elsewhere in the code. 
if (!stopped.compareAndSet(false, true)) { logInfo("SparkContext already stopped.") return } if (_shutdownHookRef != null) { ShutdownHookManager.removeShutdownHook(_shutdownHookRef) } if (listenerBus != null) { Utils.tryLogNonFatalError { postApplicationEnd() } } Utils.tryLogNonFatalError { _driverLogger.foreach(_.stop()) } Utils.tryLogNonFatalError { _ui.foreach(_.stop()) } if (env != null) { Utils.tryLogNonFatalError { env.metricsSystem.report() } } Utils.tryLogNonFatalError { _cleaner.foreach(_.stop()) } Utils.tryLogNonFatalError { _executorAllocationManager.foreach(_.stop()) } if (_dagScheduler != null) { Utils.tryLogNonFatalError { _dagScheduler.stop() } _dagScheduler = null } if (_listenerBusStarted) { Utils.tryLogNonFatalError { listenerBus.stop() _listenerBusStarted = false } } Utils.tryLogNonFatalError { _plugins.foreach(_.shutdown()) } Utils.tryLogNonFatalError { _eventLogger.foreach(_.stop()) } if (_heartbeater != null) { Utils.tryLogNonFatalError { _heartbeater.stop() } _heartbeater = null } if (_shuffleDriverComponents != null) { Utils.tryLogNonFatalError { _shuffleDriverComponents.cleanupApplication() } } if (env != null && _heartbeatReceiver != null) { Utils.tryLogNonFatalError { env.rpcEnv.stop(_heartbeatReceiver) } } Utils.tryLogNonFatalError { _progressBar.foreach(_.stop()) } _taskScheduler = null // TODO: Cache.stop()? if (_env != null) { Utils.tryLogNonFatalError { _env.stop() } SparkEnv.set(null) } if (_statusStore != null) { _statusStore.close() } // Clear this `InheritableThreadLocal`, or it will still be inherited in child threads even this // `SparkContext` is stopped. localProperties.remove() ResourceProfile.clearDefaultProfile() // Unset YARN mode system env variable, to allow switching between cluster types. SparkContext.clearActiveContext() logInfo("Successfully stopped SparkContext") } /** * Get Spark's home location from either a value set through the constructor, * or the spark.home Java property, or the SPARK_HOME environment variable * (in that order of preference). If neither of these is set, return None. */ private[spark] def getSparkHome(): Option[String] = { conf.getOption("spark.home").orElse(Option(System.getenv("SPARK_HOME"))) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ def setCallSite(shortCallSite: String): Unit = { setLocalProperty(CallSite.SHORT_FORM, shortCallSite) } /** * Set the thread-local property for overriding the call sites * of actions and RDDs. */ private[spark] def setCallSite(callSite: CallSite): Unit = { setLocalProperty(CallSite.SHORT_FORM, callSite.shortForm) setLocalProperty(CallSite.LONG_FORM, callSite.longForm) } /** * Clear the thread-local property for overriding the call sites * of actions and RDDs. */ def clearCallSite(): Unit = { setLocalProperty(CallSite.SHORT_FORM, null) setLocalProperty(CallSite.LONG_FORM, null) } /** * Capture the current user callsite and return a formatted version for printing. If the user * has overridden the call site using `setCallSite()`, this will return the user's version. */ private[spark] def getCallSite(): CallSite = { lazy val callSite = Utils.getCallSite() CallSite( Option(getLocalProperty(CallSite.SHORT_FORM)).getOrElse(callSite.shortForm), Option(getLocalProperty(CallSite.LONG_FORM)).getOrElse(callSite.longForm) ) } /** * Run a function on a given set of partitions in an RDD and pass the results to the given * handler function. This is the main entry point for all actions in Spark. 
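   *
   * A small sketch of direct use (most applications call RDD actions, which invoke this
   * internally); the collection and partition choices are illustrative:
   * {{{
   * val rdd = sc.parallelize(1 to 100, 4)
   * val sums = new Array[Int](2)
   * sc.runJob(rdd, (_: TaskContext, it: Iterator[Int]) => it.sum,
   *   Seq(0, 1), (index: Int, partSum: Int) => sums(index) = partSum)
   * }}}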
* * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit): Unit = { if (stopped.get()) { throw new IllegalStateException("SparkContext has been shutdown") } val callSite = getCallSite val cleanedFunc = clean(func) logInfo("Starting job: " + callSite.shortForm) if (conf.getBoolean("spark.logLineage", false)) { logInfo("RDD's recursive dependencies:\n" + rdd.toDebugString) } dagScheduler.runJob(rdd, cleanedFunc, partitions, callSite, resultHandler, localProperties.get) progressBar.foreach(_.finishAll()) rdd.doCheckpoint() } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * The function that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int]): Array[U] = { val results = new Array[U](partitions.size) runJob[T, U](rdd, func, partitions, (index, res) => results(index) = res) results } /** * Run a function on a given set of partitions in an RDD and return the results as an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag]( rdd: RDD[T], func: Iterator[T] => U, partitions: Seq[Int]): Array[U] = { val cleanedFunc = clean(func) runJob(rdd, (ctx: TaskContext, it: Iterator[T]) => cleanedFunc(it), partitions) } /** * Run a job on all partitions in an RDD and return the results in an array. The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: (TaskContext, Iterator[T]) => U): Array[U] = { runJob(rdd, func, 0 until rdd.partitions.length) } /** * Run a job on all partitions in an RDD and return the results in an array. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @return in-memory collection with a result of the job (each collection element will contain * a result from one partition) */ def runJob[T, U: ClassTag](rdd: RDD[T], func: Iterator[T] => U): Array[U] = { runJob(rdd, func, 0 until rdd.partitions.length) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. 
The function * that is run against each partition additionally takes `TaskContext` argument. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: (TaskContext, Iterator[T]) => U, resultHandler: (Int, U) => Unit): Unit = { runJob[T, U](rdd, processPartition, 0 until rdd.partitions.length, resultHandler) } /** * Run a job on all partitions in an RDD and pass the results to a handler function. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param resultHandler callback to pass each result to */ def runJob[T, U: ClassTag]( rdd: RDD[T], processPartition: Iterator[T] => U, resultHandler: (Int, U) => Unit): Unit = { val processFunc = (context: TaskContext, iter: Iterator[T]) => processPartition(iter) runJob[T, U](rdd, processFunc, 0 until rdd.partitions.length, resultHandler) } /** * :: DeveloperApi :: * Run a job that can return approximate results. * * @param rdd target RDD to run tasks on * @param func a function to run on each partition of the RDD * @param evaluator `ApproximateEvaluator` to receive the partial results * @param timeout maximum time to wait for the job, in milliseconds * @return partial result (how partial depends on whether the job was finished before or * after timeout) */ @DeveloperApi def runApproximateJob[T, U, R]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, evaluator: ApproximateEvaluator[U, R], timeout: Long): PartialResult[R] = { assertNotStopped() val callSite = getCallSite logInfo("Starting job: " + callSite.shortForm) val start = System.nanoTime val cleanedFunc = clean(func) val result = dagScheduler.runApproximateJob(rdd, cleanedFunc, evaluator, callSite, timeout, localProperties.get) logInfo( "Job finished: " + callSite.shortForm + ", took " + (System.nanoTime - start) / 1e9 + " s") result } /** * Submit a job for execution and return a FutureJob holding the result. * * @param rdd target RDD to run tasks on * @param processPartition a function to run on each partition of the RDD * @param partitions set of partitions to run on; some jobs may not want to compute on all * partitions of the target RDD, e.g. for operations like `first()` * @param resultHandler callback to pass each result to * @param resultFunc function to be executed when the result is ready */ def submitJob[T, U, R]( rdd: RDD[T], processPartition: Iterator[T] => U, partitions: Seq[Int], resultHandler: (Int, U) => Unit, resultFunc: => R): SimpleFutureAction[R] = { assertNotStopped() val cleanF = clean(processPartition) val callSite = getCallSite val waiter = dagScheduler.submitJob( rdd, (context: TaskContext, iter: Iterator[T]) => cleanF(iter), partitions, callSite, resultHandler, localProperties.get) new SimpleFutureAction(waiter, resultFunc) } /** * Submit a map stage for execution. This is currently an internal API only, but might be * promoted to DeveloperApi in the future. */ private[spark] def submitMapStage[K, V, C](dependency: ShuffleDependency[K, V, C]) : SimpleFutureAction[MapOutputStatistics] = { assertNotStopped() val callSite = getCallSite() var result: MapOutputStatistics = null val waiter = dagScheduler.submitMapStage( dependency, (r: MapOutputStatistics) => { result = r }, callSite, localProperties.get) new SimpleFutureAction[MapOutputStatistics](waiter, result) } /** * Cancel active jobs for the specified group. 
See `org.apache.spark.SparkContext.setJobGroup` * for more information. */ def cancelJobGroup(groupId: String): Unit = { assertNotStopped() dagScheduler.cancelJobGroup(groupId) } /** Cancel all jobs that have been scheduled or are running. */ def cancelAllJobs(): Unit = { assertNotStopped() dagScheduler.cancelAllJobs() } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @param reason optional reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int, reason: String): Unit = { dagScheduler.cancelJob(jobId, Option(reason)) } /** * Cancel a given job if it's scheduled or running. * * @param jobId the job ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelJob(jobId: Int): Unit = { dagScheduler.cancelJob(jobId, None) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @param reason reason for cancellation * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int, reason: String): Unit = { dagScheduler.cancelStage(stageId, Option(reason)) } /** * Cancel a given stage and all jobs associated with it. * * @param stageId the stage ID to cancel * @note Throws `InterruptedException` if the cancel message cannot be sent */ def cancelStage(stageId: Int): Unit = { dagScheduler.cancelStage(stageId, None) } /** * Kill and reschedule the given task attempt. Task ids can be obtained from the Spark UI * or through SparkListener.onTaskStart. * * @param taskId the task ID to kill. This id uniquely identifies the task attempt. * @param interruptThread whether to interrupt the thread running the task. * @param reason the reason for killing the task, which should be a short string. If a task * is killed multiple times with different reasons, only one reason will be reported. * * @return Whether the task was successfully killed. */ def killTaskAttempt( taskId: Long, interruptThread: Boolean = true, reason: String = "killed via SparkContext.killTaskAttempt"): Boolean = { dagScheduler.killTaskAttempt(taskId, interruptThread, reason) } /** * Clean a closure to make it ready to be serialized and sent to tasks * (removes unreferenced variables in $outer's, updates REPL variables) * If <tt>checkSerializable</tt> is set, <tt>clean</tt> will also proactively * check to see if <tt>f</tt> is serializable and throw a <tt>SparkException</tt> * if not. * * @param f the closure to clean * @param checkSerializable whether or not to immediately check <tt>f</tt> for serializability * @throws SparkException if <tt>checkSerializable</tt> is set but <tt>f</tt> is not * serializable * @return the cleaned closure */ private[spark] def clean[F <: AnyRef](f: F, checkSerializable: Boolean = true): F = { ClosureCleaner.clean(f, checkSerializable) f } /** * Set the directory under which RDDs are going to be checkpointed. * @param directory path to the directory where checkpoint files will be stored * (must be HDFS path if running in cluster) */ def setCheckpointDir(directory: String): Unit = { // If we are running on a cluster, log a warning if the directory is local. // Otherwise, the driver may attempt to reconstruct the checkpointed RDD from // its own local file system, which is incorrect because the checkpoint files // are actually on the executor machines. 
if (!isLocal && Utils.nonLocalPaths(directory).isEmpty) { logWarning("Spark is not running in local mode, therefore the checkpoint directory " + s"must not be on the local filesystem. Directory '$directory' " + "appears to be on the local filesystem.") } checkpointDir = Option(directory).map { dir => val path = new Path(dir, UUID.randomUUID().toString) val fs = path.getFileSystem(hadoopConfiguration) fs.mkdirs(path) fs.getFileStatus(path).getPath.toString } } def getCheckpointDir: Option[String] = checkpointDir /** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */ def defaultParallelism: Int = { assertNotStopped() taskScheduler.defaultParallelism } /** * Default min number of partitions for Hadoop RDDs when not given by user * Notice that we use math.min so the "defaultMinPartitions" cannot be higher than 2. * The reasons for this are discussed in https://github.com/mesos/spark/pull/718 */ def defaultMinPartitions: Int = math.min(defaultParallelism, 2) private val nextShuffleId = new AtomicInteger(0) private[spark] def newShuffleId(): Int = nextShuffleId.getAndIncrement() private val nextRddId = new AtomicInteger(0) /** Register a new RDD, returning its RDD ID */ private[spark] def newRddId(): Int = nextRddId.getAndIncrement() /** * Registers listeners specified in spark.extraListeners, then starts the listener bus. * This should be called after all internal listeners have been registered with the listener bus * (e.g. after the web UI and event logging listeners have been registered). */ private def setupAndStartListenerBus(): Unit = { try { conf.get(EXTRA_LISTENERS).foreach { classNames => val listeners = Utils.loadExtensions(classOf[SparkListenerInterface], classNames, conf) listeners.foreach { listener => listenerBus.addToSharedQueue(listener) logInfo(s"Registered listener ${listener.getClass().getName()}") } } } catch { case e: Exception => try { stop() } finally { throw new SparkException(s"Exception when registering SparkListener", e) } } listenerBus.start(this, _env.metricsSystem) _listenerBusStarted = true } /** Post the application start event */ private def postApplicationStart(): Unit = { // Note: this code assumes that the task scheduler has been initialized and has contacted // the cluster manager to get an application ID (in case the cluster manager provides one). listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId), startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls, schedulerBackend.getDriverAttributes)) _driverLogger.foreach(_.startSync(_hadoopConfiguration)) } /** Post the application end event */ private def postApplicationEnd(): Unit = { listenerBus.post(SparkListenerApplicationEnd(System.currentTimeMillis)) } /** Post the environment update event once the task scheduler is ready */ private def postEnvironmentUpdate(): Unit = { if (taskScheduler != null) { val schedulingMode = getSchedulingMode.toString val addedJarPaths = addedJars.keys.toSeq val addedFilePaths = addedFiles.keys.toSeq val environmentDetails = SparkEnv.environmentDetails(conf, hadoopConfiguration, schedulingMode, addedJarPaths, addedFilePaths) val environmentUpdate = SparkListenerEnvironmentUpdate(environmentDetails) listenerBus.post(environmentUpdate) } } /** Reports heartbeat metrics for the driver. 
*/ private def reportHeartBeat(executorMetricsSource: Option[ExecutorMetricsSource]): Unit = { val currentMetrics = ExecutorMetrics.getCurrentMetrics(env.memoryManager) executorMetricsSource.foreach(_.updateMetricsSnapshot(currentMetrics)) val driverUpdates = new HashMap[(Int, Int), ExecutorMetrics] // In the driver, we do not track per-stage metrics, so use a dummy stage for the key driverUpdates.put(EventLoggingListener.DRIVER_STAGE_KEY, new ExecutorMetrics(currentMetrics)) val accumUpdates = new Array[(Long, Int, Int, Seq[AccumulableInfo])](0) listenerBus.post(SparkListenerExecutorMetricsUpdate("driver", accumUpdates, driverUpdates)) } // In order to prevent multiple SparkContexts from being active at the same time, mark this // context as having finished construction. // NOTE: this must be placed at the end of the SparkContext constructor. SparkContext.setActiveContext(this) } /** * The SparkContext object contains a number of implicit conversions and parameters for use with * various Spark features. */ object SparkContext extends Logging { private val VALID_LOG_LEVELS = Set("ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN") /** * Lock that guards access to global variables that track SparkContext construction. */ private val SPARK_CONTEXT_CONSTRUCTOR_LOCK = new Object() /** * The active, fully-constructed SparkContext. If no SparkContext is active, then this is `null`. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. */ private val activeContext: AtomicReference[SparkContext] = new AtomicReference[SparkContext](null) /** * Points to a partially-constructed SparkContext if another thread is in the SparkContext * constructor, or `None` if no SparkContext is being constructed. * * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`. */ private var contextBeingConstructed: Option[SparkContext] = None /** * Called to ensure that no other SparkContext is running in this JVM. * * Throws an exception if a running context is detected and logs a warning if another thread is * constructing a SparkContext. This warning is necessary because the current locking scheme * prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private def assertNoOtherContextIsRunning(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()).filter(_ ne sc).foreach { ctx => val errMsg = "Only one SparkContext should be running in this JVM (see SPARK-2243)." + s"The currently running SparkContext was created at:\n${ctx.creationSite.longForm}" throw new SparkException(errMsg) } contextBeingConstructed.filter(_ ne sc).foreach { otherContext => // Since otherContext might point to a partially-constructed context, guard against // its creationSite field being null: val otherContextCreationSite = Option(otherContext.creationSite).map(_.longForm).getOrElse("unknown location") val warnMsg = "Another SparkContext is being constructed (or threw an exception in its" + " constructor). This may indicate an error, since only one SparkContext should be" + " running in this JVM (see SPARK-2243)." + s" The other SparkContext was created at:\n$otherContextCreationSite" logWarning(warnMsg) } } } /** * Called to ensure that SparkContext is created or accessed only on the Driver. * * Throws an exception if a SparkContext is about to be created in executors. 
*/ private def assertOnDriver(): Unit = { if (TaskContext.get != null) { // we're accessing it during task execution, fail. throw new IllegalStateException( "SparkContext should only be created and accessed on the driver.") } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * @param config `SparkConfig` that will be used for initialisation of the `SparkContext` * @return current `SparkContext` (or a new one if it wasn't created before the function call) */ def getOrCreate(config: SparkConf): SparkContext = { // Synchronize to ensure that multiple create requests don't trigger an exception // from assertNoOtherContextIsRunning within setActiveContext SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext(config)) } else { if (config.getAll.nonEmpty) { logWarning("Using an existing SparkContext; some configuration may not take effect.") } } activeContext.get() } } /** * This function may be used to get or instantiate a SparkContext and register it as a * singleton object. Because we can only have one active SparkContext per JVM, * this is useful when applications may wish to share a SparkContext. * * This method allows not passing a SparkConf (useful if just retrieving). * * @return current `SparkContext` (or a new one if wasn't created before the function call) */ def getOrCreate(): SparkContext = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { if (activeContext.get() == null) { setActiveContext(new SparkContext()) } activeContext.get() } } /** Return the current active [[SparkContext]] if any. */ private[spark] def getActive: Option[SparkContext] = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { Option(activeContext.get()) } } /** * Called at the beginning of the SparkContext constructor to ensure that no SparkContext is * running. Throws an exception if a running context is detected and logs a warning if another * thread is constructing a SparkContext. This warning is necessary because the current locking * scheme prevents us from reliably distinguishing between cases where another context is being * constructed and cases where another constructor threw an exception. */ private[spark] def markPartiallyConstructed(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = Some(sc) } } /** * Called at the end of the SparkContext constructor to ensure that no other SparkContext has * raced with this constructor and started. */ private[spark] def setActiveContext(sc: SparkContext): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { assertNoOtherContextIsRunning(sc) contextBeingConstructed = None activeContext.set(sc) } } /** * Clears the active SparkContext metadata. This is called by `SparkContext#stop()`. It's * also called in unit tests to prevent a flood of warnings from test suites that don't / can't * properly clean up their SparkContexts. 
*/ private[spark] def clearActiveContext(): Unit = { SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized { activeContext.set(null) } } private[spark] val SPARK_JOB_DESCRIPTION = "spark.job.description" private[spark] val SPARK_JOB_GROUP_ID = "spark.jobGroup.id" private[spark] val SPARK_JOB_INTERRUPT_ON_CANCEL = "spark.job.interruptOnCancel" private[spark] val SPARK_SCHEDULER_POOL = "spark.scheduler.pool" private[spark] val RDD_SCOPE_KEY = "spark.rdd.scope" private[spark] val RDD_SCOPE_NO_OVERRIDE_KEY = "spark.rdd.scope.noOverride" /** * Executor id for the driver. In earlier versions of Spark, this was `<driver>`, but this was * changed to `driver` because the angle brackets caused escaping issues in URLs and XML (see * SPARK-6716 for more details). */ private[spark] val DRIVER_IDENTIFIER = "driver" private implicit def arrayToArrayWritable[T <: Writable : ClassTag](arr: Iterable[T]) : ArrayWritable = { def anyToWritable[U <: Writable](u: U): Writable = u new ArrayWritable(classTag[T].runtimeClass.asInstanceOf[Class[Writable]], arr.map(x => anyToWritable(x)).toArray) } /** * Find the JAR from which a given class was loaded, to make it easy for users to pass * their JARs to SparkContext. * * @param cls class that should be inside of the jar * @return jar that contains the Class, `None` if not found */ def jarOfClass(cls: Class[_]): Option[String] = { val uri = cls.getResource("/" + cls.getName.replace('.', '/') + ".class") if (uri != null) { val uriStr = uri.toString if (uriStr.startsWith("jar:file:")) { // URI will be of the form "jar:file:/path/foo.jar!/package/cls.class", // so pull out the /path/foo.jar Some(uriStr.substring("jar:file:".length, uriStr.indexOf('!'))) } else { None } } else { None } } /** * Find the JAR that contains the class of a particular object, to make it easy for users * to pass their JARs to SparkContext. In most cases you can call jarOfObject(this) in * your driver program. * * @param obj reference to an instance which class should be inside of the jar * @return jar that contains the class of the instance, `None` if not found */ def jarOfObject(obj: AnyRef): Option[String] = jarOfClass(obj.getClass) /** * Creates a modified version of a SparkConf with the parameters that can be passed separately * to SparkContext, to make it easier to write SparkContext's constructors. This ignores * parameters that are passed as the default value of null, instead of throwing an exception * like SparkConf would. 
*/ private[spark] def updatedConf( conf: SparkConf, master: String, appName: String, sparkHome: String = null, jars: Seq[String] = Nil, environment: Map[String, String] = Map()): SparkConf = { val res = conf.clone() res.setMaster(master) res.setAppName(appName) if (sparkHome != null) { res.setSparkHome(sparkHome) } if (jars != null && !jars.isEmpty) { res.setJars(jars) } res.setExecutorEnv(environment.toSeq) res } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String): Int = { numDriverCores(master, null) } /** * The number of cores available to the driver to use for tasks such as I/O with Netty */ private[spark] def numDriverCores(master: String, conf: SparkConf): Int = { def convertToInt(threads: String): Int = { if (threads == "*") Runtime.getRuntime.availableProcessors() else threads.toInt } master match { case "local" => 1 case SparkMasterRegex.LOCAL_N_REGEX(threads) => convertToInt(threads) case SparkMasterRegex.LOCAL_N_FAILURES_REGEX(threads, _) => convertToInt(threads) case "yarn" | SparkMasterRegex.KUBERNETES_REGEX(_) => if (conf != null && conf.get(SUBMIT_DEPLOY_MODE) == "cluster") { conf.getInt(DRIVER_CORES.key, 0) } else { 0 } case _ => 0 // Either driver is not being used, or its core count will be interpolated later } } /** * Create a task scheduler based on a given master URL. * Return a 2-tuple of the scheduler backend and the task scheduler. */ private def createTaskScheduler( sc: SparkContext, master: String, deployMode: String): (SchedulerBackend, TaskScheduler) = { import SparkMasterRegex._ // When running locally, don't try to re-execute tasks on failure. val MAX_LOCAL_TASK_FAILURES = 1 // Ensure that default executor's resources satisfies one or more tasks requirement. // This function is for cluster managers that don't set the executor cores config, for // others its checked in ResourceProfile. def checkResourcesPerTask(executorCores: Int): Unit = { val taskCores = sc.conf.get(CPUS_PER_TASK) if (!sc.conf.get(SKIP_VALIDATE_CORES_TESTING)) { validateTaskCpusLargeEnough(sc.conf, executorCores, taskCores) } val defaultProf = sc.resourceProfileManager.defaultResourceProfile ResourceUtils.warnOnWastedResources(defaultProf, sc.conf, Some(executorCores)) } master match { case "local" => checkResourcesPerTask(1) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, 1) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_REGEX(threads) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*] estimates the number of cores on the machine; local[N] uses exactly N threads. 
val threadCount = if (threads == "*") localCpuCount else threads.toInt if (threadCount <= 0) { throw new SparkException(s"Asked to run locally with $threadCount threads") } checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case LOCAL_N_FAILURES_REGEX(threads, maxFailures) => def localCpuCount: Int = Runtime.getRuntime.availableProcessors() // local[*, M] means the number of cores on the computer with M failures // local[N, M] means exactly N threads with M failures val threadCount = if (threads == "*") localCpuCount else threads.toInt checkResourcesPerTask(threadCount) val scheduler = new TaskSchedulerImpl(sc, maxFailures.toInt, isLocal = true) val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount) scheduler.initialize(backend) (backend, scheduler) case SPARK_REGEX(sparkUrl) => val scheduler = new TaskSchedulerImpl(sc) val masterUrls = sparkUrl.split(",").map("spark://" + _) val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) (backend, scheduler) case LOCAL_CLUSTER_REGEX(numWorkers, coresPerWorker, memoryPerWorker) => checkResourcesPerTask(coresPerWorker.toInt) // Check to make sure memory requested <= memoryPerWorker. Otherwise Spark will just hang. val memoryPerWorkerInt = memoryPerWorker.toInt if (sc.executorMemory > memoryPerWorkerInt) { throw new SparkException( "Asked to launch cluster with %d MiB RAM / worker but requested %d MiB/worker".format( memoryPerWorkerInt, sc.executorMemory)) } // For host local mode setting the default of SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED // to false because this mode is intended to be used for testing and in this case all the // executors are running on the same host. So if host local reading was enabled here then // testing of the remote fetching would be secondary as setting this config explicitly to // false would be required in most of the unit test (despite the fact that remote fetching // is much more frequent in production). 
sc.conf.setIfMissing(SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED, false) val scheduler = new TaskSchedulerImpl(sc) val localCluster = new LocalSparkCluster( numWorkers.toInt, coresPerWorker.toInt, memoryPerWorkerInt, sc.conf) val masterUrls = localCluster.start() val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls) scheduler.initialize(backend) backend.shutdownCallback = (backend: StandaloneSchedulerBackend) => { localCluster.stop() } (backend, scheduler) case masterUrl => val cm = getClusterManager(masterUrl) match { case Some(clusterMgr) => clusterMgr case None => throw new SparkException("Could not parse Master URL: '" + master + "'") } try { val scheduler = cm.createTaskScheduler(sc, masterUrl) val backend = cm.createSchedulerBackend(sc, masterUrl, scheduler) cm.initialize(scheduler, backend) (backend, scheduler) } catch { case se: SparkException => throw se case NonFatal(e) => throw new SparkException("External scheduler cannot be instantiated", e) } } } private def getClusterManager(url: String): Option[ExternalClusterManager] = { val loader = Utils.getContextOrSparkClassLoader val serviceLoaders = ServiceLoader.load(classOf[ExternalClusterManager], loader).asScala.filter(_.canCreate(url)) if (serviceLoaders.size > 1) { throw new SparkException( s"Multiple external cluster managers registered for the url $url: $serviceLoaders") } serviceLoaders.headOption } } /** * A collection of regexes for extracting information from the master string. */ private object SparkMasterRegex { // Regular expression used for local[N] and local[*] master formats val LOCAL_N_REGEX = """local\[([0-9]+|\*)\]""".r // Regular expression for local[N, maxRetries], used in tests with failing tasks val LOCAL_N_FAILURES_REGEX = """local\[([0-9]+|\*)\s*,\s*([0-9]+)\]""".r // Regular expression for simulating a Spark cluster of [N, cores, memory] locally val LOCAL_CLUSTER_REGEX = """local-cluster\[\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*]""".r // Regular expression for connecting to Spark deploy clusters val SPARK_REGEX = """spark://(.*)""".r // Regular expression for connecting to kubernetes clusters val KUBERNETES_REGEX = """k8s://(.*)""".r } /** * A class encapsulating how to convert some type `T` from `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The getter for the writable class takes a `ClassTag[T]` in case this is a generic object * that doesn't know the type of `T` when it is created. This sounds strange but is necessary to * support converting subclasses of `Writable` to themselves (`writableWritableConverter()`). */ private[spark] class WritableConverter[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: Writable => T) extends Serializable object WritableConverter { // Helper objects for converting common types to Writable private[spark] def simpleWritableConverter[T, W <: Writable: ClassTag](convert: W => T) : WritableConverter[T] = { val wClass = classTag[W].runtimeClass.asInstanceOf[Class[W]] new WritableConverter[T](_ => wClass, x => convert(x.asInstanceOf[W])) } // The following implicit functions were in SparkContext before 1.3 and users had to // `import SparkContext._` to enable them. Now we move them here to make the compiler find // them automatically. However, we still keep the old functions in SparkContext for backward // compatibility and forward to the following functions directly. 
// The following implicit declarations have been added on top of the very similar ones // below in order to enable compatibility with Scala 2.12. Scala 2.12 deprecates eta // expansion of zero-arg methods and thus won't match a no-arg method where it expects // an implicit that is a function of no args. implicit val intWritableConverterFn: () => WritableConverter[Int] = () => simpleWritableConverter[Int, IntWritable](_.get) implicit val longWritableConverterFn: () => WritableConverter[Long] = () => simpleWritableConverter[Long, LongWritable](_.get) implicit val doubleWritableConverterFn: () => WritableConverter[Double] = () => simpleWritableConverter[Double, DoubleWritable](_.get) implicit val floatWritableConverterFn: () => WritableConverter[Float] = () => simpleWritableConverter[Float, FloatWritable](_.get) implicit val booleanWritableConverterFn: () => WritableConverter[Boolean] = () => simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit val bytesWritableConverterFn: () => WritableConverter[Array[Byte]] = { () => simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer then data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit val stringWritableConverterFn: () => WritableConverter[String] = () => simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverterFn[T <: Writable : ClassTag]: () => WritableConverter[T] = () => new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T]) // These implicits remain included for backwards-compatibility. They fulfill the // same role as those above. implicit def intWritableConverter(): WritableConverter[Int] = simpleWritableConverter[Int, IntWritable](_.get) implicit def longWritableConverter(): WritableConverter[Long] = simpleWritableConverter[Long, LongWritable](_.get) implicit def doubleWritableConverter(): WritableConverter[Double] = simpleWritableConverter[Double, DoubleWritable](_.get) implicit def floatWritableConverter(): WritableConverter[Float] = simpleWritableConverter[Float, FloatWritable](_.get) implicit def booleanWritableConverter(): WritableConverter[Boolean] = simpleWritableConverter[Boolean, BooleanWritable](_.get) implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = { simpleWritableConverter[Array[Byte], BytesWritable] { bw => // getBytes method returns array which is longer then data to be returned Arrays.copyOfRange(bw.getBytes, 0, bw.getLength) } } implicit def stringWritableConverter(): WritableConverter[String] = simpleWritableConverter[String, Text](_.toString) implicit def writableWritableConverter[T <: Writable](): WritableConverter[T] = new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T]) } /** * A class encapsulating how to convert some type `T` to `Writable`. It stores both the `Writable` * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the * conversion. * The `Writable` class will be used in `SequenceFileRDDFunctions`. 
*/ private[spark] class WritableFactory[T]( val writableClass: ClassTag[T] => Class[_ <: Writable], val convert: T => Writable) extends Serializable object WritableFactory { private[spark] def simpleWritableFactory[T: ClassTag, W <: Writable : ClassTag](convert: T => W) : WritableFactory[T] = { val writableClass = implicitly[ClassTag[W]].runtimeClass.asInstanceOf[Class[W]] new WritableFactory[T](_ => writableClass, convert) } implicit def intWritableFactory: WritableFactory[Int] = simpleWritableFactory(new IntWritable(_)) implicit def longWritableFactory: WritableFactory[Long] = simpleWritableFactory(new LongWritable(_)) implicit def floatWritableFactory: WritableFactory[Float] = simpleWritableFactory(new FloatWritable(_)) implicit def doubleWritableFactory: WritableFactory[Double] = simpleWritableFactory(new DoubleWritable(_)) implicit def booleanWritableFactory: WritableFactory[Boolean] = simpleWritableFactory(new BooleanWritable(_)) implicit def bytesWritableFactory: WritableFactory[Array[Byte]] = simpleWritableFactory(new BytesWritable(_)) implicit def stringWritableFactory: WritableFactory[String] = simpleWritableFactory(new Text(_)) implicit def writableWritableFactory[T <: Writable: ClassTag]: WritableFactory[T] = simpleWritableFactory(w => w) }
rednaxelafx/apache-spark
core/src/main/scala/org/apache/spark/SparkContext.scala
Scala
apache-2.0
124,727
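A minimal, illustrative sketch of the `runJob` overloads and `SparkContext.getOrCreate` documented in the file above; the local master, the per-partition sum, and the names RunJobSketch/partitionSums are assumptions made for this example, not part of the file.

import org.apache.spark.{SparkConf, SparkContext}

object RunJobSketch {
  def main(args: Array[String]): Unit = {
    // Reuse an active context if one exists, otherwise create one (see getOrCreate above).
    val sc = SparkContext.getOrCreate(
      new SparkConf().setMaster("local[2]").setAppName("runJob-sketch"))

    val rdd = sc.parallelize(1 to 100, 4) // 4 partitions
    val partitionSums = new Array[Int](rdd.partitions.length)

    // The Iterator[T] => U variant of runJob: sum each partition and collect the
    // results on the driver through the (partitionId, result) handler.
    sc.runJob(rdd, (it: Iterator[Int]) => it.sum, (i: Int, sum: Int) => partitionSums(i) = sum)

    println(partitionSums.mkString(", "))
    sc.stop()
  }
}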
/*******************************************************************************
    Copyright (c) 2013, S-Core, KAIST.
    All rights reserved.

    Use is subject to license terms.

    This distribution may include materials developed by third parties.
 ******************************************************************************/

package kr.ac.kaist.jsaf.analysis.typing.models.DOMCore

import kr.ac.kaist.jsaf.analysis.typing.domain._
import kr.ac.kaist.jsaf.analysis.typing.domain.{BoolFalse => F, BoolTrue => T}
import kr.ac.kaist.jsaf.analysis.typing.models._
import kr.ac.kaist.jsaf.analysis.typing.models.AbsConstValue
import kr.ac.kaist.jsaf.analysis.typing.AddressManager._

object DOMNotation extends DOM {
  private val name = "Notation"

  /* predefined locations */
  val loc_cons = newSystemRecentLoc(name + "Cons")
  val loc_proto = newSystemRecentLoc(name + "Proto")
  val loc_ins = newSystemRecentLoc(name + "Ins")

  /* constructor or object */
  private val prop_cons: List[(String, AbsProperty)] = List(
    ("@class", AbsConstValue(PropValue(AbsString.alpha("Function")))),
    ("@proto", AbsConstValue(PropValue(ObjectValue(Value(ObjProtoLoc), F, F, F)))),
    ("@extensible", AbsConstValue(PropValue(BoolTrue))),
    ("@hasinstance", AbsConstValue(PropValueNullTop)),
    ("length", AbsConstValue(PropValue(ObjectValue(Value(AbsNumber.alpha(0)), F, F, F)))),
    ("prototype", AbsConstValue(PropValue(ObjectValue(Value(loc_proto), F, F, F))))
  )

  /* global */
  private val prop_global: List[(String, AbsProperty)] = List(
    (name, AbsConstValue(PropValue(ObjectValue(loc_cons, T, F, T))))
  )

  def getInitList(): List[(Loc, List[(String, AbsProperty)])] = List(
    (loc_cons, prop_cons),
    (loc_proto, prop_proto),
    (GlobalLoc, prop_global)
  )

  /* prototype */
  private val prop_proto: List[(String, AbsProperty)] = List(
    ("@class", AbsConstValue(PropValue(AbsString.alpha("Object")))),
    ("@proto", AbsConstValue(PropValue(ObjectValue(Value(DOMNode.loc_proto), F, F, F)))),
    ("@extensible", AbsConstValue(PropValue(BoolTrue)))
  )

  def getSemanticMap(): Map[String, SemanticFun] = { Map() }
  def getPreSemanticMap(): Map[String, SemanticFun] = { Map() }
  def getDefMap(): Map[String, AccessFun] = { Map() }
  def getUseMap(): Map[String, AccessFun] = { Map() }

  /* semantics */
  // no function

  /* instance */
  //def instantiate() = Unit // not yet implemented
  // instance of DOMNotation should have 'publicId', 'systemId' property
}
darkrsw/safe
src/main/scala/kr/ac/kaist/jsaf/analysis/typing/models/DOMCore/DOMNotation.scala
Scala
bsd-3-clause
2,521
package com.phosphene.kafkastorm.storm

import backtype.storm.topology.base.BaseBasicBolt
import backtype.storm.topology.{BasicOutputCollector, OutputFieldsDeclarer}
import backtype.storm.tuple.{Fields, Tuple, Values}
import com.google.common.base.Throwables
import com.twitter.bijection.avro.SpecificAvroCodecs
import com.twitter.bijection.Injection
import org.apache.avro.specific.SpecificRecordBase
import org.slf4j.{Logger, LoggerFactory}

import scala.util.{Try, Failure, Success}

/**
 * A binaryAvro->pojoAvro converter bolt.
 *
 * This bolt expects incoming data in Avro-encoded binary format, serialized according to the Avro schema of `T`. It
 * will deserialize the incoming data into a `T` pojo, and emit this pojo to downstream consumers. As such this bolt
 * can be considered the Storm equivalent of Twitter Bijection's `Injection.invert[T, Array[Byte]](bytes)` for
 * Avro data.
 *
 * By using this bolt you don't need to write another decoder bolt just because the bolt needs to handle a different
 * Avro schema.
 *
 * @example {{{
 * import backtype.storm.topology.TopologyBuilder
 * import com.phosphene.avro.Tweet
 *
 * val builder = new TopologyBuilder
 * // ...spout is set up here...
 * val decoderBolt = new AvroDecoderBolt[Stashy]
 * builder.setBolt(decoderBoltId, decoderBolt).shuffleGrouping(spoutId) // or whatever grouping you need
 * }}}
 *
 * @param inputField The name of the field in the input tuple to read from. (Default: "bytes")
 * @param outputField The name of the field in the output tuple to write to. (Default: "pojo")
 * @tparam T The type of the Avro record (e.g. a `Stashy`) based on the underlying Avro schema being used. Must be
 *           a subclass of Avro's `SpecificRecordBase`.
 */
class AvroDecoderBolt[T <: SpecificRecordBase : Manifest](
  inputField: String = "bytes",
  outputField: String = "pojo")
  extends BaseBasicBolt {

  // Note: Ideally we would like to use TypeTag's instead of Manifest's here.  Doing so would only require replacing
  // `manifest[T]` with `typeOf[T]`, and adding AvroDecoderBolt[T : TypeTag].  Unfortunately there is a known
  // serialization bug in Scala's TypeTag implementation that will trigger runtime exceptions when submitting/running
  // this class in a Storm topology.
  //
  // See "SI-5919: Type tags (and Exprs as well) should be serializable" (https://issues.scala-lang.org/browse/SI-5919)
  val tpe = manifest[T]

  // Must be transient because Logger is not serializable
  @transient lazy private val log: Logger = LoggerFactory.getLogger(classOf[AvroDecoderBolt[T]])

  // Must be transient because Injection is not serializable.  Must be implicit because that's how Injection works.
  @transient lazy implicit private val specificAvroBinaryInjection: Injection[T, Array[Byte]] =
    SpecificAvroCodecs.toBinary[T]

  override def execute(tuple: Tuple, collector: BasicOutputCollector) {
    val readTry = Try(tuple.getBinaryByField(inputField))
    readTry match {
      case Success(bytes) if bytes != null => decodeAndSinkToKafka(bytes, collector)
      case Success(_) => log.error("Reading from input tuple returned null")
      case Failure(e) => log.error("Could not read from input tuple: " + Throwables.getStackTraceAsString(e))
    }
  }

  private def decodeAndSinkToKafka(bytes: Array[Byte], collector: BasicOutputCollector) {
    require(bytes != null, "bytes must not be null")
    val decodeTry = Injection.invert[T, Array[Byte]](bytes)
    decodeTry match {
      case Success(pojo) =>
        log.debug("Binary data decoded into pojo: " + pojo)
        collector.emit(new Values(pojo))
      case Failure(e) => log.error("Could not decode binary data: " + Throwables.getStackTraceAsString(e))
    }
  }

  override def declareOutputFields(declarer: OutputFieldsDeclarer) {
    declarer.declare(new Fields(outputField))
  }

}

object AvroDecoderBolt {

  /**
   * Factory method for Java interoperability.
   *
   * @example {{{
   * // in Java
   * AvroDecoderBolt decoderBolt = AvroDecoderBolt.ofType(Stashy.class);
   * }}}
   *
   * @param cls
   * @tparam T
   * @return
   */
  def ofType[T <: SpecificRecordBase](cls: java.lang.Class[T]) = {
    val manifest = Manifest.classType[T](cls)
    newInstance[T](manifest)
  }

  private def newInstance[T <: SpecificRecordBase : Manifest] = new AvroDecoderBolt[T]
}
phosphene/kafka-storm-test-demo
src/main/scala/com/phosphene/kafkastorm/storm/AvroDecoderBolt.scala
Scala
apache-2.0
4,348
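A hedged wiring sketch that mirrors the bolt's own scaladoc example; it assumes `com.phosphene.avro.Tweet` is an Avro-generated `SpecificRecordBase` class and that a spout emitting a "bytes" field is registered under the made-up id "kafka-spout".

import backtype.storm.topology.TopologyBuilder
import com.phosphene.avro.Tweet

val builder = new TopologyBuilder
// ... a Kafka spout emitting a "bytes" field would be registered here under "kafka-spout" ...
val decoderBolt = new AvroDecoderBolt[Tweet](inputField = "bytes", outputField = "pojo")
builder.setBolt("avro-decoder", decoderBolt).shuffleGrouping("kafka-spout")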
package org.clulab.odin.impl import org.clulab.struct.Interval import org.clulab.processors.Document import org.clulab.odin._ trait GraphPattern { def arguments: Seq[ArgumentPattern] // separate the required and optional arguments protected val (required, optional) = arguments.partition(_.required) type Args = Map[String, Seq[Mention]] type Paths = Map[String, Map[Mention, SynPath]] val config: OdinConfig def getMentions( sent: Int, doc: Document, state: State, labels: Seq[String], keep: Boolean, ruleName: String ): Seq[Mention] protected def extractArguments( tokens: Interval, sent: Int, doc: Document, state: State ): Seq[(Args, Paths)] = { // extract required arguments val reqExtractions = extractArguments(required, tokens, sent, doc, state) // if not all required arguments were found, return Nil val reqNames = required.map(_.name) val foundAllRequired = reqNames.forall(reqExtractions.contains) if (!foundAllRequired) return Nil // get the arguments out of the extraction // while preserving the extraction groups val reqArgs: Seq[Seq[(String, Seq[Mention])]] = for { (name, mentionsWithPathsGroups) <- reqExtractions.toSeq } yield mentionsWithPathsGroups.map(g => name -> g.map(_._1)) // get the paths, resulting in an unserializable MapLike val reqPaths = reqExtractions.mapValues(_.flatten.toMap) // extract optional arguments val optExtractions = extractArguments(optional, tokens, sent, doc, state) // get the arguments out of the extraction val optArgs: Seq[Seq[(String, Seq[Mention])]] = for { (name, mentionsWithPathsGroups) <- optExtractions.toSeq } yield mentionsWithPathsGroups.map(g => name -> g.map(_._1)) // get the paths, resulting in an unserializable MapLike val optPaths = optExtractions.mapValues(_.flatten.toMap) // group the paths together, ensuring the result is a serializable Map val paths: Paths = Map.empty ++ reqPaths ++ optPaths // group the arguments together val args: Seq[Seq[(String, Seq[Mention])]] = reqArgs ++ optArgs // return cartesian product of arguments product(args).map(a => (a.toMap, paths)) } // Extracts the given arguments from any of the tokens in the interval. // Recall that each argument has arity, and their extractions are grouped // according to this arity. The extraction is represented as Seq[Seq[(Mention, SynPath)]]] // containing a sequence of extracted groups, each group has one or more (mention, syntactic path) tuples. // This function returns a map from argument name to extraction. 
private def extractArguments( arguments: Seq[ArgumentPattern], tokens: Interval, sent: Int, doc: Document, state: State ): Map[String, Seq[Seq[(Mention, SynPath)]]] = { val extractions = for { a <- arguments t <- tokens results = a.extract(t, sent, doc, state) if results.nonEmpty } yield (a.name -> results) extractions.toMap } // cartesian product // from: List(List(x1, x2, x3), List(y1, y2)) // to: List(List(x1, y1), List(x1, y2), List(x2, y1), List(x2, y2), List(x3, y1), List(x3, y2)) private def product[A](xss: Seq[Seq[A]]) = xss.foldRight(Seq(Seq[A]())) { (xs, lla) => xs.flatMap(x => lla.map(x +: _)) } } // creates an EventMention using a TokenPattern for the trigger class TriggerPatternGraphPattern( val trigger: TokenPattern, val arguments: Seq[ArgumentPattern], val config: OdinConfig ) extends GraphPattern { def getMentions( sent: Int, doc: Document, state: State, labels: Seq[String], keep: Boolean, ruleName: String ): Seq[Mention] = for { r <- trigger.findAllIn(sent, doc, state) trig = new TextBoundMention(labels, Interval(r.start, r.end), sent, doc, keep, ruleName) (args, paths) <- extractArguments(trig.tokenInterval, sent, doc, state) } yield new EventMention(labels, mkTokenInterval(trig, args), trig, args, paths, sent, doc, keep, ruleName) } // creates an EventMention by matching trigger mentions class TriggerMentionGraphPattern( val triggerLabel: String, val arguments: Seq[ArgumentPattern], val config: OdinConfig ) extends GraphPattern { def getMentions( sent: Int, doc: Document, state: State, labels: Seq[String], keep: Boolean, ruleName: String ): Seq[Mention] = for { mention <- state.mentionsFor(sent) if mention matches triggerLabel if mention.isInstanceOf[TextBoundMention] trig = mention.asInstanceOf[TextBoundMention] (args, paths) <- extractArguments(trig.tokenInterval, sent, doc, state) } yield new EventMention(labels, mkTokenInterval(trig, args), trig, args, paths, sent, doc, keep, ruleName) } // creates a RelationMention by matching mentions class RelationGraphPattern( val anchorName: String, val anchorLabel: String, val arguments: Seq[ArgumentPattern], val config: OdinConfig ) extends GraphPattern { def getMentions( sent: Int, doc: Document, state: State, labels: Seq[String], keep: Boolean, ruleName: String ): Seq[Mention] = for { mention <- state.mentionsFor(sent) if mention matches anchorLabel (args, paths) <- extractArguments(mention.tokenInterval, sent, doc, state) relationArgs = args + (anchorName -> Seq(mention)) relationPaths = paths + (anchorName -> Map(mention -> Nil)) } yield new RelationMention(labels, mkTokenInterval(relationArgs), relationArgs, relationPaths, sent, doc, keep, ruleName) }
sistanlp/processors
main/src/main/scala/org/clulab/odin/impl/GraphPattern.scala
Scala
apache-2.0
5,656
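For clarity, the cartesian-product helper used by GraphPattern.extractArguments can be exercised standalone; this is the same fold, just lifted out of the trait.

// Combines per-argument extraction groups into all full argument combinations.
def product[A](xss: Seq[Seq[A]]): Seq[Seq[A]] =
  xss.foldRight(Seq(Seq.empty[A])) { (xs, acc) => xs.flatMap(x => acc.map(x +: _)) }

product(Seq(Seq("x1", "x2", "x3"), Seq("y1", "y2")))
// -> Seq(Seq(x1, y1), Seq(x1, y2), Seq(x2, y1), Seq(x2, y2), Seq(x3, y1), Seq(x3, y2))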
package simx.components.ai.atn.aspects

import simx.components.ai.atn.ontology.{types => local}
import simx.core.entity.description.{EntityAspect, NamedSValSet, SValSet}
import simx.core.entity.typeconversion.ConvertibleTrait
import simx.core.ontology.{EntityDescription, Symbols}

case class StateDescription(id: Symbol, register: SValSet) extends EntityDescription (
  "ATN_State with ID: " + id.name,
  StateAspect(register)
){
  require(id != null, "the Parameter 'id' must not be null!")
  require(register != null, "the Parameter 'register' must not be null!")
}

case class StateAspect(register: SValSet, override val targets: List[Symbol] = Nil)
  extends EntityAspect(Symbols.atn, Symbols.state, targets) {

  def getFeatures: Set[ConvertibleTrait[_]] = Set(local.Register)

  def getProvidings: Set[ConvertibleTrait[_]] = Set(local.Register)

  def getCreateParams: NamedSValSet = addCVars(SValSet(local.Register(register)))
}
simulator-x/atn
src/simx/components/ai/atn/aspects/StateAspect.scala
Scala
apache-2.0
941
package controllers.api.singer import controllers.actions.{WithSinger, WithDBSession} import models.{SessionSongId, SongId, SessionSongFormatter} import play.api.db.slick.DBAction import play.api.libs.json.{JsError, JsSuccess, Json} import play.api.mvc.{Action, Controller} import repositories.{SessionRepositoryComponent, SingerRepositoryComponent, SessionSongRepositoryComponent} import repositories.SessionSongRepositoryMessages.{GuestRequestSongRequest, SessionSongComponentFormatter, RequestSongRequest} import services.SessionServiceComponent import scalaz._ trait SessionSongController extends Controller with WithDBSession with WithSinger { this: SessionSongRepositoryComponent with SessionServiceComponent with SingerRepositoryComponent => import SessionSongComponentFormatter._ import SessionSongFormatter._ def guestRequestSong() = DBAction(parse.tolerantJson) { req => implicit val db = req.dbSession val songRequest = Json.fromJson[GuestRequestSongRequest](req.body) songRequest match { case JsSuccess(r, p) => sessionService.guestRequestSong(r) match { case Success(song) => Created(Json.toJson(song)) case Failure(error) => BadRequest(error.toString) } case JsError(e) => BadRequest(e.toString()) case _ => BadRequest("wtf") } } def requestSong() = Action(parse.tolerantJson) { req => WithSinger(req) { (singer, dbSession) => implicit val db = dbSession val songRequest = Json.fromJson[RequestSongRequest](req.body) songRequest match { case JsSuccess(r, p) => sessionService.requestSong(r, singer) match { case Success(song) => Created(Json.toJson(song)) case Failure(error) => BadRequest(error.toString) } case JsError(e) => BadRequest(e.toString()) case _ => BadRequest("wtf") } } } def activeSongs() = Action { req => WithSinger(req) { (singer, dbSession) => implicit val db = dbSession Ok(Json.toJson(sessionSongRepository.activeSongsBySinger(singer.id.get))) } } def completedSongs() = Action { req => WithSinger(req) { (singer, dbSession) => implicit val db = dbSession Ok(Json.toJson(sessionSongRepository.completedSongsBySinger(singer.id.get))) } } def cancel(songId: SessionSongId) = Action { req => WithSinger(req) { (singer, dbSession) => implicit val dbs = dbSession sessionSongRepository.cancelSong(songId, singer.id.get) match { case Success(song) => Ok(Json.toJson(song)) case Failure(error) => BadRequest(error.toString) } } } } object SessionSongController extends SessionSongController with SingerRepositoryComponent with SessionSongRepositoryComponent with SessionRepositoryComponent with SessionServiceComponent
nagirrab/Karaoke
app/controllers/api/singer/SessionSongController.scala
Scala
mit
2,853
/* * Copyright 2010-2014 Benjamin Lings * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package net.codingwell.scalaguice import com.google.inject.name.{Named, Names} import com.google.inject.{AbstractModule, Guice} import net.codingwell.scalaguice.InjectorExtensions._ import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpec import scala.collection.{immutable => im} class ScalaMultibinderSpec extends AnyWordSpec with Matchers { private case class W[T](t: T) private val annotation = Names.named("N") "A multibinder" should { "bind empty [T]" in { val module = new AbstractModule with ScalaModule { override def configure(): Unit = { val multi = ScalaMultibinder.newSetBinder[String](binder) } } validate(Guice.createInjector(module).instance[im.Set[String]]) } "bind [TypeLiteral]" in { val module = new AbstractModule with ScalaModule { override def configure(): Unit = { val multi = ScalaMultibinder.newSetBinder(binder, typeLiteral[String]) multi.addBinding.toInstance("A") multi.addBinding.toInstance("B") } } validate(Guice.createInjector(module).instance[im.Set[String]], "A", "B") } "bind [Class]" in { val module = new AbstractModule with ScalaModule { override def configure(): Unit = { val multi = ScalaMultibinder.newSetBinder(binder, classOf[String]) multi.addBinding.toInstance("A") multi.addBinding.toInstance("B") } } validate(Guice.createInjector(module).instance[im.Set[String]], "A", "B") } "bind [TypeLiteral, Annotation]" in { val module = new AbstractModule with ScalaModule { override def configure(): Unit = { val multi = ScalaMultibinder.newSetBinder(binder, typeLiteral[String], annotation) multi.addBinding.toInstance("A") multi.addBinding.toInstance("B") } } validate(Guice.createInjector(module).instance[im.Set[String]](annotation), "A", "B") } "bind [Class, Annotation]" in { val module = new AbstractModule with ScalaModule { override def configure(): Unit = { val multi = ScalaMultibinder.newSetBinder(binder, classOf[String], annotation) multi.addBinding.toInstance("A") multi.addBinding.toInstance("B") } } validate(Guice.createInjector(module).instance[im.Set[String]](annotation), "A", "B") } "bind [TypeLiteral, ClassAnnotation]" in { val module = new AbstractModule with ScalaModule { override def configure(): Unit = { val multi = ScalaMultibinder.newSetBinder(binder, typeLiteral[String], classOf[Named]) multi.addBinding.toInstance("A") multi.addBinding.toInstance("B") } } validate(Guice.createInjector(module).instance[im.Set[String], Named], "A", "B") } "bind [Class, ClassAnnotation]" in { import com.google.inject.name.Named val module = new AbstractModule with ScalaModule { override def configure(): Unit = { val multi = ScalaMultibinder.newSetBinder(binder, classOf[String], classOf[Named]) multi.addBinding.toInstance("A") multi.addBinding.toInstance("B") } } validate(Guice.createInjector(module).instance[im.Set[String], Named], "A", "B") } "deduplicate" in { val module = new AbstractModule with ScalaModule { override def configure(): Unit = { val multi = 
ScalaMultibinder.newSetBinder(binder, typeLiteral[Symbol]) multi.addBinding.toInstance(Symbol("A")) multi.addBinding.toInstance(Symbol("A")) } } validate(Guice.createInjector(module).instance[im.Set[Symbol]], Symbol("A")) } "permit duplicates" in { val module = new AbstractModule with ScalaModule { override def configure(): Unit = { val multi = ScalaMultibinder.newSetBinder(binder, typeLiteral[Symbol]).permitDuplicates() multi.addBinding.toInstance(Symbol("A")) } } validate(Guice.createInjector(module).instance[im.Set[Symbol]], Symbol("A")) } "bind from multiple modules" in { def newModule(i: Int) = new AbstractModule with ScalaModule { override def configure(): Unit = { val multi = ScalaMultibinder.newSetBinder[Int](binder) multi.addBinding.toInstance(i) } } validate(Guice.createInjector(newModule(1), newModule(2)).instance[im.Set[Int]], 1, 2) } "bind deep parameterization in [Class]" in { val module = new AbstractModule with ScalaModule { override def configure(): Unit = { val mbStrings = ScalaMultibinder.newSetBinder(binder, classOf[W[String]]) mbStrings.addBinding.toInstance(W("A")) val mbInts = ScalaMultibinder.newSetBinder(binder, classOf[W[Int]]) mbInts.addBinding.toInstance(W(1)) } } val injector = Guice.createInjector(module) validate(injector.instance[im.Set[W[String]]], W("A")) validate(injector.instance[im.Set[W[Int]]], W(1)) } "bind deep parameterization in [Class, Annotation]" in { val module = new AbstractModule with ScalaModule { override def configure(): Unit = { val mbStrings = ScalaMultibinder.newSetBinder(binder, classOf[W[String]], annotation) mbStrings.addBinding.toInstance(W("A")) val mbInts = ScalaMultibinder.newSetBinder(binder, classOf[W[Int]], annotation) mbInts.addBinding.toInstance(W(1)) } } val injector = Guice.createInjector(module) validate(injector.instance[im.Set[W[String]]](annotation), W("A")) validate(injector.instance[im.Set[W[Int]]](annotation), W(1)) } "bind deep parameterization in [Class, ClassAnnotation]" in { val module = new AbstractModule with ScalaModule { override def configure(): Unit = { val mbStrings = ScalaMultibinder.newSetBinder(binder, classOf[W[String]], classOf[Named]) mbStrings.addBinding.toInstance(W("A")) val mbInts = ScalaMultibinder.newSetBinder(binder, classOf[W[Int]], classOf[Named]) mbInts.addBinding.toInstance(W(1)) } } val injector = Guice.createInjector(module) validate(injector.instance[im.Set[W[String]], Named], W("A")) validate(injector.instance[im.Set[W[Int]], Named], W(1)) } "bind deep parameterization in [TypeLiteral]" in { val module = new AbstractModule with ScalaModule { override def configure(): Unit = { val mbStrings = ScalaMultibinder.newSetBinder(binder, typeLiteral[W[String]]) mbStrings.addBinding.toInstance(W("A")) val mbInts = ScalaMultibinder.newSetBinder(binder, typeLiteral[W[Int]]) mbInts.addBinding.toInstance(W(1)) } } val injector = Guice.createInjector(module) validate(injector.instance[im.Set[W[String]]], W("A")) validate(injector.instance[im.Set[W[Int]]], W(1)) } "bind deep parameterization in [typeLiteral, Annotation]" in { val module = new AbstractModule with ScalaModule { override def configure(): Unit = { val mbStrings = ScalaMultibinder.newSetBinder(binder, typeLiteral[W[String]], annotation) mbStrings.addBinding.toInstance(W("A")) val mbInts = ScalaMultibinder.newSetBinder(binder, typeLiteral[W[Int]], annotation) mbInts.addBinding.toInstance(W(1)) } } val injector = Guice.createInjector(module) validate(injector.instance[im.Set[W[String]]](annotation), W("A")) 
validate(injector.instance[im.Set[W[Int]]](annotation), W(1)) } "bind deep parameterization in [TypeLiteral, ClassAnnotation]" in { val module = new AbstractModule with ScalaModule { override def configure(): Unit = { val mbStrings = ScalaMultibinder.newSetBinder(binder, typeLiteral[W[String]], classOf[Named]) mbStrings.addBinding.toInstance(W("A")) val mbInts = ScalaMultibinder.newSetBinder(binder, typeLiteral[W[Int]], classOf[Named]) mbInts.addBinding.toInstance(W(1)) } } val injector = Guice.createInjector(module) validate(injector.instance[im.Set[W[String]], Named], W("A")) validate(injector.instance[im.Set[W[Int]], Named], W(1)) } /** Scala Addons */ "bind [T]" in { val module = new AbstractModule with ScalaModule { override def configure(): Unit = { val multi = ScalaMultibinder.newSetBinder[String](binder) multi.addBinding.toInstance("A") multi.addBinding.toInstance("B") } } validate(Guice.createInjector(module).instance[im.Set[String]], "A", "B") } "bind [T, Ann]" in { import com.google.inject.name.Named val module = new AbstractModule with ScalaModule { override def configure(): Unit = { val multi = ScalaMultibinder.newSetBinder[String, Named](binder) multi.addBinding.toInstance("A") multi.addBinding.toInstance("B") } } validate(Guice.createInjector(module).instance[im.Set[String], Named], "A", "B") } "bind [T](Ann)" in { val module = new AbstractModule with ScalaModule { override def configure(): Unit = { val multi = ScalaMultibinder.newSetBinder[String](binder, annotation) multi.addBinding.toInstance("A") multi.addBinding.toInstance("B") } } validate(Guice.createInjector(module).instance[im.Set[String]](annotation), "A", "B") } "bind deep parameterization in [T]" in { val module = new AbstractModule with ScalaModule { override def configure(): Unit = { val mbStrings = ScalaMultibinder.newSetBinder[W[String]](binder) mbStrings.addBinding.toInstance(W("A")) val mbInts = ScalaMultibinder.newSetBinder[W[Int]](binder) mbInts.addBinding.toInstance(W(1)) } } val injector = Guice.createInjector(module) validate(injector.instance[im.Set[W[String]]], W("A")) validate(injector.instance[im.Set[W[Int]]], W(1)) } "bind deep parameterization in [T, Ann]" in { val module = new AbstractModule with ScalaModule { override def configure(): Unit = { val mbStrings = ScalaMultibinder.newSetBinder[W[String], Named](binder) mbStrings.addBinding.toInstance(W("A")) val mbInts = ScalaMultibinder.newSetBinder[W[Int], Named](binder) mbInts.addBinding.toInstance(W(1)) } } val injector = Guice.createInjector(module) validate(injector.instance[im.Set[W[String]], Named], W("A")) validate(injector.instance[im.Set[W[Int]], Named], W(1)) } "bind deep parameterization in [T](annotation)" in { val module = new AbstractModule with ScalaModule { override def configure(): Unit = { val mbStrings = ScalaMultibinder.newSetBinder[W[String]](binder, annotation) mbStrings.addBinding.toInstance(W("A")) val mbInts = ScalaMultibinder.newSetBinder[W[Int]](binder, annotation) mbInts.addBinding.toInstance(W(1)) } } val injector = Guice.createInjector(module) validate(injector.instance[im.Set[W[String]]](annotation), W("A")) validate(injector.instance[im.Set[W[Int]]](annotation), W(1)) } } private def validate[T](set: Set[T], expected: T*): Unit = { set should have size expected.length for (e <- expected) { set should contain(e) } } }
codingwell/scala-guice
src/test/scala/net/codingwell/scalaguice/ScalaMultibinderSpec.scala
Scala
apache-2.0
12,257
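A condensed sketch of the pattern the spec above exercises, stripped of the test harness; PluginModule and the "csv"/"json" bindings are invented names used only for illustration.

import com.google.inject.{AbstractModule, Guice}
import net.codingwell.scalaguice.{ScalaModule, ScalaMultibinder}
import net.codingwell.scalaguice.InjectorExtensions._
import scala.collection.{immutable => im}

class PluginModule extends AbstractModule with ScalaModule {
  override def configure(): Unit = {
    // Contribute elements to an injectable immutable.Set[String].
    val formats = ScalaMultibinder.newSetBinder[String](binder)
    formats.addBinding.toInstance("csv")
    formats.addBinding.toInstance("json")
  }
}

// The aggregated set is injectable anywhere; here it is pulled straight from
// the injector, exactly as the spec does.
val formats: im.Set[String] = Guice.createInjector(new PluginModule).instance[im.Set[String]]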
/* * Copyright 2016 Dennis Vriend * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package akka.persistence.jdbc.configuration import akka.persistence.jdbc.TestSpec import akka.persistence.jdbc.extension._ import com.typesafe.config.ConfigFactory class AkkaPersistenceConfigTest extends TestSpec { "JournalTableConfig" should "be parsed with no schemaName" in { val config = ConfigFactory.parseString( """ |akka-persistence-jdbc { | tables { | journal { | tableName = "journal" | schemaName = "" | columnNames { | } | } | } |} """.stripMargin ) JournalTableConfiguration(config) shouldBe JournalTableConfiguration( "journal", None, JournalTableColumnNames("persistence_id", "sequence_nr", "created", "tags", "message") ) } it should "map custom column names" in { val config = ConfigFactory.parseString( """ |akka-persistence-jdbc { | tables { | journal { | tableName = "events" | schemaName = "" | columnNames { | persistenceId = "pid" | sequenceNumber = "seqno" | created = "millis_from_epoch" | tags = "event_tags" | message = "event" | } | } | } |} """.stripMargin ) JournalTableConfiguration(config) shouldBe JournalTableConfiguration( "events", None, JournalTableColumnNames("pid", "seqno", "millis_from_epoch", "event_tags", "event") ) } it should "be parsed with a schemaName" in { val config = ConfigFactory.parseString( """ |akka-persistence-jdbc { | tables { | journal { | tableName = "journal" | schemaName = "public" | columnNames { | } | } | } |} """.stripMargin ) JournalTableConfiguration(config) shouldBe JournalTableConfiguration( "journal", Some("public"), JournalTableColumnNames("persistence_id", "sequence_nr", "created", "tags", "message") ) } "DeletedToTableConfiguration" should "be parsed with no schema name" in { val config = ConfigFactory.parseString( """ |akka-persistence-jdbc { | tables { | deletedTo { | tableName = "deleted_to" | schemaName = "" | columnNames { | } | } | } |} """.stripMargin ) DeletedToTableConfiguration(config) shouldBe DeletedToTableConfiguration( "deleted_to", None, DeletedToTableColumnNames( "persistence_id", "deleted_to" ) ) } it should "map custom column names" in { val config = ConfigFactory.parseString( """ |akka-persistence-jdbc { | tables { | deletedTo { | tableName = "deleted_to" | schemaName = "" | columnNames = { | persistenceId = "pid" | deletedTo = "removed_to" | } | } | } |} """.stripMargin ) DeletedToTableConfiguration(config) shouldBe DeletedToTableConfiguration( "deleted_to", None, DeletedToTableColumnNames( "pid", "removed_to" ) ) } it should "be parsed with a schemaName" in { val config = ConfigFactory.parseString( """ |akka-persistence-jdbc { | tables { | deletedTo { | tableName = "deleted_to" | schemaName = "public" | columnNames { | } | } | } |} """.stripMargin ) DeletedToTableConfiguration(config) shouldBe DeletedToTableConfiguration( "deleted_to", Some("public"), DeletedToTableColumnNames( "persistence_id", "deleted_to" ) ) } "SnapshotTableConfiguration" should "be parsed with no schemaName" in { val config = ConfigFactory.parseString( """ 
|akka-persistence-jdbc { | tables { | snapshot { | tableName = "snapshot" | schemaName = "" | columnNames { | } | } | } |} """.stripMargin ) SnapshotTableConfiguration(config) shouldBe SnapshotTableConfiguration( "snapshot", None, SnapshotTableColumnNames( "persistence_id", "sequence_nr", "created", "snapshot" ) ) } it should "map custom column names" in { val config = ConfigFactory.parseString( """ |akka-persistence-jdbc { | tables { | snapshot { | tableName = "snapshot" | schemaName = "" | columnNames { | persistenceId = "pid" | sequenceNumber = "seqno" | created = "millis_from_epoch" | snapshot = "data" | } | } | } |} """.stripMargin ) SnapshotTableConfiguration(config) shouldBe SnapshotTableConfiguration( "snapshot", None, SnapshotTableColumnNames( "pid", "seqno", "millis_from_epoch", "data" ) ) } it should "be parsed with a schemaName" in { val config = ConfigFactory.parseString( """ |akka-persistence-jdbc { | tables { | snapshot { | tableName = "snapshot" | schemaName = "public" | columnNames { | } | } | } |} """.stripMargin ) SnapshotTableConfiguration(config) shouldBe SnapshotTableConfiguration( "snapshot", Some("public"), SnapshotTableColumnNames( "persistence_id", "sequence_nr", "created", "snapshot" ) ) } }
wwwiiilll/akka-persistence-jdbc
src/test/scala/akka/persistence/jdbc/configuration/AkkaPersistenceConfigTest.scala
Scala
apache-2.0
6,633
package de.hyronx.matter.library

import org.scalatest._

case class TreeImpl(val value: Int) extends MutableTree[TreeImpl]

class TreeTest extends FlatSpec with Matchers {
  val tree = MutableTree(
    new TreeImpl(0),
    MutableTree(
      new TreeImpl(1),
      new TreeImpl(2),
      new TreeImpl(3)),
    MutableTree(
      new TreeImpl(4),
      new TreeImpl(5)))

  "A tree" should "find a nested child" in {
    val target = 3
    val result = tree.find(_.value == target)
    assert(result.isDefined && result.get.value == target)
  }

  it should "add a child and find it afterwards" in {
    val value = 6
    tree.addChild(new TreeImpl(value))
    assert(tree.lastChild.isDefined && tree.lastChild.get.value == value)
  }

  it should "know it's the root" in {
    assert(tree.isRoot)
  }

  it should "find its root" in {
    val target = 0
    val result = tree.find(_ == new TreeImpl(target))
    assert(result.isDefined && result.get.value == target)
  }

  it should "know if it's not the root" in {
    assert(!tree.children.head.isRoot)
  }

  it should "recognize its leaves" in {
    val target = 5
    val result = tree.find(_.value == target)
    assert(result.isDefined && result.get.isLeaf)
  }

  it should "build a path correctly" in {
    val target = 2
    val expectation = Stream(
      tree,
      tree.children.head,
      tree.children.head.children.head)
    val result = tree.find(_.value == target)
    assert(result.isDefined && result.get.path.equals(expectation))
  }

  it should "collect an element correctly" in {
    val target = tree.children.head.children.last
    val result = tree.collectFirst {
      case o @ TreeImpl(x) if x == 3 ⇒ o
    }
    assert(result.isDefined && result.get == target)
  }

  it should "correctly iterate over each child" in {
    val target = List(1, 2, 3, 4, 5, 6)
    var result = List.empty[Int]
    tree.foreach { child ⇒
      result = result :+ child.value
    }
    assert(result == target)
  }

  it should "correctly iterate over each child and itself" in {
    val target = List(0, 1, 2, 3, 4, 5, 6)
    var result = List.empty[Int]
    tree.foreach({ child ⇒
      result = result :+ child.value
    }, true)
    assert(result == target)
  }

  /*
  it should "correctly replace a node" in {
    val target = 100
    val result = tree.find(_.value == 3).map { node =>
      node.replaceNode(node.clone(value = 100))
    }
  }
  */
}
hyronx/matter
src/test/scala/de/hyronx/matter/library/TreeTest.scala
Scala
apache-2.0
2,400
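The spec above exercises a MutableTree API (find, addChild, foreach, isRoot) whose implementation is not part of this row. The following self-contained miniature provides just enough of those operations to make the intent concrete; it is a sketch, not the de.hyronx.matter library.

// Minimal mutable tree supporting the operations the spec above relies on.
final class Node[A](val value: A) {
  private var kids: Vector[Node[A]] = Vector.empty

  def addChild(v: A): Node[A] = { val n = new Node(v); kids = kids :+ n; n }
  def children: Vector[Node[A]] = kids

  // Depth-first traversal over this node and all descendants.
  def foreach(f: Node[A] => Unit): Unit = { f(this); kids.foreach(_.foreach(f)) }

  // First node (in depth-first order) whose value satisfies the predicate.
  def find(p: A => Boolean): Option[Node[A]] =
    if (p(value)) Some(this)
    else kids.iterator.map(_.find(p)).collectFirst { case Some(n) => n }
}

object NodeDemo extends App {
  val root = new Node(0)
  root.addChild(1).addChild(2)
  assert(root.find(_ == 2).isDefined)
}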
package eu.stratosphere.procrustes.datagen.spark

import eu.stratosphere.procrustes.datagen.util.Distributions._
import eu.stratosphere.procrustes.datagen.util.RanHash
import org.apache.spark.{SparkConf, SparkContext}

class SparkIntGenerator(master: String, numTasks: Int, tuplesPerTask: Long, keyDist: Distribution, output: String) {

  import SparkIntGenerator.SEED

  def run() = {
    val conf = new SparkConf().setAppName("integer-generator").setMaster(master)
    val sc = new SparkContext(conf)
    val n = tuplesPerTask
    val seed = SEED
    val kd = this.keyDist

    val dataset = sc.parallelize(0 until numTasks, numTasks).flatMap(i => {
      val partitionStart = n * i // the index of the first point in the current partition
      val randStart = partitionStart
      val rand = new RanHash(seed)
      rand.skipTo(seed + randStart)
      println(s"task $i generating the range from $partitionStart until ${partitionStart + n}")

      // val result = new Traversable[Int] {
      //   override def foreach[U](f: (Int) => U): Unit = {
      //     for (j <- partitionStart until (partitionStart + n)) yield {
      //       if (j % 1000 == 0) println(s"$i at pos $j (${(j - partitionStart) / (n * 1.0)}% ready)")
      //       Math.round(kd.sample(rand))
      //     }
      //   }
      // }
      val result = for (j <- partitionStart until (partitionStart + n)) yield {
        if (j % 1000 == 0) println(s"$i at pos $j (${(j - partitionStart) / (n * 1.0)}% ready)")
        Math.round(kd.sample(rand))
      }
      println("DONE!!!!")
      result
    })

    dataset.saveAsTextFile(output)
    sc.stop()
  }
}

object SparkIntGenerator {

  val SEED = 5431423142056L

  object Patterns {
    val Uniform = """Uniform\[(\d+)\]""".r
    val Gaussian = """Gaussian\[(\d+),(\d+)\]""".r
    val Pareto = """Pareto\[(\d+)\]""".r
    val TruncIntPareto = """TruncIntPareto\[(\d+),(\d+)\]""".r
  }

  def main(args: Array[String]): Unit = {
    if (args.length != 5) {
      throw new RuntimeException("Arguments count != 5")
    }

    val master: String = args(0)
    val numTasks: Int = args(1).toInt
    val tuplesPerTask: Int = args(2).toInt
    val keyDist: Distribution = parseDist(args(3))
    val output: String = args(4)
    val generator = new SparkIntGenerator(master, numTasks, tuplesPerTask, keyDist, output)
    generator.run()
  }

  def parseDist(s: String): Distribution = s match {
    case Patterns.Pareto(a) => Pareto(a.toDouble)
    case Patterns.Gaussian(a, b) => Gaussian(a.toDouble, b.toDouble)
    case Patterns.Uniform(a) => Uniform(a.toInt)
    case Patterns.TruncIntPareto(a, b) => TruncatedIntPareto(a.toInt, b.toInt)
    case _ => Pareto(1)
  }
}
stratosphere/tamara-msc-thesis
tamaras-msc-thesis-spark-datagens/src/main/scala/eu/stratosphere/procrustes/datagen/spark/SparkIntGenerator.scala
Scala
apache-2.0
2,692
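The generator above selects a key distribution by matching the command-line string against regex extractors (the Patterns object). Here is a stripped-down, self-contained sketch of that parsing technique; the Dist, Uniform and Pareto types are stand-ins, not the datagen project's Distribution hierarchy.

// Regex extractors as match patterns, mirroring parseDist above.
sealed trait Dist
case class Uniform(k: Int) extends Dist
case class Pareto(a: Double) extends Dist

object DistParser {
  private val UniformP = """Uniform\[(\d+)\]""".r
  private val ParetoP  = """Pareto\[(\d+)\]""".r

  def parse(s: String): Dist = s match {
    case UniformP(k) => Uniform(k.toInt)
    case ParetoP(a)  => Pareto(a.toDouble)
    case _           => Pareto(1.0) // fall back to a default, as the generator does
  }
}

object DistParserDemo extends App {
  assert(DistParser.parse("Uniform[100]") == Uniform(100))
}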
trait Functor[F[_]] {
  def fmap[A, B](fun: A => B, arg: F[A]): F[B]
}

object Functor {
  implicit val ListFunctor: Functor[List] = new Functor[List] {
    def fmap[A, B](f: A => B, arg: List[A]): List[B] = arg map f
  }

  final class OOFunctor[F[_], A](arg: F[A])(implicit ftr: Functor[F]) {
    def fmap[B](fun: A => B): F[B] = ftr.fmap(fun, arg)
  }

  // breaks if uncommented
  implicit def lifttoOO[F[_], A](arg: F[A])(implicit ftr: Functor[F]): OOFunctor[F, A] =
    new OOFunctor[F, A](arg)(ftr)

  // works if uncommented
  //implicit def liftListtoOO[A](arg: List[A]): OOFunctor[List, A] = new OOFunctor[List, A](arg)
}

object GeneralLiftingDemo extends App {
  import Functor.*

  val l = List(1, 2, 3)
  val res = l fmap (1 + _) // TODO: should not need explicit call to lifttoOO
  println("OO : " + res)
}
dotty-staging/dotty
tests/pos/tcpoly_infer_ticket716.scala
Scala
apache-2.0
792
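The row above pairs a Functor type class with an implicit lift that adds an object-oriented fmap method. The self-contained sketch below shows the same pattern for Option using an implicit class; the names Fmap and FmapOps are invented for illustration.

// Type class + extension-method lift, instantiated for Option.
trait Fmap[F[_]] { def fmap[A, B](f: A => B, fa: F[A]): F[B] }

object FmapDemo extends App {
  implicit val optionFmap: Fmap[Option] = new Fmap[Option] {
    def fmap[A, B](f: A => B, fa: Option[A]): Option[B] = fa map f
  }

  // Lifts any F[A] with an Fmap instance so that .fmap can be called directly on it.
  implicit class FmapOps[F[_], A](fa: F[A])(implicit F: Fmap[F]) {
    def fmap[B](f: A => B): F[B] = F.fmap(f, fa)
  }

  val res: Option[Int] = (Some(41): Option[Int]).fmap(_ + 1)
  println(res) // Some(42)
}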
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * File modified by Hortonworks, Inc. Modifications are also licensed under * the Apache Software License, Version 2.0. */ package org.apache.spark.sql.execution.datasources.hbase import scala.collection.mutable import org.json4s.JsonAST.JObject import org.json4s.jackson.JsonMethods._ import org.apache.avro.Schema import org.apache.hadoop.hbase.util.Bytes import org.apache.spark.sql.SQLContext import org.apache.spark.sql.catalyst.parser.CatalystSqlParser import org.apache.spark.sql.types._ import org.apache.spark.sql.execution.datasources.hbase.types._ case class CatalogVersion(major: Int, minor: Int) extends Comparable[CatalogVersion] { override def compareTo(o: CatalogVersion): Int = { if (major > o.major) 1 else if (major == o.major) minor - o.minor else -1 } override def toString: String = major + "." + minor } object CatalogVersion { def apply(s: String): CatalogVersion = { // Valid versions: "1.3", "1" // Invalid versions: ".3" if (!s.matches("^[0-9]{1,9}(\\\\.[0-9]{1,9})?$")) throw new IllegalArgumentException("Invalid version: " + s) val arr: Array[String] = s.split("\\\\.") var m: Int = 0 var n: Int = 0 try { m = Integer.parseInt(arr(0)) // must always have a major version number if (arr(1) != null && !arr(1).isEmpty) // minor version number is optional n = Integer.parseInt(arr(1)) } catch { case e: NumberFormatException => throw new IllegalArgumentException("Invalid version: " + s) } CatalogVersion(m, n) } } // The definition of each column cell, which may be composite type case class Field( colName: String, cf: String, col: String, fCoder: String, sType: Option[String] = None, avroSchema: Option[String] = None, len: Int = -1) extends Logging { val isRowKey = cf == HBaseTableCatalog.rowKey var start: Int = _ def schema: Option[Schema] = avroSchema.map { x => logDebug(s"avro: $x") val p = new Schema.Parser p.parse(x) } lazy val exeSchema = schema // converter from avro to catalyst structure lazy val avroToCatalyst: Option[Any => Any] = { schema.map(SchemaConverters.createConverterToSQL) } // converter from catalyst to avro lazy val catalystToAvro: (Any) => Any ={ SchemaConverters.createConverterToAvro(dt, colName, "recordNamespace") } val dt = if (avroSchema.isDefined) schema.map(SchemaConverters.toSqlType(_).dataType).get else sType.map(CatalystSqlParser.parseDataType).get val length: Int = { if (len == -1) { dt match { case BinaryType | StringType => -1 case BooleanType => Bytes.SIZEOF_BOOLEAN case ByteType => 1 case DoubleType => Bytes.SIZEOF_DOUBLE case FloatType => Bytes.SIZEOF_FLOAT case IntegerType => Bytes.SIZEOF_INT case LongType => Bytes.SIZEOF_LONG case ShortType => Bytes.SIZEOF_SHORT case _ => -1 } } else { len } } override def equals(other: Any): Boolean = other match { case that: Field => colName == 
that.colName && cf == that.cf && col == that.col case _ => false } } // The row key definition, with each key refer to the col defined in Field, e.g., // key1:key2:key3 case class RowKey(k: String) { val keys = k.split(":") var fields: Seq[Field] = _ var varLength = false def length = { val tmp = fields.foldLeft(0) { case (x, y) => val yLen = if (y.length == -1) { MaxLength } else { y.length } x + yLen } tmp } } // The map between the column presented to Spark and the HBase field case class SchemaMap(map: mutable.LinkedHashMap[String, Field]) { def toFields = map.map { case (name, field) => StructField(name, field.dt) }.toSeq def fields = map.values def getField(name: String) = map(name) } // The definition of HBase and Relation relation schema case class HBaseTableCatalog( val namespace: String, val name: String, row: RowKey, sMap: SchemaMap, tCoder: String, coderSet: Set[String], val numReg: Int, val splitRange: (String, String)) extends Logging { def toDataType = StructType(sMap.toFields) def getField(name: String) = sMap.getField(name) def getRowKey: Seq[Field] = row.fields def getPrimaryKey= row.keys(0) def getColumnFamilies = { sMap.fields.map(_.cf).filter(_ != HBaseTableCatalog.rowKey).toSeq.distinct } //this is required to read fromBytes column families and qualifiers val stringField = Field("","","",tCoder,Some("string")) val shcTableCoder = SHCDataTypeFactory.create(stringField) def initRowKey() = { val fields = sMap.fields.filter(_.cf == HBaseTableCatalog.rowKey) row.fields = row.keys.flatMap(n => fields.find(_.col == n)) // If the tCoder is PrimitiveType, We only allowed there is one key at the end // that is determined at runtime. if (tCoder == SparkHBaseConf.PrimitiveType) { if (row.fields.reverse.tail.filter(_.length == -1).isEmpty) { var start = 0 row.fields.foreach { f => f.start = start start += f.length } } else { throw new Exception("PrimitiveType: only the last dimension of RowKey is allowed to have " + "varied length. 
You may want to add 'length' to the dimensions which have " + "varied length or use dimensions which are scala/java primitive data " + "types of fixed length.") } } } initRowKey() def validateCatalogDef() = { if (!shcTableCoder.isRowKeySupported()) { throw new UnsupportedOperationException(s"$tCoder does not support row key, and can not be " + s"the table coder.") } if (coderSet.size > 1){ // Only Avro can be used with anther coder if (!coderSet.contains(SparkHBaseConf.Avro)) throw new UnsupportedOperationException("Two different coders can not be " + "used to encode/decode the same Hbase table") } // If the row key of the table is composite, check if the coder supports composite key if (row.fields.size > 1 && !shcTableCoder.isCompositeKeySupported) throw new UnsupportedOperationException(s"$tCoder: Composite key is not supported") } validateCatalogDef() } class CatalogDefinitionException(msg: String) extends Exception(msg) object HBaseTableCatalog { val newTable = "newtable" // The json string specifying hbase catalog information val tableCatalog = "catalog" // The row key with format key1:key2 specifying table row key val rowKey = "rowkey" // The key for hbase table whose value specify namespace and table name val table = "table" // The namespace of hbase table val nameSpace = "namespace" // The name of hbase table val tableName = "name" // The name of columns in hbase catalog val columns = "columns" val cf = "cf" val col = "col" val `type` = "type" // the name of avro schema json string val avro = "avro" val delimiter: Byte = 0 val length = "length" val fCoder = "coder" val tableCoder = "tableCoder" // The version number of catalog val cVersion = "version" val minTableSplitPoint = "minTableSplitPoint" val maxTableSplitPoint = "maxTableSplitPoint" /** * User provide table schema definition * {"tablename":"name", "rowkey":"key1:key2", * "columns":{"col1":{"cf":"cf1", "col":"col1", "type":"type1"}, * "col2":{"cf":"cf2", "col":"col2", "type":"type2"}}} * Note that any col in the rowKey, there has to be one corresponding col defined in columns */ def apply(parameters: Map[String, String]): HBaseTableCatalog = { val jString = parameters(tableCatalog) val jObj = parse(jString).asInstanceOf[JObject] val map = jObj.values val tableMeta = map.get(table).get.asInstanceOf[Map[String, _]] val nSpace = tableMeta.get(nameSpace).getOrElse("default").asInstanceOf[String] val tName = tableMeta.get(tableName).get.asInstanceOf[String] // Since the catalog version 2.0, SHC supports Phoenix as coder. // If the catalog version specified by users is equal or later than 2.0, tableCoder must be specified. // The default catalog version is 1.0, which uses 'PrimitiveType' as the default 'tableCoder'. 
val vNum = tableMeta.getOrElse(cVersion, "1.0").asInstanceOf[String] val tCoder = { if (CatalogVersion(vNum).compareTo(CatalogVersion("2.0")) < 0) { tableMeta.getOrElse(tableCoder, SparkHBaseConf.PrimitiveType).asInstanceOf[String] } else { val tc = tableMeta.get(tableCoder) if (tc.isEmpty) { throw new CatalogDefinitionException("Please specify 'tableCoder' in your catalog " + "if the catalog version is equal or later than 2.0") } tc.get.asInstanceOf[String] } } val schemaMap = mutable.LinkedHashMap.empty[String, Field] var coderSet = Set(tCoder) getColsPreservingOrder(jObj).foreach { case (name, column)=> val len = column.get(length).map(_.toInt).getOrElse(-1) val sAvro = column.get(avro).map(parameters(_)) val fc = if (sAvro.isDefined) SparkHBaseConf.Avro else column.getOrElse(fCoder, tCoder) coderSet += fc val f = Field(name, column.getOrElse(cf, rowKey), column.get(col).get, fc, column.get(`type`), sAvro, len) schemaMap.+= ((name, f)) } val numReg = parameters.get(newTable).map(x => x.toInt).getOrElse(0) val rKey = RowKey(map.get(rowKey).get.asInstanceOf[String]) val minSplit = parameters.get(minTableSplitPoint).getOrElse("aaaaaa") val maxSplit = parameters.get(maxTableSplitPoint).getOrElse("zzzzzz") HBaseTableCatalog(nSpace, tName, rKey, SchemaMap(schemaMap), tCoder, coderSet, numReg, (minSplit, maxSplit)) } /** * Retrieve the columns mapping from the JObject parsed from the catalog string, * and preserve the order of the columns specification. Note that we have to use * the AST level api of json4s, because if we cast the parsed object to a scala * map directly, it would lose the ordering info during the casting. */ def getColsPreservingOrder(jObj: JObject): Seq[(String, Map[String, String])] = { val jCols = jObj.obj.find(_._1 == columns).get._2.asInstanceOf[JObject] jCols.obj.map { case (name, jvalue) => (name, jvalue.values.asInstanceOf[Map[String, String]]) } } def main(args: Array[String]) { val complex = s"""MAP<int, struct<varchar:string>>""" val schema = s"""{"namespace": "example.avro", | "type": "record", "name": "User", | "fields": [ {"name": "name", "type": "string"}, | {"name": "favorite_number", "type": ["int", "null"]}, | {"name": "favorite_color", "type": ["string", "null"]} ] }""".stripMargin val catalog = s"""{ |"table":{"namespace":"default", "name":"htable"}, |"rowkey":"key1:key2", |"columns":{ |"col1":{"cf":"rowkey", "col":"key1", "type":"string"}, |"col2":{"cf":"rowkey", "col":"key2", "type":"double"}, |"col3":{"cf":"cf1", "col":"col1", "avro":"schema1"}, |"col4":{"cf":"cf1", "col":"col2", "type":"binary"}, |"col5":{"cf":"cf1", "col":"col3", "type":"double"}, |"col6":{"cf":"cf1", "col":"col4", "type":"$complex"} |} |}""".stripMargin val parameters = Map("schema1"->schema, tableCatalog->catalog) val t = HBaseTableCatalog(parameters) val d = t.toDataType println(d) val sqlContext: SQLContext = null } }
hortonworks-spark/shc
core/src/main/scala/org/apache/spark/sql/execution/datasources/hbase/HBaseTableCatalog.scala
Scala
apache-2.0
12,421
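The catalog code above validates and orders version strings such as "1.3" via CatalogVersion. A minimal, self-contained re-implementation of that parsing and ordering idea follows; it is a sketch only, and Version here is not the SHC class from the row.

// Parse "major[.minor]" and order versions; minor defaults to 0 when absent.
final case class Version(major: Int, minor: Int) extends Ordered[Version] {
  def compare(that: Version): Int =
    if (major != that.major) major - that.major else minor - that.minor
}

object Version {
  private val Pattern = """^(\d{1,9})(?:\.(\d{1,9}))?$""".r

  def parse(s: String): Version = s match {
    case Pattern(maj, min) => Version(maj.toInt, Option(min).fold(0)(_.toInt))
    case _                 => throw new IllegalArgumentException(s"Invalid version: $s")
  }
}

object VersionDemo extends App {
  assert(Version.parse("1.3") < Version.parse("2.0"))
  assert(Version.parse("2") == Version(2, 0))
}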
package lila.api import play.api.libs.json._ import reactivemongo.bson._ import chess.format.pgn.Pgn import lila.analyse.{ AnalysisRepo, Analysis } import lila.common.paginator.{ Paginator, PaginatorJson } import lila.common.PimpedJson._ import lila.db.api._ import lila.db.Implicits._ import lila.db.paginator.{ BSONAdapter, CachedAdapter } import lila.game.BSONHandlers._ import lila.game.Game.{ BSONFields => G } import lila.game.tube.gameTube import lila.game.{ Game, GameRepo, PerfPicker } import lila.hub.actorApi.{ router => R } import lila.user.User import makeTimeout.short private[api] final class GameApi( netBaseUrl: String, apiToken: String, pgnDump: PgnDump, analysisApi: AnalysisApi) { def byUser( user: User, rated: Option[Boolean], analysed: Option[Boolean], withAnalysis: Boolean, withMoves: Boolean, withOpening: Boolean, withMoveTimes: Boolean, token: Option[String], nb: Option[Int], page: Option[Int]): Fu[JsObject] = Paginator( adapter = new CachedAdapter( adapter = new BSONAdapter[Game]( collection = gameTube.coll, selector = BSONDocument( G.playerUids -> user.id, G.status -> BSONDocument("$gte" -> chess.Status.Mate.id), G.rated -> rated.map(_.fold[BSONValue](BSONBoolean(true), BSONDocument("$exists" -> false))), G.analysed -> analysed.map(_.fold[BSONValue](BSONBoolean(true), BSONDocument("$exists" -> false))) ), projection = BSONDocument(), sort = BSONDocument(G.createdAt -> -1) ), nbResults = fuccess { rated.fold(user.count.game)(_.fold(user.count.rated, user.count.casual)) } ), currentPage = math.max(0, page | 1), maxPerPage = math.max(1, math.min(100, nb | 10))) flatMap { pag => gamesJson( withAnalysis = withAnalysis, withMoves = withMoves, withOpening = withOpening, withFens = false, withMoveTimes = withMoveTimes, token = token)(pag.currentPageResults) map { games => PaginatorJson(pag withCurrentPageResults games) } } def one( id: String, withAnalysis: Boolean, withMoves: Boolean, withOpening: Boolean, withFens: Boolean, withMoveTimes: Boolean, token: Option[String]): Fu[Option[JsObject]] = $find byId id flatMap { _ ?? { g => gamesJson( withAnalysis = withAnalysis, withMoves = withMoves, withOpening = withOpening, withFens = withFens && g.finished, withMoveTimes = withMoveTimes, token = token )(List(g)) map (_.headOption) } } private def makeUrl(game: Game) = s"$netBaseUrl/${game.id}/${game.firstPlayer.color.name}" private def gamesJson( withAnalysis: Boolean, withMoves: Boolean, withOpening: Boolean, withFens: Boolean, withMoveTimes: Boolean, token: Option[String])(games: Seq[Game]): Fu[Seq[JsObject]] = AnalysisRepo doneByIds games.map(_.id) flatMap { analysisOptions => (games map GameRepo.initialFen).sequenceFu map { initialFens => val validToken = check(token) games zip analysisOptions zip initialFens map { case ((g, analysisOption), initialFen) => val pgnOption = withAnalysis option pgnDump(g, initialFen) gameToJson(g, makeUrl(g), analysisOption, pgnOption, initialFen, withAnalysis = withAnalysis, withMoves = withMoves, withOpening = withOpening, withFens = withFens, withBlurs = validToken, withHold = validToken, withMoveTimes = withMoveTimes) } } } private def check(token: Option[String]) = token ?? 
(apiToken==) private def gameToJson( g: Game, url: String, analysisOption: Option[Analysis], pgnOption: Option[Pgn], initialFen: Option[String], withAnalysis: Boolean, withMoves: Boolean, withOpening: Boolean, withFens: Boolean, withBlurs: Boolean = false, withHold: Boolean = false, withMoveTimes: Boolean = false) = Json.obj( "id" -> g.id, "initialFen" -> initialFen, "rated" -> g.rated, "variant" -> g.variant.key, "speed" -> g.speed.key, "perf" -> PerfPicker.key(g), "timestamp" -> g.createdAt.getDate, "turns" -> g.turns, "status" -> g.status.name, "clock" -> g.clock.map { clock => Json.obj( "initial" -> clock.limit, "increment" -> clock.increment, "totalTime" -> clock.estimateTotalTime ) }, "players" -> JsObject(g.players.zipWithIndex map { case (p, i) => p.color.name -> Json.obj( "userId" -> p.userId, "name" -> p.name, "rating" -> p.rating, "ratingDiff" -> p.ratingDiff, "provisional" -> p.provisional.option(true), "moveTimes" -> withMoveTimes.fold( g.moveTimes.zipWithIndex.filter(_._2 % 2 == i).map(_._1), JsNull), "blurs" -> withBlurs.option(p.blurs), "hold" -> p.holdAlert.ifTrue(withHold).fold[JsValue](JsNull) { h => Json.obj( "ply" -> h.ply, "mean" -> h.mean, "sd" -> h.sd ) }, "analysis" -> analysisOption.flatMap(analysisApi.player(p.color)) ).noNull }), "analysis" -> analysisOption.ifTrue(withAnalysis).|@|(pgnOption).apply(analysisApi.game), "moves" -> withMoves.option(g.pgnMoves mkString " "), "opening" -> withOpening.?? { g.opening map { opening => Json.obj("code" -> opening.code, "name" -> opening.name) } }, "fens" -> withFens ?? { chess.Replay.boards(g.pgnMoves, initialFen, g.variant).toOption map { boards => JsArray(boards map chess.format.Forsyth.exportBoard map JsString.apply) } }, "winner" -> g.winnerColor.map(_.name), "url" -> url ).noNull }
JimmyMow/lila
modules/api/src/main/GameApi.scala
Scala
mit
5,945
package ru.pavkin.todoist.api.core import java.util.{TimeZone, Date} import cats.Functor import cats.syntax.functor._ import ru.pavkin.todoist.api import ru.pavkin.todoist.api.core.model._ import ru.pavkin.todoist.api.core.tags.syntax._ import ru.pavkin.todoist.api.utils.Produce import shapeless.{Inl, Inr} import FromDTO.syntax._ import scala.util.Try trait FromDTO[DTO, Model] extends Produce[DTO, Model] object FromDTO { def apply[DTO, Model](f: DTO => Model): FromDTO[DTO, Model] = new FromDTO[DTO, Model] { def produce(a: DTO): Model = f(a) } private implicit class BinaryIntOps(a: Int) { def toBool = a match { case 1 => true case 0 => false case _ => api.unexpected } } private implicit class ModelIntOps(a: Int) { def toProjectColor = ProjectColor.unsafeBy(a) def toIndent = Indent.unsafeBy(a) def toLabelColor = LabelColor.unsafeBy(a) def toDayOfWeek = DayOfWeek.unsafeBy(a) } private implicit class BoolOptionOps(a: Option[Boolean]) { def toBool = a.exists(identity) } private implicit class StringDateOps(a: String) { def toDate: Date = TodoistDate.parse(a).getOrElse(api.unexpected) } object syntax { implicit class FromDTOSyntaxOps[DTO, Model](a: DTO)(implicit F: FromDTO[DTO, Model]) { def toModel: Model = F.produce(a) } } implicit def functorFromDTO[F[_] : Functor, DTO, Model](implicit F: FromDTO[DTO, Model]): FromDTO[F[DTO], F[Model]] = FromDTO(_.map(F.produce)) private def taskDateFromDTO(due_date_utc: Option[String], date_string: Option[String], date_lang: Option[String]): Option[TaskDate] = for { date <- due_date_utc.flatMap(TodoistDate.parse) lang <- date_lang.map(DateLanguage.unsafeBy) } yield TaskDate(date_string, lang, date) implicit val projectsFromDTO: FromDTO[dto.Project, Project] = FromDTO(a => if (!a.is_archived.toBool) { RegularProject( a.id.projectId, a.user_id.userId, a.name, a.color.toProjectColor, a.indent.toIndent, a.item_order, a.collapsed.toBool, a.shared, a.is_deleted.toBool, a.inbox_project.toBool, a.team_inbox.toBool ) } else { api.unexpected } ) implicit val labelsFromDTO: FromDTO[dto.Label, Label] = FromDTO(a => Label( a.id.labelId, a.uid.userId, a.name, a.color.toLabelColor, a.item_order, a.is_deleted.toBool ) ) implicit val tasksFromDTO: FromDTO[dto.Task, Task] = FromDTO(a => Task( a.id.taskId, a.user_id.userId, a.project_id.projectId, a.content, taskDateFromDTO(a.due_date_utc, a.date_string, Some(a.date_lang)), Priority.unsafeBy(a.priority), a.indent.toIndent, a.item_order, a.day_order, a.collapsed.toBool, a.labels.map(_.labelId), a.assigned_by_uid.map(_.userId), a.responsible_uid.map(_.userId), a.checked.toBool, a.in_history.toBool, a.is_deleted.toBool, a.is_archived.toBool, a.date_added.toDate ) ) implicit val filesFromDTO: FromDTO[dto.FileAttachment, FileAttachment] = FromDTO(a => FileAttachment( a.file_name, a.file_size, a.file_type, a.file_url, UploadState.unsafe(a.upload_state) ) ) implicit val notesFromDTO: FromDTO[dto.Note, Note] = FromDTO(a => Note( a.id.noteId, a.posted_uid.userId, a.item_id.taskId, a.project_id.projectId, a.content, a.file_attachment.map(_.toModel), a.uids_to_notify.toList.flatten.map(_.userId), a.is_deleted.toBool, a.is_archived.toBool, a.posted.toDate ) ) implicit val filtersFromDTO: FromDTO[dto.Filter, Filter] = FromDTO(a => Filter( a.id.filterId, a.name, a.query, a.color.toLabelColor, a.item_order, a.is_deleted.toBool ) ) implicit val remindersFromDTO: FromDTO[dto.Reminder, Reminder] = FromDTO(a => (a.`type` match { case "relative" | "absolute" => for { dueDate <- taskDateFromDTO(a.due_date_utc, a.date_string, a.date_lang) service 
<- a.service.map(ReminderService.unsafeBy) } yield if (a.`type` == "relative") RelativeTimeBasedReminder( a.id.reminderId, a.notify_uid.userId, a.item_id.taskId, service, dueDate, a.minute_offset.orElse(a.mm_offset).getOrElse(api.unexpected), a.is_deleted.toBool ) else AbsoluteTimeBasedReminder( a.id.reminderId, a.notify_uid.userId, a.item_id.taskId, service, dueDate, a.is_deleted.toBool ) case "location" => for { locName <- a.name lat <- a.loc_lat.flatMap(s => Try(s.toDouble).toOption) lon <- a.loc_long.flatMap(s => Try(s.toDouble).toOption) radius <- a.radius trigger <- a.loc_trigger.map(LocationBasedReminder.TriggerKind.unsafeBy) } yield LocationBasedReminder( a.id.reminderId, a.notify_uid.userId, a.item_id.taskId, locName, lat, lon, trigger, radius, a.is_deleted.toBool ) case _ => api.unexpected }).getOrElse(api.unexpected) ) def timezoneToDTO(id: String, offset: dto.TimeZoneOffset): TimeZone = { val idBased = TimeZone.getTimeZone(id) if (idBased.getID == "GMT" && (offset.hours + offset.minutes > 0)) TimeZone.getTimeZone(s"GMT${offset.gmtString}") else idBased } implicit val usersToDTO: FromDTO[dto.User, User] = FromDTO(a => User( a.id.userId, a.email, a.full_name, a.inbox_project.projectId, timezoneToDTO(a.timezone, a.tz_offset), a.start_page, a.start_day.toDayOfWeek, a.next_week.toDayOfWeek, TimeFormat.unsafeBy(a.time_format), DateFormat.unsafeBy(a.date_format), ProjectsSortOrder.unsafeBy(a.sort_order), a.has_push_reminders, a.default_reminder.map(ReminderService.unsafeBy), a.auto_reminder, a.mobile_number, a.mobile_host, a.completed_count, a.completed_today, a.karma, a.premium_until.flatMap(TodoistDate.parse), a.is_biz_admin, a.business_account_id, a.beta.toBool, a.is_dummy.toBool, a.join_date.toDate, Theme.unsafeBy(a.theme), UserAvatars(a.avatar_small, a.avatar_medium, a.avatar_big, a.avatar_s640) ) ) // auth implicit val accessTokenFromDTO: FromDTO[dto.AccessToken, AccessToken] = FromDTO(dto => AccessToken(dto.access_token, dto.token_type) ) // command results implicit val singleCommandStatusFromDTO: FromDTO[dto.RawItemStatus, SingleCommandStatus] = FromDTO { case Inl(_) => CommandSuccess case Inr(Inl(e)) => CommandFailure(e.error_code, e.error) case Inr(Inr(cNil)) => cNil.impossible } implicit val commandStatusFromDTO: FromDTO[dto.RawCommandStatus, CommandStatus] = FromDTO { case Inl(_) => CommandSuccess case Inr(Inl(e)) => CommandFailure(e.error_code, e.error) case Inr(Inr(Inl(s))) => MultiItemCommandStatus(s.map { case (id, status) => id.toInt -> singleCommandStatusFromDTO.produce(status) }) case Inr(Inr(Inr(cNil))) => cNil.impossible } def tempIdCommandStatusFromDTO(command: TempIdCommand[_], result: dto.RawCommandResult): Option[TempIdCommandResult] = result.SyncStatus.get(command.uuid.toString).flatMap { case Inl(_) => result.TempIdMapping .flatMap(_.get(command.tempId.toString)) .map(TempIdSuccess(command.tempId, _)) .map(TempIdCommandResult(command.uuid, _)) case Inr(Inl(e)) => Some(TempIdCommandResult(command.uuid, TempIdFailure(e.error_code, e.error))) case Inr(Inr(Inl(multipleCommandStatus))) => api.unexpected case Inr(Inr(Inr(cNil))) => cNil.impossible } }
vpavkin/scalist
core/src/main/scala/ru/pavkin/todoist/api/core/FromDTO.scala
Scala
mit
8,063
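FromDTO above is a Produce-style type class that maps wire DTOs to domain models, with a Functor-based instance lifting conversions through containers. The miniature below captures the same idea in a self-contained form; FromDto, UserDto and User are invented for illustration, not the scalist types.

// DTO-to-model conversion as a type class, with a derived instance for lists.
trait FromDto[D, M] { def convert(d: D): M }

object FromDto {
  def apply[D, M](f: D => M): FromDto[D, M] = new FromDto[D, M] { def convert(d: D): M = f(d) }

  // Lift an element conversion to a conversion of whole lists, mirroring functorFromDTO above.
  implicit def listFromDto[D, M](implicit F: FromDto[D, M]): FromDto[List[D], List[M]] =
    FromDto(_.map(F.convert))
}

object FromDtoDemo extends App {
  case class UserDto(id: Int, deleted: Int)
  case class User(id: Int, deleted: Boolean)

  implicit val userFromDto: FromDto[UserDto, User] = FromDto(d => User(d.id, d.deleted == 1))

  val models = implicitly[FromDto[List[UserDto], List[User]]].convert(List(UserDto(1, 0)))
  assert(models == List(User(1, false)))
}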
package chana.jpql import chana.avro import chana.avro.Insertlog import chana.avro.UpdateAction import chana.jpql import chana.jpql.nodes._ import org.apache.avro.Schema import org.apache.avro.generic.GenericData import org.apache.avro.generic.IndexedRecord import org.codehaus.jackson.JsonNode import org.codehaus.jackson.node.ObjectNode final class JPQLMapperInsert(val id: String, meta: JPQLInsert) extends JPQLEvaluator { protected def asToEntity = meta.asToEntity protected def asToJoin = meta.asToJoin def insertEval(record: IndexedRecord) = { val stmt = meta.stmt var toInserts = List[IndexedRecord]() if (asToJoin.nonEmpty) { val joinFieldName = asToJoin.head._2.tail.head val joinField = record.getSchema.getField(joinFieldName) val recordFlatView = new avro.RecordFlatView(record, joinField) val flatRecs = recordFlatView.iterator while (flatRecs.hasNext) { val rec = flatRecs.next val whereCond = stmt.where.fold(true) { x => whereClause(x, rec) } if (whereCond) { toInserts ::= rec } } } else { val whereCond = stmt.where.fold(true) { x => whereClause(x, record) } if (whereCond) { toInserts ::= record } } var actions = List[UpdateAction]() // insert always happens to non flattern record val willInsert = toInserts.find { case avro.FlattenRecord(underlying, _, _, _) => underlying eq record case x => x eq record } isDefined if (willInsert) { val recFields = stmt.attributes match { case Some(x) => attributesClause(x, record).map { attr => record.getSchema.getField(attr) } case None => import scala.collection.JavaConversions._ record.getSchema.getFields.toList } val rows = valuesClause(stmt.values, record) for (row <- rows) { var fieldToValue = List[(Schema.Field, Any)]() var values = row var fields = recFields while (fields.nonEmpty) { if (values.nonEmpty) { fieldToValue ::= (fields.head, values.head) fields = fields.tail values = values.tail } else { throw JPQLRuntimeException(fields.head, "does not have coresponding value.") } } actions :::= opInsert(fieldToValue, record) } } actions.reverse } private def opInsert(fieldToValue: List[(Schema.Field, Any)], record: IndexedRecord) = { var actions = List[UpdateAction]() for ((field, v) <- fieldToValue) yield { field.schema.getType match { case Schema.Type.ARRAY => val elemSchema = avro.getElementType(field.schema) val value = v match { case x: JsonNode => avro.FromJson.fromJson(x, elemSchema) case x => x } val arr = record.get(field.pos) match { case null => new GenericData.Array[Any](0, field.schema) case xs: java.util.Collection[Any] @unchecked => xs } val prev = GenericData.get().deepCopy(field.schema, record.get(field.pos)) val rlback = { () => record.put(field.pos, prev) } val commit = { () => arr.add(value) } val xpath = "/" + field.name val xs = java.util.Arrays.asList(value) val bytes = avro.avroEncode(xs, field.schema).get actions ::= UpdateAction(commit, rlback, Insertlog(xpath, xs, bytes)) case Schema.Type.MAP => val valueSchema = avro.getValueType(field.schema) val (key, value) = v match { case x: ObjectNode => val kvs = x.getFields // should contain only one entry if (kvs.hasNext) { val kv = kvs.next val k = kv.getKey (k, avro.FromJson.fromJson(kv.getValue, valueSchema)) } else { throw JPQLRuntimeException(x, "does not contain anything") } case (k: String, v) => (k, v) case _ => throw JPQLRuntimeException(v, "does not contain anything") } val map = record.get(field.pos) match { case null => new java.util.HashMap[String, Any]() case xs: java.util.Map[String, Any] @unchecked => xs } val prev = GenericData.get().deepCopy(field.schema, 
record.get(field.pos)) val rlback = { () => record.put(field.pos, prev) } val commit = { () => map.put(key, value) } val xpath = "/" + field.name val xs = new java.util.HashMap[String, Any](1) xs.put(key, value) val bytes = avro.avroEncode(xs, field.schema).get actions ::= UpdateAction(commit, rlback, Insertlog(xpath, xs, bytes)) case _ => val value = v match { case x: JsonNode => avro.FromJson.fromJson(x, field.schema) case x => x } val prev = GenericData.get().deepCopy(field.schema, record.get(field.pos)) val rlback = { () => record.put(field.pos, prev) } val commit = { () => record.put(field.pos, value) } val xpath = "/" + field.name val xs = java.util.Arrays.asList(value) val bytes = avro.avroEncode(value, Schema.createArray(field.schema)).get actions ::= UpdateAction(commit, rlback, Insertlog(xpath, xs, bytes)) } } actions } }
hustnn/chana
src/main/scala/chana/jpql/JPQLMapperInsert.scala
Scala
apache-2.0
5,597
package se.marcuslonnberg.scaladocker.remote.api

import java.io.File
import java.nio.file.Files
import java.nio.file.attribute.PosixFilePermission

import scala.collection.convert.wrapAsScala._

object FileUtils {
  def listFilesRecursive(base: File): Map[String, File] = {
    def traverse(source: File, baseDestination: String): Map[String, File] = {
      val fileMappings = source.listFiles() match {
        case null => Map.empty[String, File]
        case files =>
          files.flatMap { file =>
            val path = baseDestination + file.getName
            if (file.isDirectory) {
              traverse(file, path + "/") + (path -> file)
            } else {
              Map(path -> file)
            }
          }.toMap
      }
      fileMappings
    }

    traverse(base, baseDestination = "/")
  }

  def filePermissions(file: File): Int = {
    val permissions = Files.getPosixFilePermissions(file.toPath)
    permissions.toSeq.map(_.toInt).sum
  }

  implicit class RichPosixFilePermission(val permission: PosixFilePermission) extends AnyVal {
    def toInt: Int = {
      permission match {
        case PosixFilePermission.OTHERS_EXECUTE => 1
        case PosixFilePermission.OTHERS_READ => 2
        case PosixFilePermission.OTHERS_WRITE => 4
        case PosixFilePermission.GROUP_EXECUTE => 10
        case PosixFilePermission.GROUP_READ => 20
        case PosixFilePermission.GROUP_WRITE => 40
        case PosixFilePermission.OWNER_EXECUTE => 100
        case PosixFilePermission.OWNER_READ => 200
        case PosixFilePermission.OWNER_WRITE => 400
      }
    }
  }
}
marcuslonnberg/scala-docker
src/main/scala/se/marcuslonnberg/scaladocker/remote/api/FileUtils.scala
Scala
mit
1,611
package spire

import spire.algebra._
import spire.algebra.partial._
import spire.optional.partialIterable._
import spire.optional.mapIntIntPermutation._
import spire.std.int._

import org.scalatest.{FunSuite, Matchers, NonImplicitAssertions}
import org.scalatest.prop.Checkers

import org.scalacheck.{Arbitrary, Gen}
import org.scalacheck.Arbitrary._
import org.scalacheck.Prop._

import spire.syntax.eq._
import spire.std.boolean._

class PartialSyntaxTest extends FunSuite with Checkers with BaseSyntaxTest with NonImplicitAssertions {
  import laws.SpireArbitrary._

  implicit val IntGroup: Group[Int] = implicitly[AdditiveGroup[Int]].additive
  implicit val SeqIntEq: Eq[Seq[Int]] = spire.optional.genericEq.generic[Seq[Int]]

  test("Semigroupoid syntax")(check(forAll { (a: Seq[Int], b: Seq[Int]) => testSemigroupoidSyntax(a, b) }))
  test("Groupoid syntax")(check(forAll { (a: Seq[Int], b: Seq[Int]) => testGroupoidSyntax(a, b) }))
  test("Partial action syntax")(check(forAll { (seq: Seq[Int], perm: Perm) => testPartialActionSyntax(seq, perm.map) }))

  def testSemigroupoidSyntax[A: Semigroupoid: Eq](a: A, b: A) = {
    import spire.syntax.semigroupoid._
    ((a |+|? b) === Semigroupoid[A].partialOp(a, b)) &&
      ((a |+|?? b) === Semigroupoid[A].opIsDefined(a, b))
  }

  def testGroupoidSyntax[A: Groupoid: Eq](a: A, b: A) = {
    import spire.syntax.groupoid._
    (a.isId === Groupoid[A].isId(a)) &&
      (a.leftId === Groupoid[A].leftId(a)) &&
      (a.rightId === Groupoid[A].rightId(a)) &&
      ((a |+|? b) === Groupoid[A].partialOp(a, b)) &&
      ((a |+|?? b) === Groupoid[A].opIsDefined(a, b)) &&
      ((a |-|? b) === Groupoid[A].partialOpInverse(a, b)) &&
      ((a |-|?? b) === Groupoid[A].opInverseIsDefined(a, b))
  }

  def testPartialActionSyntax(seq: Seq[Int], perm: Map[Int, Int]) = {
    import spire.syntax.partialAction._
    ((perm ?|+|> seq) === PartialAction[Seq[Int], Map[Int, Int]].partialActl(perm, seq)) &&
      ((seq <|+|? perm) === PartialAction[Seq[Int], Map[Int, Int]].partialActr(seq, perm)) &&
      ((perm ??|+|> seq) === PartialAction[Seq[Int], Map[Int, Int]].actlIsDefined(perm, seq)) &&
      ((seq <|+|?? perm) === PartialAction[Seq[Int], Map[Int, Int]].actrIsDefined(seq, perm))
  }
}
woparry/spire
tests/src/test/scala/spire/PartialSyntaxTest.scala
Scala
mit
2,223
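The syntax under test above delegates to partial algebraic operations that are only defined for some argument pairs. As a self-contained illustration of that "partial op" idea (not spire's actual Semigroupoid), here is a toy type whose composition is defined only when the endpoints line up.

// Composition of chains is partial: it succeeds only when one ends where the other starts.
final case class Chain(from: Int, to: Int)

object Chain {
  def partialOp(a: Chain, b: Chain): Option[Chain] =
    if (a.to == b.from) Some(Chain(a.from, b.to)) else None

  def opIsDefined(a: Chain, b: Chain): Boolean = a.to == b.from
}

object ChainDemo extends App {
  assert(Chain.partialOp(Chain(0, 1), Chain(1, 2)) == Some(Chain(0, 2)))
  assert(!Chain.opIsDefined(Chain(0, 1), Chain(2, 3)))
}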
package com.nrinaudo.fetch

import java.util.Locale

/** Tools for creating instances of [[Language]]. */
object Language {
  /** Represents a global language, regardless of regional versions.
    *
    * English, for example, is a global language, as opposed to American English which is specific to the US.
    *
    * @param lang code of the language (for example, `en`, `fr`....).
    */
  final case class Global(lang: String) extends Language {
    override def toLocale: Locale = new Locale(lang)
    override def toString = lang
  }

  /** Represents a regional version of a given language.
    *
    * Canadian French, for example, is a region specific version of the global French language.
    *
    * @param lang    code of the language (for example, `en`, `fr`...).
    * @param country code of the country (for example, `FR`, `US`...).
    */
  final case class RegionSpecific(lang: String, country: String) extends Language {
    override def toLocale: Locale = new Locale(lang, country)
    override def toString = "%s-%s" format (lang, country)
  }

  /** Describes the grammar used to read and write languages. */
  trait Grammar extends HttpGrammar {
    def tag: Parser[String] = """\p{Alpha}{1,8}""".r

    def language: Parser[Language] = tag ~ opt("-" ~> tag) ^^ {
      case main ~ Some(sub) => RegionSpecific(main, sub)
      case main ~ _         => Global(main)
    }
  }

  private object Format extends Grammar {
    def apply(value: String): Option[Language] = parseAll(language, value).map(Some(_)).getOrElse(None)
  }

  def parse(str: String): Option[Language] = Format(str)

  /** Returns the instance of [[Language]] that matches the specified locale. */
  def apply(locale: Locale): Language =
    if(locale.getCountry.isEmpty) Global(locale.getLanguage)
    else RegionSpecific(locale.getLanguage, locale.getCountry)

  def unapply(language: Language): Some[Locale] = Some(language.toLocale)
}

/** Represents an entity's language.
  *
  * These can be used for content negotiation or to describe the language in which a response is written.
  *
  * A language can either be [[com.nrinaudo.fetch.Language.Global global]] or
  * [[com.nrinaudo.fetch.Language.RegionSpecific country-specific]].
  */
sealed trait Language {
  /** Returns the instance of `Locale` associated with this language. */
  def toLocale: Locale
}
nrinaudo/fetch
core/src/main/scala/com/nrinaudo/fetch/Language.scala
Scala
mit
2,386
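Language.parse above splits a tag such as "en" or "en-US" into a global or region-specific language. A minimal, self-contained sketch of that split is shown below; it works directly on java.util.Locale and does not use the fetch library's parser combinators.

import java.util.Locale

object LangTagDemo extends App {
  // "en" => language only; "en-US" => language plus country; anything else falls back to ROOT.
  def toLocale(tag: String): Locale = tag.split("-") match {
    case Array(lang)          => new Locale(lang)
    case Array(lang, country) => new Locale(lang, country)
    case _                    => Locale.ROOT
  }

  assert(toLocale("fr").getLanguage == "fr")
  assert(toLocale("en-US").getCountry == "US")
}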
object Test extends App {
  object Foo
  val foo = "foo"

  implicitly[ValueOf[1]]
  implicitly[ValueOf[1L]]
  implicitly[ValueOf[1.0]]
  implicitly[ValueOf[1.0F]]
  implicitly[ValueOf[true]]
  implicitly[ValueOf['f']]
  implicitly[ValueOf["foo"]]
  implicitly[ValueOf['foo]]
  implicitly[ValueOf[Unit]]
  implicitly[ValueOf[Foo.type]]
  implicitly[ValueOf[foo.type]]

  assert((valueOf[1]: 1) == 1)
  assert((valueOf[1L]: 1L) == 1L)
  assert((valueOf[1.0]: 1.0) == 1.0)
  assert((valueOf[1.0F]: 1.0F) == 1.0F)
  assert((valueOf[true]: true) == true)
  assert((valueOf['f']: 'f') == 'f')
  assert((valueOf["foo"]: "foo") == "foo")
  assert((valueOf['foo]: 'foo) == 'foo)
  assert((valueOf[Unit]: Unit) == ((): Any))
  assert((valueOf[Foo.type]: Foo.type) eq Foo)
  assert((valueOf[foo.type]: foo.type) eq foo)
}
jastice/intellij-scala
scala/scala-impl/testdata/annotator/literalTypes/sip23ValueOf.scala
Scala
apache-2.0
811
/* * FILE: JoinQueryDetector.scala * Copyright (c) 2015 - 2019 GeoSpark Development Team * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.geosparksql.strategy.join import org.apache.spark.sql.Strategy import org.apache.spark.sql.catalyst.expressions.{Expression, LessThan, LessThanOrEqual} import org.apache.spark.sql.catalyst.plans.Inner import org.apache.spark.sql.catalyst.plans.logical.{Join, LogicalPlan} import org.apache.spark.sql.execution.SparkPlan import org.apache.spark.sql.geosparksql.expressions._ /** * Plans `RangeJoinExec` for inner joins on spatial relationships ST_Contains(a, b) * and ST_Intersects(a, b). * * Plans `DistanceJoinExec` for inner joins on spatial relationship ST_Distance(a, b) < r. */ object JoinQueryDetector extends Strategy { /** * Returns true if specified expression has at least one reference and all its references * map to the output of the specified plan. */ private def matches(expr: Expression, plan: LogicalPlan): Boolean = expr.references.find(plan.outputSet.contains(_)).isDefined && expr.references.find(!plan.outputSet.contains(_)).isEmpty private def matchExpressionsToPlans(exprA: Expression, exprB: Expression, planA: LogicalPlan, planB: LogicalPlan): Option[(LogicalPlan, LogicalPlan)] = if (matches(exprA, planA) && matches(exprB, planB)) { Some((planA, planB)) } else if (matches(exprA, planB) && matches(exprB, planA)) { Some((planB, planA)) } else { None } def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match { // ST_Contains(a, b) - a contains b case Join(left, right, Inner, Some(ST_Contains(Seq(leftShape, rightShape)))) => planSpatialJoin(left, right, Seq(leftShape, rightShape), false) // ST_Intersects(a, b) - a intersects b case Join(left, right, Inner, Some(ST_Intersects(Seq(leftShape, rightShape)))) => planSpatialJoin(left, right, Seq(leftShape, rightShape), true) // ST_WITHIN(a, b) - a is within b case Join(left, right, Inner, Some(ST_Within(Seq(leftShape, rightShape)))) => planSpatialJoin(right, left, Seq(rightShape, leftShape), false) // ST_Overlaps(a, b) - a overlaps b case Join(left, right, Inner, Some(ST_Overlaps(Seq(leftShape, rightShape)))) => planSpatialJoin(right, left, Seq(rightShape, leftShape), false) // ST_Touches(a, b) - a touches b case Join(left, right, Inner, Some(ST_Touches(Seq(leftShape, rightShape)))) => planSpatialJoin(left, right, Seq(leftShape, rightShape), true) // ST_Distance(a, b) <= radius consider boundary intersection case Join(left, right, Inner, Some(LessThanOrEqual(ST_Distance(Seq(leftShape, rightShape)), radius))) => planDistanceJoin(left, right, Seq(leftShape, rightShape), radius, true) // ST_Distance(a, b) < radius don't consider boundary intersection case Join(left, right, Inner, Some(LessThan(ST_Distance(Seq(leftShape, rightShape)), radius))) => planDistanceJoin(left, right, Seq(leftShape, rightShape), radius, false) // ST_Equals(a, b) - a is equal to b case Join(left, right, Inner, Some(ST_Equals(Seq(leftShape, rightShape)))) => planSpatialJoin(left, right, Seq(leftShape, rightShape), 
false) // ST_Crosses(a, b) - a crosses b case Join(left, right, Inner, Some(ST_Crosses(Seq(leftShape, rightShape)))) => planSpatialJoin(right, left, Seq(rightShape, leftShape), false) case _ => Nil } private def planSpatialJoin(left: LogicalPlan, right: LogicalPlan, children: Seq[Expression], intersects: Boolean, extraCondition: Option[Expression] = None): Seq[SparkPlan] = { val a = children.head val b = children.tail.head val relationship = if (intersects) "ST_Intersects" else "ST_Contains"; matchExpressionsToPlans(a, b, left, right) match { case Some((planA, planB)) => logInfo(s"Planning spatial join for $relationship relationship") RangeJoinExec(planLater(planA), planLater(planB), a, b, intersects, extraCondition) :: Nil case None => logInfo( s"Spatial join for $relationship with arguments not aligned " + "with join relations is not supported") Nil } } private def planDistanceJoin(left: LogicalPlan, right: LogicalPlan, children: Seq[Expression], radius: Expression, intersects: Boolean, extraCondition: Option[Expression] = None): Seq[SparkPlan] = { val a = children.head val b = children.tail.head val relationship = if (intersects) "ST_Distance <=" else "ST_Distance <"; matchExpressionsToPlans(a, b, left, right) match { case Some((planA, planB)) => if (radius.references.isEmpty || matches(radius, planA)) { logInfo("Planning spatial distance join") DistanceJoinExec(planLater(planA), planLater(planB), a, b, radius, intersects, extraCondition) :: Nil } else if (matches(radius, planB)) { logInfo("Planning spatial distance join") DistanceJoinExec(planLater(planB), planLater(planA), b, a, radius, intersects, extraCondition) :: Nil } else { logInfo( "Spatial distance join for ST_Distance with non-scalar radius " + "that is not a computation over just one side of the join is not supported") Nil } case None => logInfo( "Spatial distance join for ST_Distance with arguments not " + "aligned with join relations is not supported") Nil } } }
Sarwat/GeoSpark
sql/src/main/scala/org/apache/spark/sql/geosparksql/strategy/join/JoinQueryDetector.scala
Scala
mit
6,451
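The heart of the strategy above is matchExpressionsToPlans, which accepts the two geometry expressions in either order as long as each references exactly one side of the join. A stripped-down, Spark-free sketch of that alignment check follows; Expr and Plan are stand-ins for Catalyst's Expression and LogicalPlan, and the attribute names are illustrative.

object AlignDemo extends App {
  final case class Expr(refs: Set[String])
  final case class Plan(outputs: Set[String])

  // An expression "matches" a plan when it references at least one attribute and all of them
  // belong to that plan, just like the private matches helper above.
  def matches(e: Expr, p: Plan): Boolean =
    e.refs.nonEmpty && e.refs.subsetOf(p.outputs)

  // Accept (a, b) against (left, right) in either orientation.
  def align(a: Expr, b: Expr, left: Plan, right: Plan): Option[(Plan, Plan)] =
    if (matches(a, left) && matches(b, right)) Some((left, right))
    else if (matches(a, right) && matches(b, left)) Some((right, left))
    else None

  val left = Plan(Set("l.geom"))
  val right = Plan(Set("r.geom"))
  assert(align(Expr(Set("r.geom")), Expr(Set("l.geom")), left, right) == Some((right, left)))
}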
import scala.quoted._

object Bar {
  Foo.myMacro() // error
}
som-snytt/dotty
tests/neg-macros/macros-in-same-project-6/Bar.scala
Scala
apache-2.0
63
package org.jetbrains.plugins.hocon.editor

import com.intellij.codeInsight.editorActions.JoinLinesHandlerDelegate
import com.intellij.codeInsight.editorActions.JoinLinesHandlerDelegate.CANNOT_JOIN
import com.intellij.openapi.editor.Document
import com.intellij.psi.PsiFile
import com.intellij.util.text.CharArrayUtil
import org.jetbrains.plugins.hocon.CommonUtil
import org.jetbrains.plugins.hocon.lexer.HoconTokenSets
import org.jetbrains.plugins.hocon.psi.HoconPsiFile

/**
  * HOCON line comments can start with either '//' or '#'. Unfortunately, only one of them can be declared in
  * [[HoconCommenter]] and so I need this custom join lines handler to properly handle both.
  */
class HoconCommentJoinLinesHandler extends JoinLinesHandlerDelegate {
  def tryJoinLines(document: Document, file: PsiFile, start: Int, end: Int): Int = file match {
    case _: HoconPsiFile =>
      import CommonUtil._
      val element = file.findElementAt(start)
      if (element != null && HoconTokenSets.Comment.contains(element.getNode.getElementType)) {
        val joinedSequence = document.getCharsSequence.subSequence(end, document.getTextLength)
        List("#", "//").find(joinedSequence.startsWith).map { nextPrefix =>
          val toRemoveEnd = CharArrayUtil.shiftForward(document.getCharsSequence,
            end + nextPrefix.length, element.getTextRange.getEndOffset, " \t")
          document.replaceString(start + 1, toRemoveEnd, " ")
          start + 1
        } getOrElse CANNOT_JOIN
      } else CANNOT_JOIN
    case _ => CANNOT_JOIN
  }
}
ghik/intellij-hocon
src/org/jetbrains/plugins/hocon/editor/HoconCommentJoinLinesHandler.scala
Scala
apache-2.0
1,551
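The handler above joins two HOCON comment lines by dropping the second line's comment prefix, whether it is '#' or '//'. Below is a self-contained string-level sketch of that behaviour; the real handler operates on the editor Document and PSI elements, not plain strings.

object JoinCommentsDemo extends App {
  // When the second line starts with a comment prefix, strip it so the two comments merge.
  def joinCommentLines(first: String, second: String): String = {
    val trimmed = second.trim
    val body = List("//", "#").collectFirst {
      case p if trimmed.startsWith(p) => trimmed.stripPrefix(p).trim
    }.getOrElse(trimmed)
    s"${first.trim} $body"
  }

  assert(joinCommentLines("# part one", "// part two") == "# part one part two")
}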
package reactivemongo.core.protocol import akka.actor.ActorRef import reactivemongo.io.netty.channel.{ ChannelHandlerContext, ChannelDuplexHandler, ChannelPromise } import reactivemongo.io.netty.handler.timeout.IdleStateEvent import reactivemongo.core.actors.{ ChannelConnected, ChannelDisconnected } import reactivemongo.util.LazyLogger private[reactivemongo] class MongoHandler( supervisor: String, connection: String, receiver: ActorRef) extends ChannelDuplexHandler { private var last: Long = -1L // in nano-precision override def channelActive(ctx: ChannelHandlerContext): Unit = { log(ctx, "Channel is active") last = System.nanoTime() receiver ! ChannelConnected(ctx.channel.id) super.channelActive(ctx) } override def userEventTriggered( ctx: ChannelHandlerContext, evt: Any): Unit = { evt match { case _: IdleStateEvent => { if (last != -1L) { val now = System.nanoTime() log( ctx, s"Channel has been inactive for ${now - last} (last = $last)") } ctx.channel.close() // configured timeout - See channelInactive } case _ => } super.userEventTriggered(ctx, evt) } @SuppressWarnings(Array("NullParameter")) override def channelInactive(ctx: ChannelHandlerContext): Unit = { val now = System.nanoTime() if (last != -1) { val chan = ctx.channel val delay = now - last def msg = s"Channel is closed under ${delay}ns: ${chan.remoteAddress}" if (delay < 500000000) { warn(ctx, s"${msg}; Please check network connectivity and the status of the set.") } else if (chan.remoteAddress != null) { log(ctx, msg) } last = now receiver ! ChannelDisconnected(chan.id) } super.channelInactive(ctx) } override def channelRead(ctx: ChannelHandlerContext, msg: Any): Unit = { last = System.nanoTime() msg match { case response: Response => { log(ctx, s"Channel received message $response; Will be send to ${receiver.path}") receiver ! 
response //super.channelRead(ctx, msg) - Do not bubble as it's the last handler } case _ => { log(ctx, s"Unexpected message: $msg") //super.channelRead(ctx, msg) } } } override def write( ctx: ChannelHandlerContext, msg: Any, promise: ChannelPromise): Unit = { log(ctx, "Channel is requested to write") last = System.nanoTime() super.write(ctx, msg, promise) } override def exceptionCaught(ctx: ChannelHandlerContext, cause: Throwable) = { log(ctx, s"Error on channel #${ctx.channel.id}", cause) //super.exceptionCaught(ctx, cause) - Do not bubble as it's the last handler } override def handlerAdded(ctx: ChannelHandlerContext): Unit = { if (ctx.channel.isActive) { channelActive(ctx) } super.handlerAdded(ctx) } /* override def channelReadComplete(ctx: ChannelHandlerContext): Unit = { println("_READ_COMP") super.channelReadComplete(ctx) } override def channelRegistered(ctx: ChannelHandlerContext): Unit = { println(s"_REG #${ctx.channel.id}") super.channelRegistered(ctx) } override def channelUnregistered(ctx: ChannelHandlerContext): Unit = { println(s"_UNREG #${ctx.channel.id}") super.channelUnregistered(ctx) } override def handlerRemoved(ctx: ChannelHandlerContext): Unit = { println(s"_REMOVED #${ctx.channel.id}") super.handlerRemoved(ctx) } */ @inline def warn(ctx: ChannelHandlerContext, s: String) = MongoHandler.logger.warn( s"[$supervisor/$connection] $s (channel ${ctx.channel})") @inline def log(ctx: ChannelHandlerContext, s: String) = MongoHandler.logger.trace( s"[$supervisor/$connection] $s (channel ${ctx.channel})") @inline def log(ctx: ChannelHandlerContext, s: String, cause: Throwable) = MongoHandler.logger.trace( s"[$supervisor/$connection] $s (channel ${ctx.channel})", cause) } private[reactivemongo] object MongoHandler { val logger = LazyLogger("reactivemongo.core.protocol.MongoHandler") }
ReactiveMongo/ReactiveMongo
driver/src/main/scala/core/protocol/MongoHandler.scala
Scala
apache-2.0
4,149
package com.twitter.finagle.context import com.twitter.finagle.netty3.ChannelBufferBuf import com.twitter.io.Buf import com.twitter.util.{Local, Try, Return, Throw} import scala.collection.mutable /** * A context contains a number of let-delimited bindings. Bindings * are indexed by type Key[A] in a typesafe manner. Later bindings * shadow earlier ones. * * Note that the implementation of context maintains all bindings * in a linked list; context lookup requires a linear search. */ trait Context { type Key[A] sealed trait Env { /** * Retrieve the current definition of a key. * * @throws NoSuchElementException when the key is undefined * in this environment. */ @throws[NoSuchElementException]("If the key does not exist") def apply[A](key: Key[A]): A /** * Retrieve the current definition of a key, but only * if it is defined. */ def get[A](key: Key[A]): Option[A] /** * Retrieve the current definition of a key if it is defined. * If it is not defined, `orElse` is evaluated and returned. */ def getOrElse[A](key: Key[A], orElse: () => A): A /** * Tells whether `key` is defined in this environment. */ def contains[A](key: Key[A]): Boolean /** * Create a derived environment where `key` is bound to * `value`; previous bindings of `key` are shadowed. */ final def bound[A](key: Key[A], value: A): Env = Bound(this, key, value) /** * Clear the binding for the given key. Lookups for `key` will be * negative in the returned environment. */ final def cleared(key: Key[_]): Env = Cleared(this, key) } /** * An empty environment. No keys are present. */ object Empty extends Env { def apply[A](key: Key[A]) = throw new NoSuchElementException def get[A](key: Key[A]) = None def getOrElse[A](key: Key[A], orElse: () => A): A = orElse() def contains[A](key: Key[A]) = false override def toString = "<empty com.twitter.finagle.context.Env>" } /** * An environment with `key` bound to `value`; lookups for other keys * are forwarded to `next`. */ case class Bound[A](next: Env, key: Key[A], value: A) extends Env { def apply[B](key: Key[B]): B = if (key == this.key) value.asInstanceOf[B] else next(key) def get[B](key: Key[B]): Option[B] = if (key == this.key) Some(value.asInstanceOf[B]) else next.get(key) def getOrElse[B](key: Key[B], orElse: () => B): B = if (key == this.key) value.asInstanceOf[B] else next.getOrElse(key, orElse) def contains[B](key: Key[B]): Boolean = key == this.key || next.contains(key) override def toString = s"Bound($key, $value) :: $next" } /** * An environment without `key`. Lookups for other keys * are forwarded to `next`. */ case class Cleared[A](next: Env, key: Key[A]) extends Env { def apply[B](key: Key[B]) = if (key == this.key) throw new NoSuchElementException else next(key) def get[B](key: Key[B]) = if (key == this.key) None else next.get(key) def getOrElse[B](key: Key[B], orElse: () => B): B = if (key == this.key) orElse() else next.getOrElse(key, orElse) def contains[B](key: Key[B]) = key != this.key && next.contains(key) override def toString = s"Clear($key) :: $next" } /** * Concatenate two environments with left-hand side precedence. 
*/ case class OrElse(left: Env, right: Env) extends Env { def apply[A](key: Key[A]) = if (left.contains(key)) left.apply(key) else right.apply(key) def get[A](key: Key[A]) = if (left.contains(key)) left.get(key) else right.get(key) def getOrElse[A](key: Key[A], orElse: () => A): A = left.getOrElse(key, () => right.getOrElse(key, orElse)) def contains[A](key: Key[A]) = left.contains(key) || right.contains(key) override def toString = s"OrElse($left, $right)" } private[this] val local = new Local[Env] private[finagle] def env: Env = local() match { case Some(env) => env case None => Empty } /** * Retrieve the current definition of a key. * * @throws NoSuchElementException when the key is undefined * in the current request-local context. */ @throws[NoSuchElementException]("If the key does not exist") def apply[A](key: Key[A]): A = env(key) /** * Retrieve the current definition of a key, but only * if it is defined in the current request-local context. */ def get[A](key: Key[A]): Option[A] = env.get(key) /** * Retrieve the current definition of a key if it is defined. * If it is not defined, `orElse` is evaluated and returned. */ def getOrElse[A](key: Key[A], orElse: () => A): A = env.getOrElse(key, orElse) /** * Tells whether `key` is defined in the current request-local * context. */ def contains[A](key: Key[A]): Boolean = env.contains(key) /** * Bind `value` to `key` in the scope of `fn`. */ def let[A, R](key: Key[A], value: A)(fn: => R): R = local.let(env.bound(key, value))(fn) /** * Bind two keys and values in the scope of `fn`. */ def let[A, B, R](key1: Key[A], value1: A, key2: Key[B], value2: B)(fn: => R): R = local.let(env.bound(key1, value1).bound(key2, value2))(fn) /** * Bind the given environment. */ private[finagle] def let[R](env1: Env)(fn: => R): R = local.let(OrElse(env1, env))(fn) /** * Unbind the passed-in keys, in the scope of `fn`. */ def letClear[R](keys: Key[_]*)(fn: => R): R = { val newEnv = keys.foldLeft(env) { case (e, k) => e.cleared(k) } local.let(newEnv)(fn) } } /** * A marshalled context contains bindings that may be * marshalled and sent across process boundaries. A set * of marshalled bindings may be restored in the local * environment. Thus we can use marshalled contexts to * propagate a set of bindings across a whole request * tree. */ final class MarshalledContext extends Context { /** * Keys in MarshalledContext must provide a marshaller * and unmarshaller. */ abstract class Key[A](id: String) { /** * A unique identifier defining this marshaller. This is * transmitted together with marshalled values in order to * pick the the appropriate unmarshaller for a given value. */ final val marshalId: Buf = Buf.ByteBuffer.coerce(Buf.Utf8(id)) /** * Marshal an A-typed value into a Buf. */ def marshal(value: A): Buf /** * Attempt to unmarshal an A-typed context value. */ def tryUnmarshal(buf: Buf): Try[A] } /** * A translucent environment is capable of storing key/value pairs * to be (possibly) unmarshalled later. */ case class Translucent(next: Env, marshalId: Buf, marshalled: Buf) extends Env { @volatile private var cachedEnv: Env = null private def env[A](key: Key[A]): Env = { if (cachedEnv != null) cachedEnv else if (key.marshalId != marshalId) next else (key.tryUnmarshal(marshalled): Try[A]) match { case Return(value) => cachedEnv = Bound(next, key, value) cachedEnv case Throw(_) => // Should we omit the context altogether when this happens? // Should we log some warnings? 
next } } def apply[A](key: Key[A]): A = env(key).apply(key) def get[A](key: Key[A]): Option[A] = env(key).get(key) def getOrElse[A](key: Key[A], orElse: () => A): A = env(key).getOrElse(key, orElse) def contains[A](key: Key[A]): Boolean = env(key).contains(key) override def toString = if (cachedEnv != null) cachedEnv.toString else { val Buf.Utf8(id8) = marshalId s"Translucent(${id8}(${marshalled.length})) :: $next" } } private def marshalMap(env: Env, map: mutable.Map[Buf, Buf]): Unit = env match { case Bound(next, key, value) => marshalMap(next, map) map.put(key.marshalId, key.marshal(value)) case Translucent(next, id, marshalled) => marshalMap(next, map) map.put(id, marshalled) case OrElse(left, right) => marshalMap(right, map) marshalMap(left, map) case Cleared(next, key) => marshalMap(next, map) map.remove(key.marshalId) case Empty => () } /** * Store into the current environment a set of marshalled * bindings and run `fn`. Bindings are unmarshalled on demand. */ def letUnmarshal[R](contexts: Iterable[(Buf, Buf)])(fn: => R): R = { val u = new Unmarshaller(env) for ((id, marshalled) <- contexts) u.put(id, marshalled) let(u.build)(fn) } /** * Marshal the `env` into a set of (id, value) pairs. */ def marshal(env: Env): Iterable[(Buf, Buf)] = { val map = mutable.Map[Buf, Buf]() marshalMap(env, map) map } /** * Marshal the current environment into a set of (id, value) pairs. */ def marshal(): Iterable[(Buf, Buf)] = marshal(env) /** * Produce an environment consisting of the given marshalled * (id, value) pairs. They are unmarshalled on demand. */ def unmarshal(contexts: Iterable[(Buf, Buf)]): Env = { val builder = new Unmarshaller for ((id, marshalled) <- contexts) builder.put(id, marshalled) builder.build } /** * An Unmarshaller gradually builds up an environment from * a set of (id, value) pairs. */ class Unmarshaller(init: Env) { def this() = this(Empty) private[this] var env = init def put(id: Buf, marshalled: Buf) { // Copy the Bufs to avoid indirectly keeping a reference to Netty internal buffer (big) env = Translucent(env, copy(id), copy(marshalled)) } def build: Env = env private[this] def copy(buf: Buf): Buf = buf match { case ChannelBufferBuf(cb) => Buf.ByteBuffer.Shared(cb.toByteBuffer) case _ => buf } } }
jay-johnson/finagle
finagle-core/src/main/scala/com/twitter/finagle/context/Context.scala
Scala
apache-2.0
9,959
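A minimal usage sketch for the MarshalledContext API shown in the Context.scala entry above: defining a broadcastable key, binding it for a dynamic scope, and marshalling the current environment. The key id, the value, and the standalone context instance are illustrative only; real Finagle code would normally go through a shared context instance rather than constructing its own.

import com.twitter.finagle.context.MarshalledContext
import com.twitter.io.Buf
import com.twitter.util.{Return, Throw, Try}

object MarshalledContextSketch {
  val ctx = new MarshalledContext

  // A key whose values can cross process boundaries as UTF-8 encoded Bufs.
  val UserId: ctx.Key[String] = new ctx.Key[String]("example.userId") {
    def marshal(value: String): Buf = Buf.Utf8(value)
    def tryUnmarshal(buf: Buf): Try[String] = buf match {
      case Buf.Utf8(s) => Return(s)
      case other => Throw(new IllegalArgumentException(s"not a UTF-8 buffer: $other"))
    }
  }

  def handleRequest(): Unit =
    ctx.let(UserId, "user-42") {
      // Anywhere in this dynamic scope the binding is visible without being passed explicitly.
      assert(ctx(UserId) == "user-42")
      // The marshalled (id, value) pairs can be attached to an outgoing request and
      // restored on the receiving side with letUnmarshal.
      val wireForm: Iterable[(Buf, Buf)] = ctx.marshal()
      ()
    }
}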
trait FooBase {
  type Bar >: Null <: BarBase { type This <: FooBase.this.Bar }
  type This >: this.type <: FooBase { type This <: FooBase.this.This }
  def derived(bar: Bar): This = ???
}

trait BarBase {
  type This >: Null <: BarBase { type This <: BarBase.this.This }
}

object Test {
  def bad(foo: FooBase): FooBase = foo match {
    case foo: FooBase =>
      foo.derived(???) // Triggers infinite loop in TypeAssigner.avoid()
  }
}
dotty-staging/dotty
tests/pos/i2941.scala
Scala
apache-2.0
442
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.rdd import scala.collection.mutable.HashMap import org.scalatest.FunSuite import org.scalatest.concurrent.Timeouts._ import org.scalatest.time.{Span, Millis} import org.apache.spark.SparkContext._ import org.apache.spark.rdd._ import scala.collection.parallel.mutable import org.apache.spark._ class RDDSuite extends FunSuite with SharedSparkContext { test("basic operations") { val nums = sc.makeRDD(Array(1, 2, 3, 4), 2) assert(nums.collect().toList === List(1, 2, 3, 4)) val dups = sc.makeRDD(Array(1, 1, 2, 2, 3, 3, 4, 4), 2) assert(dups.distinct().count() === 4) assert(dups.distinct.count === 4) // Can distinct and count be called without parentheses? assert(dups.distinct.collect === dups.distinct().collect) assert(dups.distinct(2).collect === dups.distinct().collect) assert(nums.reduce(_ + _) === 10) assert(nums.fold(0)(_ + _) === 10) assert(nums.map(_.toString).collect().toList === List("1", "2", "3", "4")) assert(nums.filter(_ > 2).collect().toList === List(3, 4)) assert(nums.flatMap(x => 1 to x).collect().toList === List(1, 1, 2, 1, 2, 3, 1, 2, 3, 4)) assert(nums.union(nums).collect().toList === List(1, 2, 3, 4, 1, 2, 3, 4)) assert(nums.glom().map(_.toList).collect().toList === List(List(1, 2), List(3, 4))) assert(nums.collect({ case i if i >= 3 => i.toString }).collect().toList === List("3", "4")) assert(nums.keyBy(_.toString).collect().toList === List(("1", 1), ("2", 2), ("3", 3), ("4", 4))) val partitionSums = nums.mapPartitions(iter => Iterator(iter.reduceLeft(_ + _))) assert(partitionSums.collect().toList === List(3, 7)) val partitionSumsWithSplit = nums.mapPartitionsWithSplit { case(split, iter) => Iterator((split, iter.reduceLeft(_ + _))) } assert(partitionSumsWithSplit.collect().toList === List((0, 3), (1, 7))) val partitionSumsWithIndex = nums.mapPartitionsWithIndex { case(split, iter) => Iterator((split, iter.reduceLeft(_ + _))) } assert(partitionSumsWithIndex.collect().toList === List((0, 3), (1, 7))) intercept[UnsupportedOperationException] { nums.filter(_ > 5).reduce(_ + _) } } test("SparkContext.union") { val nums = sc.makeRDD(Array(1, 2, 3, 4), 2) assert(sc.union(nums).collect().toList === List(1, 2, 3, 4)) assert(sc.union(nums, nums).collect().toList === List(1, 2, 3, 4, 1, 2, 3, 4)) assert(sc.union(Seq(nums)).collect().toList === List(1, 2, 3, 4)) assert(sc.union(Seq(nums, nums)).collect().toList === List(1, 2, 3, 4, 1, 2, 3, 4)) } test("aggregate") { val pairs = sc.makeRDD(Array(("a", 1), ("b", 2), ("a", 2), ("c", 5), ("a", 3))) type StringMap = HashMap[String, Int] val emptyMap = new StringMap { override def default(key: String): Int = 0 } val mergeElement: (StringMap, (String, Int)) => StringMap = (map, pair) => { map(pair._1) += pair._2 map } val mergeMaps: (StringMap, 
StringMap) => StringMap = (map1, map2) => { for ((key, value) <- map2) { map1(key) += value } map1 } val result = pairs.aggregate(emptyMap)(mergeElement, mergeMaps) assert(result.toSet === Set(("a", 6), ("b", 2), ("c", 5))) } test("basic caching") { val rdd = sc.makeRDD(Array(1, 2, 3, 4), 2).cache() assert(rdd.collect().toList === List(1, 2, 3, 4)) assert(rdd.collect().toList === List(1, 2, 3, 4)) assert(rdd.collect().toList === List(1, 2, 3, 4)) } test("caching with failures") { val onlySplit = new Partition { override def index: Int = 0 } var shouldFail = true val rdd = new RDD[Int](sc, Nil) { override def getPartitions: Array[Partition] = Array(onlySplit) override val getDependencies = List[Dependency[_]]() override def compute(split: Partition, context: TaskContext): Iterator[Int] = { if (shouldFail) { throw new Exception("injected failure") } else { return Array(1, 2, 3, 4).iterator } } }.cache() val thrown = intercept[Exception]{ rdd.collect() } assert(thrown.getMessage.contains("injected failure")) shouldFail = false assert(rdd.collect().toList === List(1, 2, 3, 4)) } test("empty RDD") { val empty = new EmptyRDD[Int](sc) assert(empty.count === 0) assert(empty.collect().size === 0) val thrown = intercept[UnsupportedOperationException]{ empty.reduce(_+_) } assert(thrown.getMessage.contains("empty")) val emptyKv = new EmptyRDD[(Int, Int)](sc) val rdd = sc.parallelize(1 to 2, 2).map(x => (x, x)) assert(rdd.join(emptyKv).collect().size === 0) assert(rdd.rightOuterJoin(emptyKv).collect().size === 0) assert(rdd.leftOuterJoin(emptyKv).collect().size === 2) assert(rdd.cogroup(emptyKv).collect().size === 2) assert(rdd.union(emptyKv).collect().size === 2) } test("repartitioned RDDs") { val data = sc.parallelize(1 to 1000, 10) // Coalesce partitions val repartitioned1 = data.repartition(2) assert(repartitioned1.partitions.size == 2) val partitions1 = repartitioned1.glom().collect() assert(partitions1(0).length > 0) assert(partitions1(1).length > 0) assert(repartitioned1.collect().toSet === (1 to 1000).toSet) // Split partitions val repartitioned2 = data.repartition(20) assert(repartitioned2.partitions.size == 20) val partitions2 = repartitioned2.glom().collect() assert(partitions2(0).length > 0) assert(partitions2(19).length > 0) assert(repartitioned2.collect().toSet === (1 to 1000).toSet) } test("coalesced RDDs") { val data = sc.parallelize(1 to 10, 10) val coalesced1 = data.coalesce(2) assert(coalesced1.collect().toList === (1 to 10).toList) assert(coalesced1.glom().collect().map(_.toList).toList === List(List(1, 2, 3, 4, 5), List(6, 7, 8, 9, 10))) // Check that the narrow dependency is also specified correctly assert(coalesced1.dependencies.head.asInstanceOf[NarrowDependency[_]].getParents(0).toList === List(0, 1, 2, 3, 4)) assert(coalesced1.dependencies.head.asInstanceOf[NarrowDependency[_]].getParents(1).toList === List(5, 6, 7, 8, 9)) val coalesced2 = data.coalesce(3) assert(coalesced2.collect().toList === (1 to 10).toList) assert(coalesced2.glom().collect().map(_.toList).toList === List(List(1, 2, 3), List(4, 5, 6), List(7, 8, 9, 10))) val coalesced3 = data.coalesce(10) assert(coalesced3.collect().toList === (1 to 10).toList) assert(coalesced3.glom().collect().map(_.toList).toList === (1 to 10).map(x => List(x)).toList) // If we try to coalesce into more partitions than the original RDD, it should just // keep the original number of partitions. 
val coalesced4 = data.coalesce(20) assert(coalesced4.collect().toList === (1 to 10).toList) assert(coalesced4.glom().collect().map(_.toList).toList === (1 to 10).map(x => List(x)).toList) // we can optionally shuffle to keep the upstream parallel val coalesced5 = data.coalesce(1, shuffle = true) assert(coalesced5.dependencies.head.rdd.dependencies.head.rdd.asInstanceOf[ShuffledRDD[_, _, _]] != null) // when shuffling, we can increase the number of partitions val coalesced6 = data.coalesce(20, shuffle = true) assert(coalesced6.partitions.size === 20) assert(coalesced6.collect().toSet === (1 to 10).toSet) } test("coalesced RDDs with locality") { val data3 = sc.makeRDD(List((1,List("a","c")), (2,List("a","b","c")), (3,List("b")))) val coal3 = data3.coalesce(3) val list3 = coal3.partitions.map(p => p.asInstanceOf[CoalescedRDDPartition].preferredLocation) assert(list3.sorted === Array("a","b","c"), "Locality preferences are dropped") // RDD with locality preferences spread (non-randomly) over 6 machines, m0 through m5 val data = sc.makeRDD((1 to 9).map(i => (i, (i to (i+2)).map{ j => "m" + (j%6)}))) val coalesced1 = data.coalesce(3) assert(coalesced1.collect().toList.sorted === (1 to 9).toList, "Data got *lost* in coalescing") val splits = coalesced1.glom().collect().map(_.toList).toList assert(splits.length === 3, "Supposed to coalesce to 3 but got " + splits.length) assert(splits.forall(_.length >= 1) === true, "Some partitions were empty") // If we try to coalesce into more partitions than the original RDD, it should just // keep the original number of partitions. val coalesced4 = data.coalesce(20) val listOfLists = coalesced4.glom().collect().map(_.toList).toList val sortedList = listOfLists.sortWith{ (x, y) => !x.isEmpty && (y.isEmpty || (x(0) < y(0))) } assert(sortedList === (1 to 9). 
map{x => List(x)}.toList, "Tried coalescing 9 partitions to 20 but didn't get 9 back") } test("coalesced RDDs with locality, large scale (10K partitions)") { // large scale experiment import collection.mutable val rnd = scala.util.Random val partitions = 10000 val numMachines = 50 val machines = mutable.ListBuffer[String]() (1 to numMachines).foreach(machines += "m"+_) val blocks = (1 to partitions).map(i => { (i, Array.fill(3)(machines(rnd.nextInt(machines.size))).toList) } ) val data2 = sc.makeRDD(blocks) val coalesced2 = data2.coalesce(numMachines*2) // test that you get over 90% locality in each group val minLocality = coalesced2.partitions .map(part => part.asInstanceOf[CoalescedRDDPartition].localFraction) .foldLeft(1.)((perc, loc) => math.min(perc,loc)) assert(minLocality >= 0.90, "Expected 90% locality but got " + (minLocality*100.).toInt + "%") // test that the groups are load balanced with 100 +/- 20 elements in each val maxImbalance = coalesced2.partitions .map(part => part.asInstanceOf[CoalescedRDDPartition].parents.size) .foldLeft(0)((dev, curr) => math.max(math.abs(100-curr),dev)) assert(maxImbalance <= 20, "Expected 100 +/- 20 per partition, but got " + maxImbalance) val data3 = sc.makeRDD(blocks).map(i => i*2) // derived RDD to test *current* pref locs val coalesced3 = data3.coalesce(numMachines*2) val minLocality2 = coalesced3.partitions .map(part => part.asInstanceOf[CoalescedRDDPartition].localFraction) .foldLeft(1.)((perc, loc) => math.min(perc,loc)) assert(minLocality2 >= 0.90, "Expected 90% locality for derived RDD but got " + (minLocality2*100.).toInt + "%") } test("zipped RDDs") { val nums = sc.makeRDD(Array(1, 2, 3, 4), 2) val zipped = nums.zip(nums.map(_ + 1.0)) assert(zipped.glom().map(_.toList).collect().toList === List(List((1, 2.0), (2, 3.0)), List((3, 4.0), (4, 5.0)))) intercept[IllegalArgumentException] { nums.zip(sc.parallelize(1 to 4, 1)).collect() } } test("partition pruning") { val data = sc.parallelize(1 to 10, 10) // Note that split number starts from 0, so > 8 means only 10th partition left. val prunedRdd = new PartitionPruningRDD(data, splitNum => splitNum > 8) assert(prunedRdd.partitions.size === 1) val prunedData = prunedRdd.collect() assert(prunedData.size === 1) assert(prunedData(0) === 10) } test("mapWith") { import java.util.Random val ones = sc.makeRDD(Array(1, 1, 1, 1, 1, 1), 2) val randoms = ones.mapWith( (index: Int) => new Random(index + 42)) {(t: Int, prng: Random) => prng.nextDouble * t}.collect() val prn42_3 = { val prng42 = new Random(42) prng42.nextDouble(); prng42.nextDouble(); prng42.nextDouble() } val prn43_3 = { val prng43 = new Random(43) prng43.nextDouble(); prng43.nextDouble(); prng43.nextDouble() } assert(randoms(2) === prn42_3) assert(randoms(5) === prn43_3) } test("flatMapWith") { import java.util.Random val ones = sc.makeRDD(Array(1, 1, 1, 1, 1, 1), 2) val randoms = ones.flatMapWith( (index: Int) => new Random(index + 42)) {(t: Int, prng: Random) => val random = prng.nextDouble() Seq(random * t, random * t * 10)}. collect() val prn42_3 = { val prng42 = new Random(42) prng42.nextDouble(); prng42.nextDouble(); prng42.nextDouble() } val prn43_3 = { val prng43 = new Random(43) prng43.nextDouble(); prng43.nextDouble(); prng43.nextDouble() } assert(randoms(5) === prn42_3 * 10) assert(randoms(11) === prn43_3 * 10) } test("filterWith") { import java.util.Random val ints = sc.makeRDD(Array(1, 2, 3, 4, 5, 6), 2) val sample = ints.filterWith( (index: Int) => new Random(index + 42)) {(t: Int, prng: Random) => prng.nextInt(3) == 0}. 
collect() val checkSample = { val prng42 = new Random(42) val prng43 = new Random(43) Array(1, 2, 3, 4, 5, 6).filter{i => if (i < 4) 0 == prng42.nextInt(3) else 0 == prng43.nextInt(3)} } assert(sample.size === checkSample.size) for (i <- 0 until sample.size) assert(sample(i) === checkSample(i)) } test("take") { var nums = sc.makeRDD(Range(1, 1000), 1) assert(nums.take(0).size === 0) assert(nums.take(1) === Array(1)) assert(nums.take(3) === Array(1, 2, 3)) assert(nums.take(500) === (1 to 500).toArray) assert(nums.take(501) === (1 to 501).toArray) assert(nums.take(999) === (1 to 999).toArray) assert(nums.take(1000) === (1 to 999).toArray) nums = sc.makeRDD(Range(1, 1000), 2) assert(nums.take(0).size === 0) assert(nums.take(1) === Array(1)) assert(nums.take(3) === Array(1, 2, 3)) assert(nums.take(500) === (1 to 500).toArray) assert(nums.take(501) === (1 to 501).toArray) assert(nums.take(999) === (1 to 999).toArray) assert(nums.take(1000) === (1 to 999).toArray) nums = sc.makeRDD(Range(1, 1000), 100) assert(nums.take(0).size === 0) assert(nums.take(1) === Array(1)) assert(nums.take(3) === Array(1, 2, 3)) assert(nums.take(500) === (1 to 500).toArray) assert(nums.take(501) === (1 to 501).toArray) assert(nums.take(999) === (1 to 999).toArray) assert(nums.take(1000) === (1 to 999).toArray) nums = sc.makeRDD(Range(1, 1000), 1000) assert(nums.take(0).size === 0) assert(nums.take(1) === Array(1)) assert(nums.take(3) === Array(1, 2, 3)) assert(nums.take(500) === (1 to 500).toArray) assert(nums.take(501) === (1 to 501).toArray) assert(nums.take(999) === (1 to 999).toArray) assert(nums.take(1000) === (1 to 999).toArray) } test("top with predefined ordering") { val nums = Array.range(1, 100000) val ints = sc.makeRDD(scala.util.Random.shuffle(nums), 2) val topK = ints.top(5) assert(topK.size === 5) assert(topK === nums.reverse.take(5)) } test("top with custom ordering") { val words = Vector("a", "b", "c", "d") implicit val ord = implicitly[Ordering[String]].reverse val rdd = sc.makeRDD(words, 2) val topK = rdd.top(2) assert(topK.size === 2) assert(topK.sorted === Array("b", "a")) } test("takeOrdered with predefined ordering") { val nums = Array(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) val rdd = sc.makeRDD(nums, 2) val sortedLowerK = rdd.takeOrdered(5) assert(sortedLowerK.size === 5) assert(sortedLowerK === Array(1, 2, 3, 4, 5)) } test("takeOrdered with custom ordering") { val nums = Array(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) implicit val ord = implicitly[Ordering[Int]].reverse val rdd = sc.makeRDD(nums, 2) val sortedTopK = rdd.takeOrdered(5) assert(sortedTopK.size === 5) assert(sortedTopK === Array(10, 9, 8, 7, 6)) assert(sortedTopK === nums.sorted(ord).take(5)) } test("takeSample") { val data = sc.parallelize(1 to 100, 2) for (seed <- 1 to 5) { val sample = data.takeSample(withReplacement=false, 20, seed) assert(sample.size === 20) // Got exactly 20 elements assert(sample.toSet.size === 20) // Elements are distinct assert(sample.forall(x => 1 <= x && x <= 100), "elements not in [1, 100]") } for (seed <- 1 to 5) { val sample = data.takeSample(withReplacement=false, 200, seed) assert(sample.size === 100) // Got only 100 elements assert(sample.toSet.size === 100) // Elements are distinct assert(sample.forall(x => 1 <= x && x <= 100), "elements not in [1, 100]") } for (seed <- 1 to 5) { val sample = data.takeSample(withReplacement=true, 20, seed) assert(sample.size === 20) // Got exactly 20 elements assert(sample.forall(x => 1 <= x && x <= 100), "elements not in [1, 100]") } for (seed <- 1 to 5) { val sample = 
data.takeSample(withReplacement=true, 100, seed) assert(sample.size === 100) // Got exactly 100 elements // Chance of getting all distinct elements is astronomically low, so test we got < 100 assert(sample.toSet.size < 100, "sampling with replacement returned all distinct elements") } for (seed <- 1 to 5) { val sample = data.takeSample(withReplacement=true, 200, seed) assert(sample.size === 200) // Got exactly 200 elements // Chance of getting all distinct elements is still quite low, so test we got < 100 assert(sample.toSet.size < 100, "sampling with replacement returned all distinct elements") } } test("runJob on an invalid partition") { intercept[IllegalArgumentException] { sc.runJob(sc.parallelize(1 to 10, 2), {iter: Iterator[Int] => iter.size}, Seq(0, 1, 2), false) } } }
windeye/spark
core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala
Scala
apache-2.0
18,359
/*********************************************************************** * Copyright (c) 2013-2017 Commonwealth Computer Research, Inc. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Apache License, Version 2.0 * which accompanies this distribution and is available at * http://www.opensource.org/licenses/apache2.0.php. ***********************************************************************/ package com.example.geomesa.spark import org.apache.hadoop.conf.Configuration import org.apache.spark.broadcast.Broadcast import org.apache.spark.rdd.RDD import org.apache.spark.{SparkConf, SparkContext} import org.geotools.data.{DataStoreFinder, _} import org.locationtech.geomesa.accumulo.data.AccumuloDataStore import org.locationtech.geomesa.features.ScalaSimpleFeatureFactory import org.locationtech.geomesa.spark.{GeoMesaSpark, GeoMesaSparkKryoRegistrator} import org.locationtech.geomesa.utils.geotools.{SchemaBuilder, SimpleFeatureTypes} import org.locationtech.jts.geom.Geometry import org.opengis.feature.simple.SimpleFeature import scala.collection.JavaConversions._ object ShallowJoin { val countriesDsParams = Map( "instanceId" -> "mycloud", "zookeepers" -> "zoo1,zoo2,zoo3", "user" -> "user", "password" -> "password", "tableName" -> "geomesa.countries") val gdeltDsParams = Map( "instanceId" -> "mycloud", "zookeepers" -> "zoo1,zoo2,zoo3", "user" -> "user", "password" -> "password", "tableName" -> "geomesa.gdelt") val countriesDs = DataStoreFinder.getDataStore(countriesDsParams).asInstanceOf[AccumuloDataStore] val gdeltDs = DataStoreFinder.getDataStore(gdeltDsParams).asInstanceOf[AccumuloDataStore] def main(args: Array[String]) { val conf = new SparkConf().setAppName("testSpark") val sc = SparkContext.getOrCreate(conf) val rddProviderCountries = GeoMesaSpark(countriesDsParams) val rddProviderGdelt = GeoMesaSpark(gdeltDsParams) val countriesRdd: RDD[SimpleFeature] = rddProviderCountries.rdd(new Configuration(), sc, countriesDsParams, new Query("states")) val gdeltRdd: RDD[SimpleFeature] = rddProviderGdelt.rdd(new Configuration(), sc, gdeltDsParams, new Query("gdelt")) val aggregated = shallowJoin(sc, countriesRdd, gdeltRdd, "STATE_NAME") aggregated.collect.foreach {println} countriesDs.dispose() gdeltDs.dispose() } def shallowJoin(sc: SparkContext, coveringSet: RDD[SimpleFeature], data: RDD[SimpleFeature], key: String): RDD[SimpleFeature] = { // Broadcast sfts to executors GeoMesaSparkKryoRegistrator.broadcast(data) // Broadcast covering set to executors val broadcastedCover = sc.broadcast(coveringSet.collect) // Key data by cover name val keyedData = data.mapPartitions { iter => import org.locationtech.geomesa.utils.geotools.Conversions._ iter.flatMap { sf => // Iterate over covers until a match is found val it = broadcastedCover.value.iterator var container: Option[String] = None while (it.hasNext) { val cover = it.next() // If the cover's polygon contains the feature, // or in the case of non-point geoms, if they intersect, set the container if (cover.geometry.intersects(sf.geometry)) { container = Some(cover.getAttribute(key).asInstanceOf[String]) } } // return the found cover as the key if (container.isDefined) { Some(container.get, sf) } else { None } } } // Get the indices and types of the attributes that can be aggregated and send them to the partitions val countableTypes = Seq("Integer", "Long", "Double") val typeNames = data.first.getType.getTypes.toIndexedSeq.map{t => t.getBinding.getSimpleName.toString} val countableIndices = 
typeNames.indices.flatMap { index => val featureType = typeNames(index) // Only grab countable types, skipping the ID field if ((countableTypes contains featureType) && index != 0) { Some(index, featureType) } else { None } }.toArray val countable = sc.broadcast(countableIndices) // Create a Simple Feature Type based on what can be aggregated val sftBuilder = new SchemaBuilder() sftBuilder.addString(key) sftBuilder.addMultiPolygon("geom") sftBuilder.addInt("count") val featureProperties = data.first.getProperties.toSeq countableIndices.foreach { case (index, clazz) => val featureName = featureProperties.apply(index).getName clazz match { case "Integer" => sftBuilder.addInt(s"total_$featureName") case "Long" => sftBuilder.addLong(s"total_$featureName") case "Double" => sftBuilder.addDouble(s"total_$featureName") } sftBuilder.addDouble(s"avg_${featureProperties.apply(index).getName}") } val coverSft = sftBuilder.build("aggregate") // Register it with kryo and send it to executors GeoMesaSparkKryoRegistrator.register(Seq(coverSft)) GeoMesaSparkKryoRegistrator.broadcast(keyedData) val coverSftBroadcast = sc.broadcast(SimpleFeatureTypes.encodeType(coverSft)) // Pre-compute known indices and send them to workers val stringAttrs = coverSft.getAttributeDescriptors.map(_.getLocalName) val countIndex = sc.broadcast(stringAttrs.indexOf("count")) // Reduce features by their covering area val aggregate = reduceAndAggregate(keyedData, countable, countIndex, coverSftBroadcast) // Send a map of cover name -> geom to the executors import org.locationtech.geomesa.utils.geotools.Conversions._ val coverMap: scala.collection.Map[String, Geometry] = coveringSet.map{ sf => sf.getAttribute(key).asInstanceOf[String] -> sf.geometry }.collectAsMap val broadcastedCoverMap = sc.broadcast(coverMap) // Compute averages and set cover names and geometries aggregate.mapPartitions { iter => import org.locationtech.geomesa.utils.geotools.Conversions.RichSimpleFeature iter.flatMap{ case (coverName, sf) => if (sf.getType.getTypeName == "aggregate") { sf.getProperties.foreach{ prop => val name = prop.getName.toString if (name.startsWith("total_")) { val count = sf.get[Integer]("count") val avg = prop.getValue match { case a: Integer => a.toDouble / count case a: java.lang.Long => a.toDouble / count case a: java.lang.Double => a / count case _ => throw new Exception(s"couldn't match $name") } sf.setAttribute(s"avg_${name.substring(6)}", avg) } } sf.setAttribute(key, coverName) sf.setDefaultGeometry(broadcastedCoverMap.value.getOrElse(coverName, null)) Some(sf) } else { None } } } } def reduceAndAggregate(keyedData: RDD[(String, SimpleFeature)], countable: Broadcast[Array[(Int, String)]], countIndex: Broadcast[Int], coverSftBroadcast: Broadcast[String]): RDD[(String, SimpleFeature)] = { // Reduce features by their covering area val aggregate = keyedData.reduceByKey((featureA, featureB) => { import org.locationtech.geomesa.utils.geotools.Conversions.RichSimpleFeature val aggregateSft = SimpleFeatureTypes.createType("aggregate", coverSftBroadcast.value) val typeA = featureA.getType.getTypeName val typeB = featureB.getType.getTypeName // Case: combining two aggregate features if (typeA == "aggregate" && typeB == "aggregate") { // Combine the "total" properties (featureA.getProperties, featureB.getProperties).zipped.foreach((propA, propB) => { val name = propA.getName.toString if (propA.getName.toString.startsWith("total_") || propA.getName.toString == "count") { val sum = (propA.getValue, propB.getValue) match { case (a: Integer, b: 
Integer) => a + b case (a: java.lang.Long, b: java.lang.Long) => a + b case (a: java.lang.Double, b: java.lang.Double) => a + b case _ => throw new Exception("Couldn't match countable type.") } featureA.setAttribute(propA.getName, sum) } }) featureA // Case: combining two regular features } else if (typeA != "aggregate" && typeB != "aggregate") { // Grab each feature's properties val featurePropertiesA = featureA.getProperties.toSeq val featurePropertiesB = featureB.getProperties.toSeq // Create a new aggregate feature to hold the result val featureFields = Seq("empty", featureA.geometry) ++ Seq.fill(aggregateSft.getTypes.size - 2)("0") val aggregateFeature = ScalaSimpleFeatureFactory.buildFeature(aggregateSft, featureFields, featureA.getID) // Loop over the countable properties and sum them for both geonames simple features countable.value.foreach { case (index, clazz) => val propA = featurePropertiesA(index) val propB = featurePropertiesB(index) val valA = if (propA == null) 0 else propA.getValue val valB = if (propB == null) 0 else propB.getValue // Set the total if( propA != null && propB != null) { val sum = (valA, valB) match { case (a: Integer, b: Integer) => a + b case (a: java.lang.Long, b: java.lang.Long) => a + b case (a: java.lang.Double, b: java.lang.Double) => a + b case x => throw new Exception(s"Couldn't match countable type. $x") } aggregateFeature.setAttribute(s"total_${propA.getName.toString}", sum) } else { val sum = if (valA != null) valA else if (valB != null) valB else 0 aggregateFeature.setAttribute(s"total_${propB.getName.toString}", sum) } } aggregateFeature.setAttribute(countIndex.value, new Integer(2)) aggregateFeature // Case: combining a mix } else { // Figure out which feature is which val (aggFeature: SimpleFeature, geoFeature: SimpleFeature) = if (typeA == "aggregate" && typeB != "aggregate") { (featureA, featureB) } else if (typeA != "aggregate" && typeB == "aggregate") { (featureB, featureA) } // Loop over the aggregate feature's properties, adding on the regular feature's properties aggFeature.getProperties.foreach { prop => val name = prop.getName.toString if (name.startsWith("total_")) { val geoProp = geoFeature.getProperty(name.substring(6)) if (geoProp != null) { val sum = (prop.getValue, geoProp.getValue) match { case (a: Integer, b: Integer) => a + b case (a: java.lang.Long, b: java.lang.Long) => a + b case (a: java.lang.Double, b: java.lang.Double) => a + b case _ => 0 } aggFeature.setAttribute(name, sum) } } } aggFeature.setAttribute(countIndex.value, aggFeature.get[Integer](countIndex.value) + 1) aggFeature } }) aggregate } }
geomesa/geomesa-tutorials
geomesa-examples-spark/src/main/scala/com/example/geomesa/spark/ShallowJoin.scala
Scala
apache-2.0
11,448
package varys.framework.master

import java.util.concurrent.atomic.AtomicLong

import scala.collection.mutable.HashSet

import varys.framework.FlowDescription

private[varys] class FlowInfo(
    val desc: FlowDescription) {

  var source = desc.originHost
  var destClient: ClientInfo = null
  var currentBps = 0.0
  var lastScheduled: Long = 0L

  // DNBD: bottleneck information
  var bottleneck: Double = 0

  var bytesLeft_ = new AtomicLong(desc.sizeInBytes)
  def bytesLeft: Long = bytesLeft_.get()

  def setDestination(dClient: ClientInfo) {
    destClient = dClient
  }

  def isLive = (destClient != null && bytesLeft > 0)

  def getFlowSize() = desc.sizeInBytes

  def decreaseBytes(byteToDecrease: Long) {
    bytesLeft_.getAndAdd(-byteToDecrease)
  }

  // DNBD: used to update flow info
  def getFlowId() = desc.id

  // DNBD: used to get the id of the source client
  def getSourceClientId() = desc.originHostId

  override def toString: String =
    "FlowInfo(" + source + " --> " + destClient.host + "[" + desc + "], bytesLeft=" +
      bytesLeft + ", currentBps=" + currentBps + ")"
}
frankfzw/varys
core/src/main/scala/varys/framework/master/FlowInfo.scala
Scala
apache-2.0
1,095
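A short, hypothetical lifecycle sketch for the FlowInfo class above; the destination client and the chunk size are invented, and only the methods defined in the class (in the same package) are used.

object FlowInfoSketch {
  // Decrement bytesLeft in chunks until the flow is no longer live.
  def drain(flow: FlowInfo, dest: ClientInfo): Unit = {
    flow.setDestination(dest) // a flow only becomes live once it has a destination
    while (flow.isLive) {
      val chunk = math.min(flow.bytesLeft, 4096L)
      flow.decreaseBytes(chunk) // isLive turns false once bytesLeft reaches 0
    }
  }
}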
import sbt._ object Idea { // quick 'n dirty way to add Android Facet to IDEA projects val command: Command = Command.command("gen-idea-android") { state => val base = Project.extract (state).currentProject.base transform(base / ".." / ".idea_modules" / "cloudr.iml", "app") transform(base / ".." / ".idea_modules" / "tests.iml", "tests") state } import xml._ import xml.transform._ object ReplaceJdk extends RewriteRule { override def transform(n: Node): Seq[Node] = n match { case e @ Elem(prefix, "orderEntry", attribs, scope, children @ _*) if (e \ "@type").text == "inheritedJdk" => <orderEntry type="jdk" jdkName="Android 3.2 Platform" jdkType="Android SDK" /> case other => other } } object ReplaceJdkTransformer extends RuleTransformer(ReplaceJdk) case class AddFacet(module: String) extends RewriteRule { def path(p: String) = "/../" + module + "/" + p override def transform(n: Node): Seq[Node] = n match { case e @ Elem(prefix, "component", attribs, scope, children @ _*) if (e \ "@name").text == "FacetManager" => <component name="FacetManager"> { children } <facet type="android" name="Android"> <configuration> <option name="GEN_FOLDER_RELATIVE_PATH_APT" value={path("gen")} /> <option name="GEN_FOLDER_RELATIVE_PATH_AIDL" value={path("gen")} /> <option name="MANIFEST_FILE_RELATIVE_PATH" value={path("AndroidManifest.xml")} /> <option name="RES_FOLDER_RELATIVE_PATH" value={path("res")} /> <option name="ASSETS_FOLDER_RELATIVE_PATH" value={path("assets")} /> <option name="LIBS_FOLDER_RELATIVE_PATH" value={path("libs")} /> <option name="REGENERATE_R_JAVA" value="true" /> <option name="REGENERATE_JAVA_BY_AIDL" value="true" /> <option name="USE_CUSTOM_APK_RESOURCE_FOLDER" value="false" /> <option name="CUSTOM_APK_RESOURCE_FOLDER" value="" /> <option name="USE_CUSTOM_COMPILER_MANIFEST" value="false" /> <option name="CUSTOM_COMPILER_MANIFEST" value="" /> <option name="APK_PATH" value="" /> <option name="LIBRARY_PROJECT" value="false" /> <option name="RUN_PROCESS_RESOURCES_MAVEN_TASK" value="true" /> <option name="GENERATE_UNSIGNED_APK" value="false" /> </configuration> </facet> </component> case e @ Elem(prefix, "component", attribs, scope, children @ _*) if (e \ "@name").text == "NewModuleRootManager" => ReplaceJdkTransformer(e) case other => other } } case class AddFacetTransformer(module: String) extends RuleTransformer(AddFacet(module)) def transform(f: java.io.File, module: String) = { val x = XML.loadFile(f) val t = AddFacetTransformer(module)(x) XML.save(f.getAbsolutePath, t) } }
sdb/cloudr
project/Idea.scala
Scala
gpl-3.0
2,972
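The Idea.command value above is meant to be registered with sbt; an assumed, minimal wiring in the build definition might look like the following (the settings key is standard sbt, the rest of the build is omitted).

import sbt._
import sbt.Keys._

object BuildSketch {
  // Register the custom command so `gen-idea-android` can be run from the sbt shell
  // after the usual IDEA project generation.
  lazy val extraSettings: Seq[Setting[_]] = Seq(
    commands += Idea.command
  )
}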
package controllers import api.StreamHelper import play.api.mvc._ import scala.collection.immutable._ import helper.ImageHelper /** * Stream object controller. */ object Stream extends Controller { import ControllerHelper._ val AcceptsPng = Accepting("image/png") /** * Stream root index page. * * Displays a list of streams for searching. */ def index = Action { implicit request => Ok(views.html.stream.index.render(request)) } /** * Lookup a stream. * * Supports: * png - Render 1x1 image of the current status. * * html - View of the stream. */ def getStream(uri: String) = Action { implicit request => val pathAndExt = uri.split('.') val path = pathAndExt(0) if (pathAndExt.length == 2 && pathAndExt(1) == "png") renderStreamStatusPng(path) else { render { case Accepts.Html() => renderStream(Application.getLocalUser(request), path) case AcceptsPng() => renderStreamStatusPng(path) } } } /** * Render a stream as html. * * Displays a try create page if the stream does not exist but the parent does. */ def renderStream(user: models.User, uri: String)(implicit request: RequestHeader) = models.Stream.findByUri(uri) map { s => Ok(views.html.stream.stream.render(s, s.getChildren(), request)) } getOrElse { tryCreateDescendant(user, uri) } /** * Render a stream's current status as a 1x1 PNG image. */ def renderStreamStatusPng(uri: String) = models.Stream.findByUri(uri) map { s => val img = ImageHelper.createImage(s.status.color) noCache(Ok(ImageHelper.toPng(img))) .as("image/png") } getOrElse(NotFound) /** * Checks if child stream can created and displays a create page. * * A child stream can only be created if its direct parent exists and * is owned by the current user. */ def tryCreateDescendant(user: models.User, uri: String)(implicit request: RequestHeader): Result = StreamHelper.getRawParentPath(uri) flatMap { case (parentUri, childUri) => for ( validChildName <- models.StreamName.fromString(childUri); parent <- models.Stream.findByUri(parentUri); parent <- models.Stream.asOwner(parent, user) ) yield { Ok(views.html.stream.createChild.render(parent.stream, validChildName, request)) } } getOrElse { NotFound(views.html.notFound.render(request)) } /** * Tag collection view. */ def getTag(tag: String) = Action { implicit request => models.StreamTag.fromString(tag) map { tag => Ok(views.html.stream.tag.render(tag, request)) } getOrElse { NotFound(views.html.notFound.render(request)) } } }
Blotre/blotre
app/controllers/StreamController.scala
Scala
mit
2,778
package actors import akka.actor._ import com.bryzek.apidoc.api.v0.models.{Application, Organization, Publication, Subscription, User, Visibility} import db.{ApplicationsDao, Authorization, MembershipsDao, SubscriptionsDao} import javax.inject.{Inject, Singleton} import lib.{Config, Email, Pager, Person} import play.api.Logger import play.api.Play.current object Emails { /** * Context is used to enforce permissions - only delivering email * when the user in fact has access to the specified resource. For * example, if the email is being sent regarding an update to an * application context, we will ensure that the user can actually * view that application prior to sending the update. This allows * users to be subscribed to updates for public applications while * never receiving communciation for non public applications (as an * example). */ sealed trait Context object Context { case class Application(application: com.bryzek.apidoc.api.v0.models.Application) extends Context case object OrganizationAdmin extends Context case object OrganizationMember extends Context } } @Singleton class Emails @Inject() ( applicationsDao: ApplicationsDao, membershipsDao: MembershipsDao, subscriptionsDao: SubscriptionsDao ) { private lazy val sendErrorsTo = Config.requiredString("apidoc.sendErrorsTo").split("\\\\s+") def deliver( context: Emails.Context, org: Organization, publication: Publication, subject: String, body: String ) ( implicit filter: Subscription => Boolean = { _ => true } ) { eachSubscription(context, org, publication, { subscription => Email.sendHtml( to = Person(subscription.user), subject = subject, body = body ) }) } private[this] def eachSubscription( context: Emails.Context, organization: Organization, publication: Publication, f: Subscription => Unit ) { Pager.eachPage[Subscription] { offset => subscriptionsDao.findAll( Authorization.All, organization = Some(organization), publication = Some(publication), limit = 100, offset = offset ) } { subscription => isAuthorized(context, organization, subscription.user) match { case false => { Logger.info(s"Emails: publication[$publication] subscription[$subscription] - not authorized for context[$context]. Skipping email") } case true => { Logger.info(s"Emails: delivering email for publication[$publication] subscription[$subscription]") f(subscription) } } } } private[actors] def isAuthorized( context: Emails.Context, organization: Organization, user: User ): Boolean = { context match { case Emails.Context.Application(app) => { app.visibility match { case Visibility.Public => true case Visibility.User | Visibility.Organization => { applicationsDao.findByGuid(Authorization.User(user.guid), app.guid) match { case None => false case Some(_) => true } } case Visibility.UNDEFINED(name) => { Logger.warn(s"Undefined visibility[$name] -- default behaviour assumes NOT AUTHORIZED") false } } } case Emails.Context.OrganizationAdmin => { membershipsDao.isUserAdmin(user, organization) } case Emails.Context.OrganizationMember => { membershipsDao.isUserMember(user, organization) } } } def sendErrors( subject: String, errors: Seq[String] ) { errors match { case Nil => {} case errors => { val body = views.html.emails.errors(errors).toString sendErrorsTo.foreach { email => Email.sendHtml( to = Person(email), subject = subject, body = body ) } } } } }
movio/apidoc
api/app/actors/Emails.scala
Scala
mit
3,984
package december2016 import java.security.MessageDigest import scala.util.Random /** * Created by Chongguang on 2016/12/17. */ object Day17 { def md5(s: String): String = { MessageDigest.getInstance("MD5").digest(s.getBytes).map("%02X" format _).mkString.toLowerCase } val passcode = "pxxbnzuo" def isOpen(char: Char): Boolean = { char == 'b' || char == 'c' || char =='d' || char == 'e' || char == 'f' } case class Step(x: Int, y:Int, path: String) def possibleMove(s: Step, code: String): List[Step] = { val doorsState = md5(code + s.path).take(4).toList.map(isOpen) val nextSteps = List( Step(s.x, s.y-1, s.path :+ 'U'), Step(s.x, s.y+1, s.path :+ 'D'), Step(s.x-1, s.y, s.path :+ 'L'), Step(s.x+1, s.y, s.path :+ 'R') ) (nextSteps zip doorsState).filter(p=> p._1.x >=0 && p._1.x <=3 && p._1.y >=0 && p._1.y <=3 && p._2 ).map(p=>p._1) } val stepMaxPath = Step(0,0,Random.alphanumeric.take(100).mkString) val stepMinPath = Step(0,0,"") def path(current: Step, code: String, isShortest: Boolean): Step = { if(current.x == 3 && current.y == 3) current else { val moves = possibleMove(current, code) if(moves.isEmpty) if (isShortest) stepMaxPath else stepMinPath else { val paths = for (m <- moves ) yield { path(m, code, isShortest) } if (isShortest) paths.minBy(_.path.length) else paths.maxBy(_.path.length) } } } def main(args: Array[String]): Unit = { val sp = path(Step(0,0,""), passcode, isShortest = true) println(sp.path) val lp = path(Step(0,0,""), passcode, isShortest = false) print(lp.path.length) } }
chongguang/adventofcode
src/main/scala/december2016/Day17.scala
Scala
mit
1,755
package net.itadinanta.rnkr.engine

import net.itadinanta.rnkr.core.arbiter.Arbiter
import akka.pattern._
import akka.actor._
import net.itadinanta.rnkr.core.arbiter.ActorArbiter
import net.itadinanta.rnkr.core.arbiter.ActorGateWrapper
import Leaderboard._
import net.itadinanta.rnkr.core.arbiter.Gate

/**
 * Implements the leaderboard contract on top of an Arbiter and a mutable
 * pre-populated LeaderboardBuffer
 */
sealed trait ConcurrentLeaderboard extends Leaderboard with Arbiter[LeaderboardBuffer] {
  import Leaderboard._

  override def ->[T](cmd: Command[T]) = cmd match {
    case c: Read[_] => rqueue(_ -> c)(c.tag)
    case c: Write => wqueue(_ -> c)(c.tag)
  }
}

object ConcurrentLeaderboard {

  /**
   * Wraps a non-concurrent mutable leaderboard buffer into an Arbiter which is bound to an actor
   *
   * @param buffer the LeaderboardBuffer that needs to be concurrent-safe
   * @param name the name of the actor that will be created to implement the Arbiter
   * @param context the parent of the actor
   */
  def apply(buffer: LeaderboardBuffer, name: String)(implicit context: ActorRefFactory): Leaderboard =
    new ActorArbiter(buffer, name) with ConcurrentLeaderboard
}
itadinanta/rnkr
rnkr-engine/src/main/scala/net/itadinanta/rnkr/engine/ConcurrentLeaderboard.scala
Scala
gpl-2.0
1,180
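A hypothetical wiring sketch for ConcurrentLeaderboard.apply above; the buffer value and the actor system are placeholders, and only the factory signature shown in the file (types in the same package) is relied on.

import akka.actor.ActorSystem

object LeaderboardWiringSketch {
  implicit val system: ActorSystem = ActorSystem("rnkr")

  // Wrap a pre-populated, non-thread-safe buffer so that all reads and writes
  // are serialised through the actor-backed arbiter.
  def wire(buffer: LeaderboardBuffer): Leaderboard =
    ConcurrentLeaderboard(buffer, "leaderboard-1")
}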
package com.github.etacassiopeia.s99.arithmetic

/**
 * <h1>P38</h1>
 * Compare the two methods of calculating Euler's totient function.
 * Use the solutions of problems P34 and P37 to compare the algorithms. Try to calculate Phi(10090) as an example.
 *
 * @author Mohsen Zainalpour
 * @version 1.0
 * @since 16/05/16
 */
object P38 {

  def main(args: Array[String]) {
    import S99Int._

    var start = System.nanoTime()
    println(s"${10090.totient} : ${System.nanoTime() - start}")

    start = System.nanoTime()
    println(s"${10090.totientImproved} : ${System.nanoTime() - start}")
  }
}
EtaCassiopeia/S-99
src/main/scala/com/github/etacassiopeia/s99/arithmetic/P38.scala
Scala
apache-2.0
602
package org.ausdigital.apecconnect.db.model import play.api.libs.functional.syntax._ import play.api.libs.json.Reads._ import play.api.libs.json._ import play.api.mvc.PathBindable import scala.language.implicitConversions import scalaz.Equal /** * Container for persisted piece of data, its id and metadata. * @param id the id of the persisted data. * @param data the persisted data. * @param metaData the metadata. * @tparam M the type of the persisted data. */ final case class Record[M](id: RecordId[Record[M]], data: M, metaData: MetaData) extends HasMetaData[Record[M]] with Identifiable[RecordId[Record[M]], Record[M]] { override def updateMetaData(metaData: MetaData = metaData): Record[M] = copy(metaData = metaData) override def updateId(id: RecordId[Record[M]]): Record[M] = copy(id = id) def updateData(f: (M) => M): Record[M] = copy(data = f(data)) } /** * Companion that contains the JSON writer. */ object Record { /** * Writes for Records that flattens the data down to the same level as the id. * * @param dataWrites the object writer for the data contained in the record. * @tparam M the type of the data contained in the record. * @return the writes for the Record containing an M. */ implicit def writes[M](implicit dataWrites: OWrites[M]): Writes[Record[M]] = new Writes[Record[M]] { override def writes(o: Record[M]): JsValue = dataWrites.writes(o.data) ++ Json.obj("id" -> o.id, "metaData" -> o.metaData) } /** * Reads from the JSON format of the Record and convert it to a persisted data model. * * @param dataReads the object reader for the data. * @tparam M the type of the data contained in the record. * @return the reads of the Record. */ implicit def reads[M](implicit dataReads: Reads[M]): Reads[Record[M]] = ( (JsPath \ "id").read[RecordId[Record[M]]] and JsPath.read[M] and (JsPath \ "metaData").read[MetaData] )((id, data, metaData) => Record[M](id, data, metaData)) } /** * Identifier for a Record. Means by which a Record becomes Identifiable. * * @param value the identifier of the record. * @tparam M the type identified by this identifier. */ final case class RecordId[M](value: Long) extends AnyVal object RecordId { implicit def format[M]: Format[RecordId[M]] = new Format[RecordId[M]] { override def writes(o: RecordId[M]): JsValue = JsNumber(o.value) override def reads(json: JsValue): JsResult[RecordId[M]] = json.validate[Long].map(RecordId(_)) } /** * Allows record ids to be parsed from URL paths (e.g. in route files). */ implicit def pathBinder[M](implicit longBinder: PathBindable[Long]): PathBindable[RecordId[M]] = new PathBindable[RecordId[M]] { override def bind(key: String, value: String): Either[String, RecordId[M]] = longBinder.bind(key, value).right.map(RecordId.apply) override def unbind(key: String, recordId: RecordId[M]): String = longBinder.unbind(key, recordId.value) } implicit def ordering[M]: Ordering[RecordId[M]] = Ordering.by { recordId: RecordId[M] => recordId.value } implicit def equals[M]: Equal[RecordId[M]] = Equal.equalA[RecordId[M]] } /** * Implicits and type-class implementations for [[Record]]. */ trait RecordOps { import scalaz._ implicit val recordEqual: Equal[Record[Int]] = Equal.equalA implicit def recordToData[A](record: Record[A]): A = record.data } object RecordOps extends RecordOps
TeamAusDigital/apec-connect
server/modules/db/src/main/scala/org/ausdigital/apecconnect/db/model/Record.scala
Scala
apache-2.0
3,453
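A small sketch of what the flattening Writes in Record.scala above produces; the Widget type and the example values are hypothetical, and the exact metaData shape depends on the MetaData format defined elsewhere in that module.

import play.api.libs.json._

final case class Widget(name: String, weight: Int)

object WidgetJson {
  implicit val widgetFormat: OFormat[Widget] = Json.format[Widget]

  // Serialising a Record[Widget] puts the widget's own fields at the top level,
  // next to "id" and "metaData", rather than nesting them under a "data" key:
  //   { "name": "sprocket", "weight": 3, "id": 42, "metaData": { ... } }
  def toJson(record: Record[Widget]): JsValue = Json.toJson(record)
}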
package blended.itestsupport.http

import scala.concurrent.Await
import scala.concurrent.duration._

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Route
import blended.util.logging.Logger

object TestServer {

  private[this] val log = Logger[TestServer.type]

  def withServer(route: Route)(f: Int => Unit)(implicit actorSystem: ActorSystem): Unit = {
    val serverFut = Http().bindAndHandle(route, "localhost", 0)
    val server = Await.result(serverFut, 10.seconds)

    try {
      log.info(s"Started test HTTP server on ${server.localAddress}")
      f(server.localAddress.getPort)
    } finally {
      log.info(s"Stopping test HTTP server on ${server.localAddress}")
      Await.result(server.unbind(), 10.seconds)
    }
  }
}
woq-blended/blended
blended.itestsupport/src/test/scala/blended/itestsupport/http/TestServer.scala
Scala
apache-2.0
802
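A minimal usage sketch for TestServer.withServer above, assuming an Akka HTTP route and an ActorSystem; the route and the body of the callback are placeholders.

import akka.actor.ActorSystem
import akka.http.scaladsl.server.Directives._

object TestServerUsageSketch {
  implicit val system: ActorSystem = ActorSystem("http-test")

  // Bind a throwaway route on a random free port, run the test body, then unbind.
  def run(): Unit =
    TestServer.withServer(path("ping") { complete("pong") }) { port =>
      println(s"test server listening on http://localhost:$port/ping")
    }
}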
/* * (c) Copyright 2016 Hewlett Packard Enterprise Development LP * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package cogx.compiler.codegenerator.opencl.hyperkernels import cogx.compiler.codegenerator.opencl.fragments.{AddressingMode, HyperKernel} import cogx.platform.types._ import cogx.platform.types.ElementTypes.Float32 import cogx.compiler.parser.op.PolarToComplexOp import cogx.compiler.codegenerator.common.FieldPolicies._ /** Kernel that converts two real scalar fields specifying the magnitude and * phase of a vector (i.e. polar coordinates) to a single complex scalar field. * * @author Dick Carter * * @param in The two input virtual field registers driving this kernel. * @param operation The opcode for this operation. * @param resultType The FieldType of the result of this kernel. * @param addressMode The addressing mode of this kernel. */ private[cogx] class MagnitudePhaseToComplexHyperKernel private (in: Array[VirtualFieldRegister], operation: Opcode, resultType: FieldType, addressMode: AddressingMode) extends HyperKernel(operation, in, resultType, addressMode) { val code = new StringBuilder code append " float magnitude = read(@in0);\n" code append " float phase = read(@in1);\n" code append " float real = magnitude * native_cos(phase);\n" code append " float imaginary = magnitude * native_sin(phase);\n" code append " @out0 = (float2) (real, imaginary);\n" addCode(code.toString()) // debugCompile() } /** Factory object for creating kernels of this type. */ private[cogx] object MagnitudePhaseToComplexHyperKernel extends HyperHelper { /** * Create a kernel that converts two real scalar fields specifying the magnitude and * phase of a vector (i.e. polar coordinates) to a single complex scalar field. * * @param in The two input virtual field registers driving this kernel. * @param operation The opcode for this operation. * @param resultType The FieldType of the result of this kernel. * @return Synthesized hyperkernel for the operation. */ def apply(in: Array[VirtualFieldRegister], operation: Opcode, resultType: FieldType): HyperKernel = { require(in.length == 2) val in0Type = in(0).fieldType val in1Type = in(1).fieldType require(in0Type.elementType == Float32) require(in0Type == in1Type) val expectedResultType = toComplex(in0Type) require(resultType == expectedResultType) require(operation == PolarToComplexOp) val addressing = bestAddressMode(in, resultType) new MagnitudePhaseToComplexHyperKernel(in, operation, resultType, addressing) } }
hpe-cct/cct-core
src/main/scala/cogx/compiler/codegenerator/opencl/hyperkernels/MagnitudePhaseToComplexHyperKernel.scala
Scala
apache-2.0
3,250
/* * Copyright 2015 Avira Operations GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package com.avira.ds.sparser.test.deprecated import com.avira.ds.sparser.{ParseError, ParseResult, Parser} import org.scalatest.WordSpec import scala.reflect.ClassTag import scala.reflect.runtime.{universe => ru} @deprecated("Use the new com.avira.ds.sparser.test.ParserTestSuite", "0.1.0-SNAPSHOT") abstract class ParserTestSuite[I, O: ru.TypeTag : ClassTag] extends WordSpec { def parser: Parser[I, O] def tests: Seq[ParserTest[I]] start() private val _mirror = ru.runtimeMirror(getClass.getClassLoader) def start(): Unit = { for (test <- tests) { s"""Parsing input "${test.name}"""" should { val ParseResult(actualValueOption, errors, _) = parser.parse(test.input) val errorClasses = errors.map(_.getClass).toSet // Check field values. actualValueOption.foreach { actualValue => for ((fieldName, expectedFieldValue) <- test.expectedValue.fields) { s"""extract field "$fieldName"""" in { assert(getFieldValue(actualValue, fieldName) == expectedFieldValue) } } } // Check errors. for (expectedErrorClass <- test.expectedErrors.errorClasses) { s"""report error "${expectedErrorClass.getCanonicalName}"""" in { assert(errorClasses.contains(expectedErrorClass)) } } } } } private def getFieldValue[T : ru.TypeTag : ClassTag](obj: T, fieldName: String): Any = { val fieldTermSymbol = getTypeTag(obj).tpe.declaration(ru.newTermName(fieldName)).asTerm val objMirror = _mirror.reflect(obj) val fieldMirror = objMirror.reflectField(fieldTermSymbol) fieldMirror.get } def getTypeTag[T : ru.TypeTag](obj: T): ru.TypeTag[T] = ru.typeTag[T] } @deprecated("Use the new com.avira.ds.sparser.test.ParserTestSuite", "0.1.0-SNAPSHOT") case class ParserTest[I]( name: String, input: I, expectedValue: ExpectedValueOption, expectedErrors: ExpectedErrors = ExpectedErrors()) @deprecated("Use the new com.avira.ds.sparser.test.ParserTestSuite", "0.1.0-SNAPSHOT") sealed abstract class ExpectedValueOption { def fields: Seq[(String, Any)] } @deprecated("Use the new com.avira.ds.sparser.test.ParserTestSuite", "0.1.0-SNAPSHOT") case object ExpectedNoValue extends ExpectedValueOption { override val fields: Seq[(String, Any)] = Seq() } @deprecated("Use the new com.avira.ds.sparser.test.ParserTestSuite", "0.1.0-SNAPSHOT") case class ExpectedValue(override val fields: (String, Any)*) extends ExpectedValueOption @deprecated("Use the new com.avira.ds.sparser.test.ParserTestSuite", "0.1.0-SNAPSHOT") case class ExpectedErrors(errorClasses: Class[_ <: ParseError]*)
Avira/sparser
test/src/main/scala/com/avira/ds/sparser/test/deprecated/ParserTestSuite.scala
Scala
apache-2.0
3,291
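A hypothetical example of a concrete suite built on the deprecated ParserTestSuite above; the Person type, the input format, and the parser stub are invented for illustration (a real suite would supply an actual Parser implementation instead of ???).

import com.avira.ds.sparser.Parser

final case class Person(name: String, age: Int)

class PersonParserSpec extends ParserTestSuite[String, Person] {
  // Placeholder only: substitute a real Parser[String, Person] here.
  override def parser: Parser[String, Person] = ???

  override def tests = Seq(
    ParserTest(
      name = "well-formed line",
      input = "ada\t36",
      expectedValue = ExpectedValue("name" -> "ada", "age" -> 36)
    )
  )
}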
/* * This program is licensed to you under the Apache License Version 2.0, * and you may not use this file except in compliance with the Apache License Version 2.0. * You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, * software distributed under the Apache License Version 2.0 is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. */ package com.mate1.camus2kafka import org.apache.avro.Schema import org.apache.hadoop.conf.Configuration import scala.annotation.tailrec import org.apache.hadoop.fs.{GlobFilter, FSDataInputStream, FileSystem, Path} import scala.io.Source import scala.collection.JavaConverters._ import org.apache.hadoop.io.LongWritable import com.mate1.camus2kafka.mapper.TimeBasedC2KMapper import com.mate1.camus2kafka.reducer.JsonC2KReducer /** * Created with IntelliJ IDEA. * User: Boris Fersing * Date: 9/27/13 * Time: 10:22 AM */ /** * This object contains the Camus2Kafka config that is used for a C2K job */ object C2KJobConfig { // String values for the config parameters we use val PREFIX = "c2k." val HDFS_INPUT_DIR = PREFIX+"hdfs.input.dir" val AVRO_OUTPUT_SCHEMA_PATH = PREFIX+"avro.output.schema.path" val KAFKA_REPLAY_TOPIC = PREFIX+"kafka.replay.topic" val KAFKA_TOPIC = PREFIX+"kafka.topic" val KAFKA_CONSUMER_GROUP = PREFIX+"kafka.consumer.group" val ZK_HOSTS = PREFIX+"zk.hosts" val CAMUS_DEST_DIR = PREFIX+"camus.dest.dir" val CAMUS_EXEC_DIR = PREFIX+"camus.exec.dir" val CAMUS_HISTORY_DIR = PREFIX+"camus.history.dir" val CAMUS_OFFSETS_DIR = PREFIX+"camus.offsets.dir" val REDUCER_CLASS = PREFIX+"reducer.class" val MAPPER_CLASS = PREFIX+"mapper.class" val MAPPER_OUTKEY_CLASS = PREFIX+"mapper.outkey.class" val SET_ZK_OFFSETS_ONLY = PREFIX+"set.zk.offsets.only" val LOCAL_TMP_DIR = PREFIX+"local.tmp.dir" val PRINTCONF = PREFIX+"printconf" // Map of required parameters with their description val requiredParams = Map( CAMUS_DEST_DIR -> "The camus destination directory.", CAMUS_EXEC_DIR -> "The camus execution directory.", AVRO_OUTPUT_SCHEMA_PATH -> "The HDFS path of the avro schema to be used to encode the Kafka messages.", KAFKA_TOPIC -> "The Kafka topic you want to process.", KAFKA_CONSUMER_GROUP -> ("The (high-level) Kafka consumer group whose ZK offsets will be set by Camus2Kafka so that " + "the regular Kafka topic can be stitched back at the correct cut off point after re-consuming all of the replay topic."), ZK_HOSTS -> "The zookeeper hosts Camus2Kafka will connect to." 
) // The config object that is used to get the lazy vals below var config : Configuration = null // Schema to be used to encode the messages we send to Kafka lazy val outputSchema = { val outputSchemaFile = new Path(config.get(AVRO_OUTPUT_SCHEMA_PATH)) val fs : FileSystem = outputSchemaFile.getFileSystem(config) val inputStream : FSDataInputStream = fs.open(outputSchemaFile) val SCHEMA = Source.fromInputStream(inputStream).mkString inputStream.close() new Schema.Parser().parse(SCHEMA) } // The original Kafka topic Camus read from lazy val sourceTopic = config.get(KAFKA_TOPIC) // The consumer group that will consume the logs lazy val consumerGroup = config.get(KAFKA_CONSUMER_GROUP) // The Camus destination dir that contains the data lazy val camusDestDir = config.get(CAMUS_DEST_DIR) // The Camus execution directory that contains the history and the offsets lazy val camusExecDir = config.get(CAMUS_EXEC_DIR) // The zookeeper hosts lazy val zkHosts = config.get(ZK_HOSTS) // Skip the mapreduce job and only set the ZK offsets lazy val setZKOffsetsOnly = config.getBoolean(SET_ZK_OFFSETS_ONLY, false) // The temp dir where the offsets files are copied before processing lazy val localTmpDir = config.get(LOCAL_TMP_DIR, "./offsets_files") // File filter for the offset files val offsetsFilter = new GlobFilter("offsets-m-*") // The Zookeeper consumer path lazy val zkConsumerPath = "/consumers/%s".format(consumerGroup) // The Zookeeper offsets path lazy val zkOffsetsPath = zkConsumerPath + "/offsets/%s".format(sourceTopic) /** * The vals below are optional params that have a default value based on the required params. */ // The HDFS input path that contains the avro data lazy val hdfsInputDir = config.get(HDFS_INPUT_DIR) match { case null => camusDestDir + "/" + sourceTopic + "/hourly/*/*/*/*" case path => path } // The Kafka topic where Camus2Kafka publishes all of the currently ingested records lazy val replayTopic = config.get(KAFKA_REPLAY_TOPIC) match { case null => sourceTopic+"_REPLAY" case topic => topic } // The Camus history dir that contains the offsets lazy val camusHistoryDir = config.get(CAMUS_HISTORY_DIR) match { case null => camusExecDir + "/base/history" case dir => dir } // The mapper class (Default is Camus2KafkaMapperByTime) lazy val mapperClass = config.get(MAPPER_CLASS) match { case null => classOf[TimeBasedC2KMapper].asInstanceOf[Class[AbstractC2KMapper[_]]] case className => Class.forName(className).asInstanceOf[Class[AbstractC2KMapper[_]]] } // The reducer class (Default is Camus2KafkaReducerByTime) lazy val reducerClass = config.get(REDUCER_CLASS) match { case null => classOf[JsonC2KReducer].asInstanceOf[Class[AbstractC2KReducer[_]]] case className => Class.forName(className).asInstanceOf[Class[AbstractC2KReducer[_]]] } // The Mapper Out Key class (Default is LongWritable) lazy val mapperOutKeyClass = config.get(MAPPER_OUTKEY_CLASS) match { case null => classOf[LongWritable] case className => Class.forName(className) } } /** * The C2KJobConfig trait contains methods used to access and set values in the C2KJobConfig object */ trait C2KJobConfig { import C2KJobConfig._ /** * Initialize the configuration. 
Must be called before running a C2K job * @param conf The config object * @return true if the config is valid, false otherwise */ protected def initConfig(conf: Configuration) : Boolean = { config match { case null => { if (validateConfig(conf)) { config = conf // Make sure that the Schema is valid outputSchema != null } else { false } } case _ => true } } /** * Check if the config is valid and tell the user about the missing params * @param conf The config object * @return true if the config is valid, false otherwise */ protected def validateConfig(conf: Configuration) : Boolean = { conf.get(PRINTCONF) match { case "custom" => conf.asScala.toList.filter(entry => entry.getKey.contains(PREFIX)).sortBy(entry => entry.getKey) .foreach(entry => println(entry.getKey+" : "+entry.getValue)) case "ALL" => conf.asScala.toList.sortBy(entry => entry.getKey).foreach(entry => println(entry.getKey+" : "+entry.getValue)) case _ => () } @tailrec def getMissingParams(required: List[String], missing: List[String]) : List[String] = required match { case Nil => missing case arg::tail => if (conf.get(arg) == null) getMissingParams(tail, arg::missing) else getMissingParams(tail, missing) } getMissingParams(requiredParams.keys.toList, Nil) match { case Nil => { true } case params => { println("Missing parameters:\\n") params.foreach(param => println("%s: %s".format(param, requiredParams.getOrElse(param, "")))) println() println("Please specify the parameters using the -D command line option.") println("Ex: hadoop jar camus2kafka.jar -D %s=value\\n".format(params.head)) false } } } }
mate1/camus2kafka
src/main/scala/com/mate1/camus2kafka/C2KJobConfig.scala
Scala
apache-2.0
8,040
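A hedged usage sketch for the C2KJobConfig pieces above: a hypothetical job entry point (MyC2KJob and its exit-code convention are not part of the original file) that calls initConfig before reading any of the lazy configuration values. Only initConfig, validateConfig, and the lazy vals on the companion object come from the file itself.

import org.apache.hadoop.conf.Configuration

object MyC2KJob extends C2KJobConfig {
  def run(conf: Configuration): Int =
    if (!initConfig(conf)) 1 // validateConfig has already printed the missing -D parameters
    else {
      // The lazy vals are only safe to read once initConfig has accepted the Configuration.
      println("Replaying %s into %s".format(C2KJobConfig.sourceTopic, C2KJobConfig.replayTopic))
      0
    }
}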
package sangria.execution.deferred import scala.collection.mutable.{Map => MutableMap, Set => MutableSet} import scala.concurrent.Future class Fetcher[Ctx, Res, RelRes, Id]( val idFn: Res => Id, val fetch: (FetcherContext[Ctx], Seq[Id]) => Future[Seq[Res]], val fetchRel: (FetcherContext[Ctx], RelationIds[Res]) => Future[Seq[RelRes]], val config: FetcherConfig ) { def defer(id: Id) = FetcherDeferredOne(this, id) def deferOpt(id: Id) = FetcherDeferredOpt(this, id) def deferOpt(id: Option[Id]) = FetcherDeferredOptOpt(this, id) def deferSeq(ids: Seq[Id]) = FetcherDeferredSeq(this, ids) def deferSeqOpt(ids: Seq[Id]) = FetcherDeferredSeqOpt(this, ids) def deferSeqOptExplicit(ids: Seq[Id]) = FetcherDeferredSeqOptExplicit(this, ids) def deferRel[RelId](rel: Relation[Res, RelRes, RelId], relId: RelId) = FetcherDeferredRel(this, rel, relId) def deferRelOpt[RelId](rel: Relation[Res, RelRes, RelId], relId: RelId) = FetcherDeferredRelOpt(this, rel, relId) def deferRelSeq[RelId](rel: Relation[Res, RelRes, RelId], relId: RelId) = FetcherDeferredRelSeq(this, rel, relId) def deferRelSeqMany[RelId](rel: Relation[Res, RelRes, RelId], relIds: Seq[RelId]) = FetcherDeferredRelSeqMany(this, rel, relIds) def clearCache(deferredResolverState: Any) = findCache(deferredResolverState)(_.clear()) def clearCachedId(deferredResolverState: Any, id: Id) = findCache(deferredResolverState)(_.clearId(id)) def clearCachedRel(deferredResolverState: Any, rel: Relation[Res, _, _]) = findCache(deferredResolverState)(_.clearRel(rel)) def clearCachedRelId[RelId]( deferredResolverState: Any, rel: Relation[Res, _, RelId], relId: RelId) = findCache(deferredResolverState)(_.clearRelId(rel, relId)) private def findCache(deferredResolverState: Any)(op: FetcherCache => Unit): Unit = deferredResolverState match { case map: Map[AnyRef, FetcherCache] @unchecked => map.get(this).foreach(op) case _ => // just ignore } def ids(deferred: Vector[Deferred[Any]]): Vector[Id] = { val allIds = MutableSet[Id]() deferred.foreach { case FetcherDeferredOne(s, id) if s eq this => allIds += id.asInstanceOf[Id] case FetcherDeferredOpt(s, id) if s eq this => allIds += id.asInstanceOf[Id] case FetcherDeferredOptOpt(s, Some(id)) if s eq this => allIds += id.asInstanceOf[Id] case FetcherDeferredSeq(s, ids) if s eq this => allIds ++= ids.asInstanceOf[Seq[Id]] case FetcherDeferredSeqOpt(s, ids) if s eq this => allIds ++= ids.asInstanceOf[Seq[Id]] case FetcherDeferredSeqOptExplicit(s, ids) if s eq this => allIds ++= ids.asInstanceOf[Seq[Id]] case _ => // skip } allIds.toVector } def relIds(deferred: Vector[Deferred[Any]]): Map[Relation[Any, Any, Any], Vector[Any]] = { val allIds = MutableMap[Relation[Any, Any, Any], MutableSet[Any]]() def addToSet(rel: Relation[Any, Any, Any], id: Any) = allIds.getOrElseUpdate(rel, MutableSet[Any]()) += id deferred.foreach { case FetcherDeferredRel(s, rel, relId) if s eq this => addToSet(rel, relId) case FetcherDeferredRelOpt(s, rel, relId) if s eq this => addToSet(rel, relId) case FetcherDeferredRelSeq(s, rel, relId) if s eq this => addToSet(rel, relId) case FetcherDeferredRelSeqMany(s, rel, relIds) if s eq this => relIds.foreach(addToSet(rel, _)) case _ => // skip } allIds.map { case (k, v) => k -> v.toVector }.toMap } def isRel(deferred: Deferred[Any]) = deferred match { case FetcherDeferredRel(_, _, _) | FetcherDeferredRelOpt(_, _, _) | FetcherDeferredRelSeq(_, _, _) | FetcherDeferredRelSeqMany(_, _, _) => true case _ => false } } object Fetcher { private def relationUnsupported[Ctx, Id, Res] : (FetcherContext[Ctx], 
RelationIds[Res]) => Future[Seq[Res]] = (_, _) => Future.failed(new RelationNotSupportedError) private def relationOnlySupported[Ctx, Id, Res] : (FetcherContext[Ctx], Seq[Id]) => Future[Seq[Res]] = (_, _) => Future.failed(new RelationOnlySupportedError) def apply[Ctx, Res, Id]( fetch: (Ctx, Seq[Id]) => Future[Seq[Res]], config: FetcherConfig = FetcherConfig.empty)(implicit id: HasId[Res, Id]): Fetcher[Ctx, Res, Res, Id] = new Fetcher[Ctx, Res, Res, Id]( i => id.id(i), (c, ids) => fetch(c.ctx, ids), relationUnsupported, config) def withContext[Ctx, Res, Id]( fetch: (FetcherContext[Ctx], Seq[Id]) => Future[Seq[Res]], config: FetcherConfig = FetcherConfig.empty)(implicit id: HasId[Res, Id]): Fetcher[Ctx, Res, Res, Id] = new Fetcher[Ctx, Res, Res, Id](i => id.id(i), fetch, relationUnsupported, config) def rel[Ctx, Res, RelRes, Id]( fetch: (Ctx, Seq[Id]) => Future[Seq[Res]], fetchRel: (Ctx, RelationIds[Res]) => Future[Seq[RelRes]], config: FetcherConfig = FetcherConfig.empty)(implicit id: HasId[Res, Id]): Fetcher[Ctx, Res, RelRes, Id] = new Fetcher[Ctx, Res, RelRes, Id]( i => id.id(i), (c, ids) => fetch(c.ctx, ids), (c, ids) => fetchRel(c.ctx, ids), config) def relWithContext[Ctx, Res, RelRes, Id]( fetch: (FetcherContext[Ctx], Seq[Id]) => Future[Seq[Res]], fetchRel: (FetcherContext[Ctx], RelationIds[Res]) => Future[Seq[RelRes]], config: FetcherConfig = FetcherConfig.empty)(implicit id: HasId[Res, Id]): Fetcher[Ctx, Res, RelRes, Id] = new Fetcher[Ctx, Res, RelRes, Id](i => id.id(i), fetch, fetchRel, config) def relOnly[Ctx, Res, RelRes, Id]( fetchRel: (Ctx, RelationIds[Res]) => Future[Seq[RelRes]], config: FetcherConfig = FetcherConfig.empty)(implicit id: HasId[Res, Id]): Fetcher[Ctx, Res, RelRes, Id] = new Fetcher[Ctx, Res, RelRes, Id]( i => id.id(i), relationOnlySupported, (c, ids) => fetchRel(c.ctx, ids), config) def relOnlyWithContext[Ctx, Res, RelRes, Id]( fetchRel: (FetcherContext[Ctx], RelationIds[Res]) => Future[Seq[RelRes]], config: FetcherConfig = FetcherConfig.empty)(implicit id: HasId[Res, Id]): Fetcher[Ctx, Res, RelRes, Id] = new Fetcher[Ctx, Res, RelRes, Id](i => id.id(i), relationOnlySupported, fetchRel, config) def caching[Ctx, Res, Id]( fetch: (Ctx, Seq[Id]) => Future[Seq[Res]], config: FetcherConfig = FetcherConfig.caching)(implicit id: HasId[Res, Id]): Fetcher[Ctx, Res, Res, Id] = new Fetcher[Ctx, Res, Res, Id]( i => id.id(i), (c, ids) => fetch(c.ctx, ids), relationUnsupported, config) def cachingWithContext[Ctx, Res, Id]( fetch: (FetcherContext[Ctx], Seq[Id]) => Future[Seq[Res]], config: FetcherConfig = FetcherConfig.caching)(implicit id: HasId[Res, Id]): Fetcher[Ctx, Res, Res, Id] = new Fetcher[Ctx, Res, Res, Id](i => id.id(i), fetch, relationUnsupported, config) def relCaching[Ctx, Res, RelRes, Id]( fetch: (Ctx, Seq[Id]) => Future[Seq[Res]], fetchRel: (Ctx, RelationIds[Res]) => Future[Seq[RelRes]], config: FetcherConfig = FetcherConfig.caching)(implicit id: HasId[Res, Id]): Fetcher[Ctx, Res, RelRes, Id] = new Fetcher[Ctx, Res, RelRes, Id]( i => id.id(i), (c, ids) => fetch(c.ctx, ids), (c, ids) => fetchRel(c.ctx, ids), config) def relCachingWithContext[Ctx, Res, RelRes, Id]( fetch: (FetcherContext[Ctx], Seq[Id]) => Future[Seq[Res]], fetchRel: (FetcherContext[Ctx], RelationIds[Res]) => Future[Seq[RelRes]], config: FetcherConfig = FetcherConfig.caching)(implicit id: HasId[Res, Id]): Fetcher[Ctx, Res, RelRes, Id] = new Fetcher[Ctx, Res, RelRes, Id](i => id.id(i), fetch, fetchRel, config) def relOnlyCaching[Ctx, Res, RelRes, Id]( fetchRel: (Ctx, RelationIds[Res]) => 
Future[Seq[RelRes]], config: FetcherConfig = FetcherConfig.caching)(implicit id: HasId[Res, Id]): Fetcher[Ctx, Res, RelRes, Id] = new Fetcher[Ctx, Res, RelRes, Id]( i => id.id(i), relationOnlySupported, (c, ids) => fetchRel(c.ctx, ids), config) def relOnlyCachingWithContext[Ctx, Res, RelRes, Id]( fetchRel: (FetcherContext[Ctx], RelationIds[Res]) => Future[Seq[RelRes]], config: FetcherConfig = FetcherConfig.caching)(implicit id: HasId[Res, Id]): Fetcher[Ctx, Res, RelRes, Id] = new Fetcher[Ctx, Res, RelRes, Id](i => id.id(i), relationOnlySupported, fetchRel, config) } case class FetcherConfig( cacheConfig: Option[() => FetcherCache] = None, maxBatchSizeConfig: Option[Int] = None, maxConcurrentBatchesConfig: Option[Int] = None) { def caching = copy(cacheConfig = Some(() => FetcherCache.simple)) def caching(cache: FetcherCache) = copy(cacheConfig = Some(() => cache)) def maxBatchSize(size: Int) = copy(maxBatchSizeConfig = Some(size)) def maxConcurrentBatches(numBatches: Int) = copy(maxConcurrentBatchesConfig = Some(numBatches)) } object FetcherConfig { val empty = FetcherConfig() def caching = empty.caching def caching(cache: FetcherCache) = empty.caching(cache) def maxBatchSize(size: Int) = empty.maxBatchSize(size) def maxConcurrentBatches(numBatches: Int) = empty.maxConcurrentBatches(numBatches) } trait DeferredOne[+T, Id] extends Deferred[T] { def id: Id } trait DeferredOpt[+T, Id] extends Deferred[Option[T]] { def id: Id } trait DeferredOptOpt[+T, Id] extends Deferred[Option[T]] { def id: Option[Id] } trait DeferredSeq[+T, Id] extends Deferred[Seq[T]] { def ids: Seq[Id] } trait DeferredSeqOpt[+T, Id] extends Deferred[Seq[Option[T]]] { def ids: Seq[Id] } trait DeferredRel[T, RelId] extends Deferred[T] { def rel: Relation[T, _, RelId] def relId: RelId } trait DeferredRelOpt[T, RelId] extends Deferred[Option[T]] { def rel: Relation[T, _, RelId] def relId: RelId } trait DeferredRelSeq[T, RelId] extends Deferred[Seq[T]] { def rel: Relation[T, _, RelId] def relId: RelId } trait DeferredRelSeqMany[T, RelId] extends Deferred[Seq[T]] { def rel: Relation[T, _, RelId] def relIds: Seq[RelId] } case class FetcherDeferredOne[Ctx, T, RT, Id](source: Fetcher[Ctx, T, RT, Id], id: Id) extends DeferredOne[T, Id] case class FetcherDeferredOpt[Ctx, T, RT, Id](source: Fetcher[Ctx, T, RT, Id], id: Id) extends DeferredOpt[T, Id] case class FetcherDeferredOptOpt[Ctx, T, RT, Id](source: Fetcher[Ctx, T, RT, Id], id: Option[Id]) extends DeferredOptOpt[T, Id] case class FetcherDeferredSeq[Ctx, T, RT, Id](source: Fetcher[Ctx, T, RT, Id], ids: Seq[Id]) extends DeferredSeq[T, Id] case class FetcherDeferredSeqOpt[Ctx, T, RT, Id](source: Fetcher[Ctx, T, RT, Id], ids: Seq[Id]) extends DeferredSeq[T, Id] case class FetcherDeferredSeqOptExplicit[Ctx, T, RT, Id]( source: Fetcher[Ctx, T, RT, Id], ids: Seq[Id]) extends DeferredSeqOpt[T, Id] case class FetcherDeferredRel[Ctx, RelId, T, Tmp, Id]( source: Fetcher[Ctx, T, Tmp, Id], rel: Relation[T, Tmp, RelId], relId: RelId) extends DeferredRel[T, RelId] case class FetcherDeferredRelOpt[Ctx, RelId, T, Tmp, Id]( source: Fetcher[Ctx, T, Tmp, Id], rel: Relation[T, Tmp, RelId], relId: RelId) extends DeferredRelOpt[T, RelId] case class FetcherDeferredRelSeq[Ctx, RelId, T, Tmp, Id]( source: Fetcher[Ctx, T, Tmp, Id], rel: Relation[T, Tmp, RelId], relId: RelId) extends DeferredRelSeq[T, RelId] case class FetcherDeferredRelSeqMany[Ctx, RelId, T, Tmp, Id]( source: Fetcher[Ctx, T, Tmp, Id], rel: Relation[T, Tmp, RelId], relIds: Seq[RelId]) extends DeferredRelSeqMany[T, RelId] trait Relation[T, 
Tmp, RelId] { def relIds(value: Tmp): Seq[RelId] def map(value: Tmp): T } object Relation { def apply[T, RelId](name: String, idFn: T => Seq[RelId]): Relation[T, T, RelId] = SimpleRelation[T, T, RelId](name)(idFn, identity) def apply[T, Tmp, RelId]( name: String, idFn: Tmp => Seq[RelId], mapFn: Tmp => T): Relation[T, Tmp, RelId] = SimpleRelation[T, Tmp, RelId](name)(idFn, mapFn) } abstract class AbstractRelation[T, Tmp, RelId](idFn: Tmp => Seq[RelId], mapFn: Tmp => T) extends Relation[T, Tmp, RelId] { def relIds(value: Tmp) = idFn(value) def map(value: Tmp) = mapFn(value) } case class SimpleRelation[T, Tmp, RelId](name: String)(idFn: Tmp => Seq[RelId], mapFn: Tmp => T) extends AbstractRelation[T, Tmp, RelId](idFn, mapFn) case class RelationIds[Res](rawIds: Map[Relation[Res, _, _], Seq[_]]) { def apply[RelId](relation: Relation[Res, _, RelId]): Seq[RelId] = get[RelId](relation).getOrElse(Vector.empty) def get[RelId](relation: Relation[Res, _, RelId]): Option[Seq[RelId]] = rawIds.get(relation).asInstanceOf[Option[Seq[RelId]]] } case class FetcherContext[Ctx]( ctx: Ctx, fetcher: Fetcher[Ctx, _, _, _], cache: Option[FetcherCache], allFetcherCaches: Map[AnyRef, FetcherCache], allFetchers: Vector[Fetcher[Ctx, _, _, _]] ) { def cacheFor(fetcher: Fetcher[_, _, _, _]): Option[FetcherCache] = allFetcherCaches.get(fetcher) } class RelationNotSupportedError extends Exception(s"Relations are not supported by Fetcher.") class RelationOnlySupportedError extends Exception(s"Only relations are supported by Fetcher.")
OlegIlyenko/sangria
modules/core/src/main/scala/sangria/execution/deferred/Fetcher.scala
Scala
apache-2.0
13,219
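A hedged usage sketch of the Fetcher API defined above. Product, ProductRepo, loadByIds and loadByCategory are hypothetical application types; Fetcher.rel, Relation, RelationIds, deferSeq and deferRelSeq come from this file, and HasId is assumed to provide its usual companion apply.

import scala.concurrent.Future

object FetcherExample {
  case class Product(id: String, categoryId: String)

  trait ProductRepo {
    def loadByIds(ids: Seq[String]): Future[Seq[Product]]
    def loadByCategory(categoryIds: Seq[String]): Future[Seq[Product]]
  }

  implicit val productHasId: HasId[Product, String] = HasId(_.id)

  // A relation keyed by category id, used for the fetchRel side of the fetcher.
  val byCategory = Relation[Product, String]("byCategory", p => Seq(p.categoryId))

  val products = Fetcher.rel(
    (repo: ProductRepo, ids: Seq[String]) => repo.loadByIds(ids),
    (repo: ProductRepo, rels: RelationIds[Product]) => repo.loadByCategory(rels(byCategory)))

  // In a field resolver: products.deferSeq(ctx.value.productIds)
  // Or via the relation:  products.deferRelSeq(byCategory, ctx.value.categoryId)
}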
package zzb.rest import shapeless._ import zzb.rest.unmarshalling.MalformedContent trait ConjunctionMagnet[L <: HList] { type Out def apply(underlying: Directive[L]): Out } object ConjunctionMagnet { implicit def fromDirective[L <: HList, R <: HList](other: Directive[R])(implicit p: Prepender[L, R]) = new ConjunctionMagnet[L] { type Out = Directive[p.Out] def apply(underlying: Directive[L]): Out = new Directive[p.Out] { def happly(f: p.Out ⇒ Route) = underlying.happly { prefix ⇒ other.happly { suffix ⇒ f(p(prefix, suffix)) } } } } implicit def fromStandardRoute[L <: HList](route: StandardRoute) = new ConjunctionMagnet[L] { type Out = StandardRoute def apply(underlying: Directive[L]): Out = StandardRoute(underlying.happly(_ ⇒ route)) } implicit def fromRouteGenerator[T, R <: Route](generator: T ⇒ R) = new ConjunctionMagnet[HNil] { type Out = RouteGenerator[T] def apply(underlying: Directive0): Out = { value ⇒ underlying.happly(_ ⇒ generator(value)) } } } abstract class Directive[L <: HList] { self ⇒ def happly(f: L ⇒ Route): Route def |[R >: L <: HList](that: Directive[R]): Directive[R] = recover(rejections ⇒ directives.BasicDirectives.mapRejections(rejections ::: _) & that) def &(magnet: ConjunctionMagnet[L]): magnet.Out = magnet(this) def as[T](deserializer: HListDeserializer[L, T]): Directive1[T] = new Directive1[T] { def happly(f: T :: HNil ⇒ Route) = self.happly { values ⇒ ctx ⇒ deserializer(values) match { case Right(t) ⇒ f(t :: HNil)(ctx) case Left(MalformedContent(msg, cause)) ⇒ ctx.reject(ValidationRejection(msg, cause)) case Left(error) ⇒ ctx.reject(ValidationRejection(error.toString)) } } } def hmap[R](f: L ⇒ R)(implicit hl: HListable[R]): Directive[hl.Out] = new Directive[hl.Out] { def happly(g: hl.Out ⇒ Route) = self.happly { values ⇒ g(hl(f(values))) } } def hflatMap[R <: HList](f: L ⇒ Directive[R]): Directive[R] = new Directive[R] { def happly(g: R ⇒ Route) = self.happly { values ⇒ f(values).happly(g) } } def hrequire(predicate: L ⇒ Boolean, rejections: Rejection*): Directive0 = new Directive0 { def happly(f: HNil ⇒ Route) = self.happly { values ⇒ ctx ⇒ if (predicate(values)) f(HNil)(ctx) else ctx.reject(rejections: _*) } } def recover[R >: L <: HList](recovery: List[Rejection] ⇒ Directive[R]): Directive[R] = new Directive[R] { def happly(f: R ⇒ Route) = { ctx ⇒ @volatile var rejectedFromInnerRoute = false self.happly({ list ⇒ c ⇒ rejectedFromInnerRoute = true; f(list)(c) }) { ctx.withRejectionHandling { rejections ⇒ if (rejectedFromInnerRoute) ctx.reject(rejections: _*) else recovery(rejections).happly(f)(ctx) } } } } def recoverPF[R >: L <: HList](recovery: PartialFunction[List[Rejection], Directive[R]]): Directive[R] = recover { rejections ⇒ if (recovery.isDefinedAt(rejections)) recovery(rejections) else Route.toDirective(_.reject(rejections: _*)) } } object Directive { /** * A Directive that always passes the request on to its inner route (i.e. does nothing). 
*/ object Empty extends Directive0 { def happly(inner: HNil ⇒ Route) = inner(HNil) } implicit def pimpApply[L <: HList](directive: Directive[L])(implicit hac: ApplyConverter[L]): hac.In ⇒ Route = f ⇒ directive.happly(hac(f)) implicit class SingleValueModifiers[T](underlying: Directive1[T]) { def map[R](f: T ⇒ R)(implicit hl: HListable[R]): Directive[hl.Out] = underlying.hmap { case value :: HNil ⇒ f(value) } def flatMap[R <: HList](f: T ⇒ Directive[R]): Directive[R] = underlying.hflatMap { case value :: HNil ⇒ f(value) } def require(predicate: T ⇒ Boolean, rejections: Rejection*): Directive0 = underlying.hrequire({ case value :: HNil ⇒ predicate(value) }, rejections: _*) } }
stepover/zzb
zzb-rest/src/main/scala/zzb/rest/Directive.scala
Scala
mit
4,268
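A hedged sketch of the combinators defined above, written against hypothetical extraction directives (userId, token, fallbackId). Only &, |, map and require come from this file; Directive1 is assumed to be the usual single-value alias from the same package.

object DirectiveExample {
  import zzb.rest._

  def combinators(userId: Directive1[String], token: Directive1[Int], fallbackId: Directive1[String]) = {
    val upperId    = userId.map(_.toUpperCase)  // SingleValueModifiers.map keeps it a single-value directive
    val idPresent  = userId.require(_.nonEmpty) // Directive0: passes the request through or rejects it
    val idAndToken = userId & token             // ConjunctionMagnet.fromDirective joins the extracted HLists
    val recovered  = userId | fallbackId        // falls back to fallbackId when userId rejects
    (upperId, idPresent, idAndToken, recovered)
  }
}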
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql import java.sql.{Date, Timestamp} import java.text.SimpleDateFormat import java.time.{Instant, LocalDateTime, ZoneId} import java.util.{Locale, TimeZone} import java.util.concurrent.TimeUnit import org.apache.spark.{SparkException, SparkUpgradeException} import org.apache.spark.sql.catalyst.util.DateTimeTestUtils.{CEST, LA} import org.apache.spark.sql.catalyst.util.DateTimeUtils import org.apache.spark.sql.functions._ import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.test.SharedSparkSession import org.apache.spark.sql.types.DoubleType import org.apache.spark.unsafe.types.CalendarInterval class DateFunctionsSuite extends QueryTest with SharedSparkSession { import testImplicits._ test("function current_date") { val df1 = Seq((1, 2), (3, 1)).toDF("a", "b") val d0 = DateTimeUtils.currentDate(ZoneId.systemDefault()) val d1 = DateTimeUtils.fromJavaDate(df1.select(current_date()).collect().head.getDate(0)) val d2 = DateTimeUtils.fromJavaDate( sql("""SELECT CURRENT_DATE()""").collect().head.getDate(0)) val d3 = DateTimeUtils.currentDate(ZoneId.systemDefault()) assert(d0 <= d1 && d1 <= d2 && d2 <= d3 && d3 - d0 <= 1) } test("function current_timestamp and now") { val df1 = Seq((1, 2), (3, 1)).toDF("a", "b") checkAnswer(df1.select(count_distinct(current_timestamp())), Row(1)) // Execution in one query should return the same value checkAnswer(sql("""SELECT CURRENT_TIMESTAMP() = CURRENT_TIMESTAMP()"""), Row(true)) // Current timestamp should return the current timestamp ... 
val before = System.currentTimeMillis val got = sql("SELECT CURRENT_TIMESTAMP()").collect().head.getTimestamp(0).getTime val after = System.currentTimeMillis assert(got >= before && got <= after) // Now alias checkAnswer(sql("""SELECT CURRENT_TIMESTAMP() = NOW()"""), Row(true)) } val sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss", Locale.US) val sdfDate = new SimpleDateFormat("yyyy-MM-dd", Locale.US) val d = new Date(sdf.parse("2015-04-08 13:10:15").getTime) val ts = new Timestamp(sdf.parse("2013-04-08 13:10:15").getTime) test("timestamp comparison with date strings") { val df = Seq( (1, Timestamp.valueOf("2015-01-01 00:00:00")), (2, Timestamp.valueOf("2014-01-01 00:00:00"))).toDF("i", "t") checkAnswer( df.select("t").filter($"t" <= "2014-06-01"), Row(Timestamp.valueOf("2014-01-01 00:00:00")) :: Nil) checkAnswer( df.select("t").filter($"t" >= "2014-06-01"), Row(Timestamp.valueOf("2015-01-01 00:00:00")) :: Nil) } test("date comparison with date strings") { val df = Seq( (1, Date.valueOf("2015-01-01")), (2, Date.valueOf("2014-01-01"))).toDF("i", "t") checkAnswer( df.select("t").filter($"t" <= "2014-06-01"), Row(Date.valueOf("2014-01-01")) :: Nil) checkAnswer( df.select("t").filter($"t" >= "2015"), Row(Date.valueOf("2015-01-01")) :: Nil) } test("date format") { Seq("legacy", "corrected").foreach { legacyParserPolicy => withSQLConf(SQLConf.LEGACY_TIME_PARSER_POLICY.key -> legacyParserPolicy) { val df = Seq((d, sdf.format(d), ts)).toDF("a", "b", "c") checkAnswer( df.select(date_format($"a", "y"), date_format($"b", "y"), date_format($"c", "y")), Row("2015", "2015", "2013")) checkAnswer( df.selectExpr("date_format(a, 'y')", "date_format(b, 'y')", "date_format(c, 'y')"), Row("2015", "2015", "2013")) } } } test("year") { val df = Seq((d, sdfDate.format(d), ts)).toDF("a", "b", "c") checkAnswer( df.select(year($"a"), year($"b"), year($"c")), Row(2015, 2015, 2013)) checkAnswer( df.selectExpr("year(a)", "year(b)", "year(c)"), Row(2015, 2015, 2013)) } test("quarter") { val ts = new Timestamp(sdf.parse("2013-11-08 13:10:15").getTime) val df = Seq((d, sdfDate.format(d), ts)).toDF("a", "b", "c") checkAnswer( df.select(quarter($"a"), quarter($"b"), quarter($"c")), Row(2, 2, 4)) checkAnswer( df.selectExpr("quarter(a)", "quarter(b)", "quarter(c)"), Row(2, 2, 4)) } test("month") { val df = Seq((d, sdfDate.format(d), ts)).toDF("a", "b", "c") checkAnswer( df.select(month($"a"), month($"b"), month($"c")), Row(4, 4, 4)) checkAnswer( df.selectExpr("month(a)", "month(b)", "month(c)"), Row(4, 4, 4)) } test("dayofmonth") { val df = Seq((d, sdfDate.format(d), ts)).toDF("a", "b", "c") checkAnswer( df.select(dayofmonth($"a"), dayofmonth($"b"), dayofmonth($"c")), Row(8, 8, 8)) checkAnswer( df.selectExpr("day(a)", "day(b)", "dayofmonth(c)"), Row(8, 8, 8)) } test("dayofyear") { val df = Seq((d, sdfDate.format(d), ts)).toDF("a", "b", "c") checkAnswer( df.select(dayofyear($"a"), dayofyear($"b"), dayofyear($"c")), Row(98, 98, 98)) checkAnswer( df.selectExpr("dayofyear(a)", "dayofyear(b)", "dayofyear(c)"), Row(98, 98, 98)) } test("hour") { val df = Seq((d, sdf.format(d), ts)).toDF("a", "b", "c") checkAnswer( df.select(hour($"a"), hour($"b"), hour($"c")), Row(0, 13, 13)) checkAnswer( df.selectExpr("hour(a)", "hour(b)", "hour(c)"), Row(0, 13, 13)) } test("minute") { val df = Seq((d, sdf.format(d), ts)).toDF("a", "b", "c") checkAnswer( df.select(minute($"a"), minute($"b"), minute($"c")), Row(0, 10, 10)) checkAnswer( df.selectExpr("minute(a)", "minute(b)", "minute(c)"), Row(0, 10, 10)) } test("second") { val df = Seq((d, 
sdf.format(d), ts)).toDF("a", "b", "c") checkAnswer( df.select(second($"a"), second($"b"), second($"c")), Row(0, 15, 15)) checkAnswer( df.selectExpr("second(a)", "second(b)", "second(c)"), Row(0, 15, 15)) } test("weekofyear") { val df = Seq((d, sdfDate.format(d), ts)).toDF("a", "b", "c") checkAnswer( df.select(weekofyear($"a"), weekofyear($"b"), weekofyear($"c")), Row(15, 15, 15)) checkAnswer( df.selectExpr("weekofyear(a)", "weekofyear(b)", "weekofyear(c)"), Row(15, 15, 15)) } test("function date_add") { val st1 = "2015-06-01 12:34:56" val st2 = "2015-06-02 12:34:56" val t1 = Timestamp.valueOf(st1) val t2 = Timestamp.valueOf(st2) val s1 = "2015-06-01" val s2 = "2015-06-02" val d1 = Date.valueOf(s1) val d2 = Date.valueOf(s2) val df = Seq((t1, d1, s1, st1), (t2, d2, s2, st2)).toDF("t", "d", "s", "ss") checkAnswer( df.select(date_add(col("d"), 1)), Seq(Row(Date.valueOf("2015-06-02")), Row(Date.valueOf("2015-06-03")))) checkAnswer( df.select(date_add(col("t"), 3)), Seq(Row(Date.valueOf("2015-06-04")), Row(Date.valueOf("2015-06-05")))) checkAnswer( df.select(date_add(col("s"), 5)), Seq(Row(Date.valueOf("2015-06-06")), Row(Date.valueOf("2015-06-07")))) checkAnswer( df.select(date_add(col("ss"), 7)), Seq(Row(Date.valueOf("2015-06-08")), Row(Date.valueOf("2015-06-09")))) checkAnswer( df.withColumn("x", lit(1)).select(date_add(col("d"), col("x"))), Seq(Row(Date.valueOf("2015-06-02")), Row(Date.valueOf("2015-06-03")))) checkAnswer(df.selectExpr("DATE_ADD(null, 1)"), Seq(Row(null), Row(null))) checkAnswer( df.selectExpr("""DATE_ADD(d, 1)"""), Seq(Row(Date.valueOf("2015-06-02")), Row(Date.valueOf("2015-06-03")))) } test("function date_sub") { val st1 = "2015-06-01 12:34:56" val st2 = "2015-06-02 12:34:56" val t1 = Timestamp.valueOf(st1) val t2 = Timestamp.valueOf(st2) val s1 = "2015-06-01" val s2 = "2015-06-02" val d1 = Date.valueOf(s1) val d2 = Date.valueOf(s2) val df = Seq((t1, d1, s1, st1), (t2, d2, s2, st2)).toDF("t", "d", "s", "ss") checkAnswer( df.select(date_sub(col("d"), 1)), Seq(Row(Date.valueOf("2015-05-31")), Row(Date.valueOf("2015-06-01")))) checkAnswer( df.select(date_sub(col("t"), 1)), Seq(Row(Date.valueOf("2015-05-31")), Row(Date.valueOf("2015-06-01")))) checkAnswer( df.select(date_sub(col("s"), 1)), Seq(Row(Date.valueOf("2015-05-31")), Row(Date.valueOf("2015-06-01")))) checkAnswer( df.select(date_sub(col("ss"), 1)), Seq(Row(Date.valueOf("2015-05-31")), Row(Date.valueOf("2015-06-01")))) checkAnswer( df.select(date_sub(lit(null), 1)).limit(1), Row(null)) checkAnswer( df.withColumn("x", lit(1)).select(date_sub(col("d"), col("x"))), Seq(Row(Date.valueOf("2015-05-31")), Row(Date.valueOf("2015-06-01")))) checkAnswer(df.selectExpr("""DATE_SUB(d, null)"""), Seq(Row(null), Row(null))) checkAnswer( df.selectExpr("""DATE_SUB(d, 1)"""), Seq(Row(Date.valueOf("2015-05-31")), Row(Date.valueOf("2015-06-01")))) } test("time_add") { val t1 = Timestamp.valueOf("2015-07-31 23:59:59") val t2 = Timestamp.valueOf("2015-12-31 00:00:00") val d1 = Date.valueOf("2015-07-31") val d2 = Date.valueOf("2015-12-31") val i = new CalendarInterval(2, 2, 2000000L) val df = Seq((1, t1, d1), (3, t2, d2)).toDF("n", "t", "d") checkAnswer( df.selectExpr(s"d + INTERVAL'${i.toString}'"), Seq(Row(Date.valueOf("2015-10-02")), Row(Date.valueOf("2016-03-02")))) checkAnswer( df.selectExpr(s"t + INTERVAL'${i.toString}'"), Seq(Row(Timestamp.valueOf("2015-10-03 00:00:01")), Row(Timestamp.valueOf("2016-03-02 00:00:02")))) } test("time_sub") { val t1 = Timestamp.valueOf("2015-10-01 00:00:01") val t2 = Timestamp.valueOf("2016-02-29 
00:00:02") val d1 = Date.valueOf("2015-09-30") val d2 = Date.valueOf("2016-02-29") val i = new CalendarInterval(2, 2, 2000000L) val df = Seq((1, t1, d1), (3, t2, d2)).toDF("n", "t", "d") checkAnswer( df.selectExpr(s"d - INTERVAL'${i.toString}'"), Seq(Row(Date.valueOf("2015-07-27")), Row(Date.valueOf("2015-12-26")))) checkAnswer( df.selectExpr(s"t - INTERVAL'${i.toString}'"), Seq(Row(Timestamp.valueOf("2015-07-29 23:59:59")), Row(Timestamp.valueOf("2015-12-27 00:00:00")))) } test("function make_interval") { val t1 = Timestamp.valueOf("2015-10-01 00:00:01") val t2 = Timestamp.valueOf("2016-02-29 00:00:02") val df = Seq((t1), (t2)).toDF("t") // adds two hours to times checkAnswer( df.select(col("t") + make_interval(hours = lit(2))), Seq(Row(Timestamp.valueOf("2015-10-01 02:00:01")), Row(Timestamp.valueOf("2016-02-29 02:00:02")))) // adds four days and two hours to times checkAnswer( df.select(col("t") + make_interval(hours = lit(2), days = lit(4))), Seq(Row(Timestamp.valueOf("2015-10-05 02:00:01")), Row(Timestamp.valueOf("2016-03-04 02:00:02")))) // subtracts two hours from times checkAnswer( df.select(col("t") + make_interval(hours = lit(-2))), Seq(Row(Timestamp.valueOf("2015-09-30 22:00:01")), Row(Timestamp.valueOf("2016-02-28 22:00:02")))) val d1 = Date.valueOf("2015-08-31") val d2 = Date.valueOf("2015-02-28") val df2 = Seq((d1), (d2)).toDF("d") // adding an hour to a date does nothing checkAnswer( df2.select(col("d") + make_interval(hours = lit(1))), Seq(Row(Date.valueOf("2015-08-31")), Row(Date.valueOf("2015-02-28")))) // adds three years to date checkAnswer( df2.select(col("d") + make_interval(years = lit(3))), Seq(Row(Date.valueOf("2018-08-31")), Row(Date.valueOf("2018-02-28")))) // subtracts 1 week, one day from date checkAnswer( df2.select(col("d") - make_interval(weeks = lit(1), days = lit(1))), Seq(Row(Date.valueOf("2015-08-23")), Row(Date.valueOf("2015-02-20")))) } test("function add_months") { val d1 = Date.valueOf("2015-08-31") val d2 = Date.valueOf("2015-02-28") val df = Seq((1, d1), (2, d2)).toDF("n", "d") checkAnswer( df.select(add_months(col("d"), 1)), Seq(Row(Date.valueOf("2015-09-30")), Row(Date.valueOf("2015-03-28")))) checkAnswer( df.selectExpr("add_months(d, -1)"), Seq(Row(Date.valueOf("2015-07-31")), Row(Date.valueOf("2015-01-28")))) checkAnswer( df.withColumn("x", lit(1)).select(add_months(col("d"), col("x"))), Seq(Row(Date.valueOf("2015-09-30")), Row(Date.valueOf("2015-03-28")))) } test("function months_between") { val d1 = Date.valueOf("2015-07-31") val d2 = Date.valueOf("2015-02-16") val t1 = Timestamp.valueOf("2014-09-30 23:30:00") val t2 = Timestamp.valueOf("2015-09-16 12:00:00") val s1 = "2014-09-15 11:30:00" val s2 = "2015-10-01 00:00:00" val df = Seq((t1, d1, s1), (t2, d2, s2)).toDF("t", "d", "s") checkAnswer(df.select(months_between(col("t"), col("d"))), Seq(Row(-10.0), Row(7.0))) checkAnswer(df.selectExpr("months_between(t, s)"), Seq(Row(0.5), Row(-0.5))) checkAnswer(df.selectExpr("months_between(t, s, true)"), Seq(Row(0.5), Row(-0.5))) Seq(true, false).foreach { roundOff => checkAnswer(df.select(months_between(col("t"), col("d"), roundOff)), Seq(Row(-10.0), Row(7.0))) checkAnswer(df.withColumn("r", lit(false)).selectExpr("months_between(t, s, r)"), Seq(Row(0.5), Row(-0.5))) } } test("function last_day") { val df1 = Seq((1, "2015-07-23"), (2, "2015-07-24")).toDF("i", "d") val df2 = Seq((1, "2015-07-23 00:11:22"), (2, "2015-07-24 11:22:33")).toDF("i", "t") checkAnswer( df1.select(last_day(col("d"))), Seq(Row(Date.valueOf("2015-07-31")), 
Row(Date.valueOf("2015-07-31")))) checkAnswer( df2.select(last_day(col("t"))), Seq(Row(Date.valueOf("2015-07-31")), Row(Date.valueOf("2015-07-31")))) } test("function next_day") { val df1 = Seq(("mon", "2015-07-23"), ("tuesday", "2015-07-20")).toDF("dow", "d") val df2 = Seq(("th", "2015-07-23 00:11:22"), ("xx", "2015-07-24 11:22:33")).toDF("dow", "t") checkAnswer( df1.select( next_day(col("d"), "MONDAY"), next_day(col("d"), col("dow")), next_day(col("d"), "NonValidDay")), Seq( Row(Date.valueOf("2015-07-27"), Date.valueOf("2015-07-27"), null), Row(Date.valueOf("2015-07-27"), Date.valueOf("2015-07-21"), null))) checkAnswer( df2.select( next_day(col("t"), "th"), next_day(col("t"), col("dow")), next_day(col("t"), "NonValidDay")), Seq( Row(Date.valueOf("2015-07-30"), Date.valueOf("2015-07-30"), null), Row(Date.valueOf("2015-07-30"), null, null))) } def checkExceptionMessage(df: DataFrame): Unit = { val message = intercept[SparkException] { df.collect() }.getCause.getMessage assert(message.contains("Fail to parse")) } test("function to_date") { val d1 = Date.valueOf("2015-07-22") val d2 = Date.valueOf("2015-07-01") val d3 = Date.valueOf("2014-12-31") val t1 = Timestamp.valueOf("2015-07-22 10:00:00") val t2 = Timestamp.valueOf("2014-12-31 23:59:59") val t3 = Timestamp.valueOf("2014-12-31 23:59:59") val s1 = "2015-07-22 10:00:00" val s2 = "2014-12-31" val s3 = "2014-31-12" val df = Seq((d1, t1, s1), (d2, t2, s2), (d3, t3, s3)).toDF("d", "t", "s") checkAnswer( df.select(to_date(col("t"))), Seq(Row(Date.valueOf("2015-07-22")), Row(Date.valueOf("2014-12-31")), Row(Date.valueOf("2014-12-31")))) checkAnswer( df.select(to_date(col("d"))), Seq(Row(Date.valueOf("2015-07-22")), Row(Date.valueOf("2015-07-01")), Row(Date.valueOf("2014-12-31")))) checkAnswer( df.select(to_date(col("s"))), Seq(Row(Date.valueOf("2015-07-22")), Row(Date.valueOf("2014-12-31")), Row(null))) checkAnswer( df.selectExpr("to_date(t)"), Seq(Row(Date.valueOf("2015-07-22")), Row(Date.valueOf("2014-12-31")), Row(Date.valueOf("2014-12-31")))) checkAnswer( df.selectExpr("to_date(d)"), Seq(Row(Date.valueOf("2015-07-22")), Row(Date.valueOf("2015-07-01")), Row(Date.valueOf("2014-12-31")))) checkAnswer( df.selectExpr("to_date(s)"), Seq(Row(Date.valueOf("2015-07-22")), Row(Date.valueOf("2014-12-31")), Row(null))) // now with format checkAnswer( df.select(to_date(col("t"), "yyyy-MM-dd")), Seq(Row(Date.valueOf("2015-07-22")), Row(Date.valueOf("2014-12-31")), Row(Date.valueOf("2014-12-31")))) checkAnswer( df.select(to_date(col("d"), "yyyy-MM-dd")), Seq(Row(Date.valueOf("2015-07-22")), Row(Date.valueOf("2015-07-01")), Row(Date.valueOf("2014-12-31")))) val confKey = SQLConf.LEGACY_TIME_PARSER_POLICY.key withSQLConf(confKey -> "corrected") { checkAnswer( df.select(to_date(col("s"), "yyyy-MM-dd")), Seq(Row(null), Row(Date.valueOf("2014-12-31")), Row(null))) } withSQLConf(confKey -> "exception") { checkExceptionMessage(df.select(to_date(col("s"), "yyyy-MM-dd"))) } // now switch format checkAnswer( df.select(to_date(col("s"), "yyyy-dd-MM")), Seq(Row(null), Row(null), Row(Date.valueOf("2014-12-31")))) // invalid format checkAnswer( df.select(to_date(col("s"), "yyyy-hh-MM")), Seq(Row(null), Row(null), Row(null))) val e = intercept[SparkUpgradeException](df.select(to_date(col("s"), "yyyy-dd-aa")).collect()) assert(e.getCause.isInstanceOf[IllegalArgumentException]) assert(e.getMessage.contains("You may get a different result due to the upgrading of Spark")) // February val x1 = "2016-02-29" val x2 = "2017-02-29" val df1 = Seq(x1, x2).toDF("x") checkAnswer( 
df1.select(to_date(col("x"))), Row(Date.valueOf("2016-02-29")) :: Row(null) :: Nil) } test("function trunc") { val df = Seq( (1, Timestamp.valueOf("2015-07-22 10:00:00")), (2, Timestamp.valueOf("2014-12-31 00:00:00"))).toDF("i", "t") checkAnswer( df.select(trunc(col("t"), "YY")), Seq(Row(Date.valueOf("2015-01-01")), Row(Date.valueOf("2014-01-01")))) checkAnswer( df.selectExpr("trunc(t, 'Month')"), Seq(Row(Date.valueOf("2015-07-01")), Row(Date.valueOf("2014-12-01")))) } test("function date_trunc") { val df = Seq( (1, Timestamp.valueOf("2015-07-22 10:01:40.123456")), (2, Timestamp.valueOf("2014-12-31 05:29:06.123456"))).toDF("i", "t") checkAnswer( df.select(date_trunc("YY", col("t"))), Seq(Row(Timestamp.valueOf("2015-01-01 00:00:00")), Row(Timestamp.valueOf("2014-01-01 00:00:00")))) checkAnswer( df.selectExpr("date_trunc('MONTH', t)"), Seq(Row(Timestamp.valueOf("2015-07-01 00:00:00")), Row(Timestamp.valueOf("2014-12-01 00:00:00")))) checkAnswer( df.selectExpr("date_trunc('DAY', t)"), Seq(Row(Timestamp.valueOf("2015-07-22 00:00:00")), Row(Timestamp.valueOf("2014-12-31 00:00:00")))) checkAnswer( df.selectExpr("date_trunc('HOUR', t)"), Seq(Row(Timestamp.valueOf("2015-07-22 10:00:00")), Row(Timestamp.valueOf("2014-12-31 05:00:00")))) checkAnswer( df.selectExpr("date_trunc('MINUTE', t)"), Seq(Row(Timestamp.valueOf("2015-07-22 10:01:00")), Row(Timestamp.valueOf("2014-12-31 05:29:00")))) checkAnswer( df.selectExpr("date_trunc('SECOND', t)"), Seq(Row(Timestamp.valueOf("2015-07-22 10:01:40")), Row(Timestamp.valueOf("2014-12-31 05:29:06")))) checkAnswer( df.selectExpr("date_trunc('WEEK', t)"), Seq(Row(Timestamp.valueOf("2015-07-20 00:00:00")), Row(Timestamp.valueOf("2014-12-29 00:00:00")))) checkAnswer( df.selectExpr("date_trunc('QUARTER', t)"), Seq(Row(Timestamp.valueOf("2015-07-01 00:00:00")), Row(Timestamp.valueOf("2014-10-01 00:00:00")))) checkAnswer( df.selectExpr("date_trunc('MILLISECOND', t)"), Seq(Row(Timestamp.valueOf("2015-07-22 10:01:40.123")), Row(Timestamp.valueOf("2014-12-31 05:29:06.123")))) } test("unsupported fmt fields for trunc/date_trunc results null") { Seq("INVALID", "decade", "century", "millennium", "whatever", null).foreach { f => checkAnswer( Seq(Date.valueOf("2014-12-31")) .toDF("dt") .selectExpr(s"date_trunc('$f', dt)", "trunc(dt, '$f')"), Row(null, null)) } } test("from_unixtime") { Seq("corrected", "legacy").foreach { legacyParserPolicy => withSQLConf(SQLConf.LEGACY_TIME_PARSER_POLICY.key -> legacyParserPolicy) { val sdf1 = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss", Locale.US) val fmt2 = "yyyy-MM-dd HH:mm:ss.SSS" val sdf2 = new SimpleDateFormat(fmt2, Locale.US) val fmt3 = "yy-MM-dd HH-mm-ss" val sdf3 = new SimpleDateFormat(fmt3, Locale.US) val df = Seq((1000, "yyyy-MM-dd HH:mm:ss.SSS"), (-1000, "yy-MM-dd HH-mm-ss")).toDF("a", "b") checkAnswer( df.select(from_unixtime(col("a"))), Seq(Row(sdf1.format(new Timestamp(1000000))), Row(sdf1.format(new Timestamp(-1000000))))) checkAnswer( df.select(from_unixtime(col("a"), fmt2)), Seq(Row(sdf2.format(new Timestamp(1000000))), Row(sdf2.format(new Timestamp(-1000000))))) checkAnswer( df.select(from_unixtime(col("a"), fmt3)), Seq(Row(sdf3.format(new Timestamp(1000000))), Row(sdf3.format(new Timestamp(-1000000))))) checkAnswer( df.selectExpr("from_unixtime(a)"), Seq(Row(sdf1.format(new Timestamp(1000000))), Row(sdf1.format(new Timestamp(-1000000))))) checkAnswer( df.selectExpr(s"from_unixtime(a, '$fmt2')"), Seq(Row(sdf2.format(new Timestamp(1000000))), Row(sdf2.format(new Timestamp(-1000000))))) checkAnswer( 
df.selectExpr(s"from_unixtime(a, '$fmt3')"), Seq(Row(sdf3.format(new Timestamp(1000000))), Row(sdf3.format(new Timestamp(-1000000))))) } } } private def secs(millis: Long): Long = TimeUnit.MILLISECONDS.toSeconds(millis) test("unix_timestamp") { Seq("corrected", "legacy").foreach { legacyParserPolicy => withSQLConf(SQLConf.LEGACY_TIME_PARSER_POLICY.key -> legacyParserPolicy) { val date1 = Date.valueOf("2015-07-24") val date2 = Date.valueOf("2015-07-25") val ts1 = Timestamp.valueOf("2015-07-24 10:00:00.3") val ts2 = Timestamp.valueOf("2015-07-25 02:02:02.2") val s1 = "2015/07/24 10:00:00.5" val s2 = "2015/07/25 02:02:02.6" val ss1 = "2015-07-24 10:00:00" val ss2 = "2015-07-25 02:02:02" val fmt = "yyyy/MM/dd HH:mm:ss.S" val df = Seq((date1, ts1, s1, ss1), (date2, ts2, s2, ss2)).toDF("d", "ts", "s", "ss") checkAnswer(df.select(unix_timestamp(col("ts"))), Seq( Row(secs(ts1.getTime)), Row(secs(ts2.getTime)))) checkAnswer(df.select(unix_timestamp(col("ss"))), Seq( Row(secs(ts1.getTime)), Row(secs(ts2.getTime)))) checkAnswer(df.select(unix_timestamp(col("d"), fmt)), Seq( Row(secs(date1.getTime)), Row(secs(date2.getTime)))) checkAnswer(df.select(unix_timestamp(col("s"), fmt)), Seq( Row(secs(ts1.getTime)), Row(secs(ts2.getTime)))) checkAnswer(df.selectExpr("unix_timestamp(ts)"), Seq( Row(secs(ts1.getTime)), Row(secs(ts2.getTime)))) checkAnswer(df.selectExpr("unix_timestamp(ss)"), Seq( Row(secs(ts1.getTime)), Row(secs(ts2.getTime)))) checkAnswer(df.selectExpr(s"unix_timestamp(d, '$fmt')"), Seq( Row(secs(date1.getTime)), Row(secs(date2.getTime)))) checkAnswer(df.selectExpr(s"unix_timestamp(s, '$fmt')"), Seq( Row(secs(ts1.getTime)), Row(secs(ts2.getTime)))) val x1 = "2015-07-24 10:00:00" val x2 = "2015-25-07 02:02:02" val x3 = "2015-07-24 25:02:02" val x4 = "2015-24-07 26:02:02" val ts3 = Timestamp.valueOf("2015-07-24 02:25:02") val ts4 = Timestamp.valueOf("2015-07-24 00:10:00") val df1 = Seq(x1, x2, x3, x4).toDF("x") checkAnswer(df1.select(unix_timestamp(col("x"))), Seq( Row(secs(ts1.getTime)), Row(null), Row(null), Row(null))) checkAnswer(df1.selectExpr("unix_timestamp(x)"), Seq( Row(secs(ts1.getTime)), Row(null), Row(null), Row(null))) checkAnswer(df1.select(unix_timestamp(col("x"), "yyyy-dd-MM HH:mm:ss")), Seq( Row(null), Row(secs(ts2.getTime)), Row(null), Row(null))) checkAnswer(df1.selectExpr(s"unix_timestamp(x, 'yyyy-MM-dd mm:HH:ss')"), Seq( Row(secs(ts4.getTime)), Row(null), Row(secs(ts3.getTime)), Row(null))) // invalid format val invalid = df1.selectExpr(s"unix_timestamp(x, 'yyyy-MM-dd aa:HH:ss')") if (legacyParserPolicy == "legacy") { checkAnswer(invalid, Seq(Row(null), Row(null), Row(null), Row(null))) } else { val e = intercept[SparkUpgradeException](invalid.collect()) assert(e.getCause.isInstanceOf[IllegalArgumentException]) assert( e.getMessage.contains("You may get a different result due to the upgrading of Spark")) } // February val y1 = "2016-02-29" val y2 = "2017-02-29" val ts5 = Timestamp.valueOf("2016-02-29 00:00:00") val df2 = Seq(y1, y2).toDF("y") checkAnswer(df2.select(unix_timestamp(col("y"), "yyyy-MM-dd")), Seq( Row(secs(ts5.getTime)), Row(null))) val now = sql("select unix_timestamp()").collect().head.getLong(0) checkAnswer( sql(s"select timestamp_seconds($now)"), Row(new java.util.Date(TimeUnit.SECONDS.toMillis(now)))) } } } test("to_unix_timestamp") { Seq("corrected", "legacy").foreach { legacyParserPolicy => withSQLConf(SQLConf.LEGACY_TIME_PARSER_POLICY.key -> legacyParserPolicy) { val date1 = Date.valueOf("2015-07-24") val date2 = Date.valueOf("2015-07-25") val ts1 = 
Timestamp.valueOf("2015-07-24 10:00:00.3") val ts2 = Timestamp.valueOf("2015-07-25 02:02:02.2") val s1 = "2015/07/24 10:00:00.5" val s2 = "2015/07/25 02:02:02.6" val ss1 = "2015-07-24 10:00:00" val ss2 = "2015-07-25 02:02:02" val fmt = "yyyy/MM/dd HH:mm:ss.S" val df = Seq((date1, ts1, s1, ss1), (date2, ts2, s2, ss2)).toDF("d", "ts", "s", "ss") checkAnswer(df.selectExpr("to_unix_timestamp(ts)"), Seq( Row(secs(ts1.getTime)), Row(secs(ts2.getTime)))) checkAnswer(df.selectExpr("to_unix_timestamp(ss)"), Seq( Row(secs(ts1.getTime)), Row(secs(ts2.getTime)))) checkAnswer(df.selectExpr(s"to_unix_timestamp(d, '$fmt')"), Seq( Row(secs(date1.getTime)), Row(secs(date2.getTime)))) checkAnswer(df.selectExpr(s"to_unix_timestamp(s, '$fmt')"), Seq( Row(secs(ts1.getTime)), Row(secs(ts2.getTime)))) val x1 = "2015-07-24 10:00:00" val x2 = "2015-25-07 02:02:02" val x3 = "2015-07-24 25:02:02" val x4 = "2015-24-07 26:02:02" val ts3 = Timestamp.valueOf("2015-07-24 02:25:02") val ts4 = Timestamp.valueOf("2015-07-24 00:10:00") val df1 = Seq(x1, x2, x3, x4).toDF("x") checkAnswer(df1.selectExpr("to_unix_timestamp(x)"), Seq( Row(secs(ts1.getTime)), Row(null), Row(null), Row(null))) checkAnswer(df1.selectExpr(s"to_unix_timestamp(x, 'yyyy-MM-dd mm:HH:ss')"), Seq( Row(secs(ts4.getTime)), Row(null), Row(secs(ts3.getTime)), Row(null))) // February val y1 = "2016-02-29" val y2 = "2017-02-29" val ts5 = Timestamp.valueOf("2016-02-29 00:00:00") val df2 = Seq(y1, y2).toDF("y") checkAnswer(df2.select(unix_timestamp(col("y"), "yyyy-MM-dd")), Seq( Row(secs(ts5.getTime)), Row(null))) // invalid format val invalid = df1.selectExpr(s"to_unix_timestamp(x, 'yyyy-MM-dd bb:HH:ss')") val e = intercept[IllegalArgumentException](invalid.collect()) assert(e.getMessage.contains('b')) } } } test("to_timestamp") { Seq("legacy", "corrected").foreach { legacyParserPolicy => withSQLConf(SQLConf.LEGACY_TIME_PARSER_POLICY.key -> legacyParserPolicy) { val date1 = Date.valueOf("2015-07-24") val date2 = Date.valueOf("2015-07-25") val ts_date1 = Timestamp.valueOf("2015-07-24 00:00:00") val ts_date2 = Timestamp.valueOf("2015-07-25 00:00:00") val ts1 = Timestamp.valueOf("2015-07-24 10:00:00") val ts2 = Timestamp.valueOf("2015-07-25 02:02:02") val s1 = "2015/07/24 10:00:00.5" val s2 = "2015/07/25 02:02:02.6" val ts1m = Timestamp.valueOf("2015-07-24 10:00:00.5") val ts2m = Timestamp.valueOf("2015-07-25 02:02:02.6") val ss1 = "2015-07-24 10:00:00" val ss2 = "2015-07-25 02:02:02" val fmt = "yyyy/MM/dd HH:mm:ss.S" val df = Seq((date1, ts1, s1, ss1), (date2, ts2, s2, ss2)).toDF("d", "ts", "s", "ss") checkAnswer(df.select(to_timestamp(col("ss"))), df.select(timestamp_seconds(unix_timestamp(col("ss"))))) checkAnswer(df.select(to_timestamp(col("ss"))), Seq( Row(ts1), Row(ts2))) if (legacyParserPolicy == "legacy") { // In Spark 2.4 and earlier, to_timestamp() parses in seconds precision and cuts off // the fractional part of seconds. The behavior was changed by SPARK-27438. 
val legacyFmt = "yyyy/MM/dd HH:mm:ss" checkAnswer(df.select(to_timestamp(col("s"), legacyFmt)), Seq( Row(ts1), Row(ts2))) } else { checkAnswer(df.select(to_timestamp(col("s"), fmt)), Seq( Row(ts1m), Row(ts2m))) } checkAnswer(df.select(to_timestamp(col("ts"), fmt)), Seq( Row(ts1), Row(ts2))) checkAnswer(df.select(to_timestamp(col("d"), "yyyy-MM-dd")), Seq( Row(ts_date1), Row(ts_date2))) } } } test("datediff") { val df = Seq( (Date.valueOf("2015-07-24"), Timestamp.valueOf("2015-07-24 01:00:00"), "2015-07-23", "2015-07-23 03:00:00"), (Date.valueOf("2015-07-25"), Timestamp.valueOf("2015-07-25 02:00:00"), "2015-07-24", "2015-07-24 04:00:00") ).toDF("a", "b", "c", "d") checkAnswer(df.select(datediff(col("a"), col("b"))), Seq(Row(0), Row(0))) checkAnswer(df.select(datediff(col("a"), col("c"))), Seq(Row(1), Row(1))) checkAnswer(df.select(datediff(col("d"), col("b"))), Seq(Row(-1), Row(-1))) checkAnswer(df.selectExpr("datediff(a, d)"), Seq(Row(1), Row(1))) } test("to_timestamp with microseconds precision") { withSQLConf(SQLConf.DATETIME_JAVA8API_ENABLED.key -> "true") { val timestamp = "1970-01-01T00:00:00.123456Z" val df = Seq(timestamp).toDF("t") checkAnswer(df.select(to_timestamp($"t", "yyyy-MM-dd'T'HH:mm:ss.SSSSSSX")), Seq(Row(Instant.parse(timestamp)))) } } test("from_utc_timestamp with literal zone") { val df = Seq( (Timestamp.valueOf("2015-07-24 00:00:00"), "2015-07-24 00:00:00"), (Timestamp.valueOf("2015-07-25 00:00:00"), "2015-07-25 00:00:00") ).toDF("a", "b") checkAnswer( df.select(from_utc_timestamp(col("a"), LA.getId)), Seq( Row(Timestamp.valueOf("2015-07-23 17:00:00")), Row(Timestamp.valueOf("2015-07-24 17:00:00")))) checkAnswer( df.select(from_utc_timestamp(col("b"), LA.getId)), Seq( Row(Timestamp.valueOf("2015-07-23 17:00:00")), Row(Timestamp.valueOf("2015-07-24 17:00:00")))) } test("from_utc_timestamp with column zone") { val df = Seq( (Timestamp.valueOf("2015-07-24 00:00:00"), "2015-07-24 00:00:00", CEST.getId), (Timestamp.valueOf("2015-07-25 00:00:00"), "2015-07-25 00:00:00", LA.getId) ).toDF("a", "b", "c") checkAnswer( df.select(from_utc_timestamp(col("a"), col("c"))), Seq( Row(Timestamp.valueOf("2015-07-24 02:00:00")), Row(Timestamp.valueOf("2015-07-24 17:00:00")))) checkAnswer( df.select(from_utc_timestamp(col("b"), col("c"))), Seq( Row(Timestamp.valueOf("2015-07-24 02:00:00")), Row(Timestamp.valueOf("2015-07-24 17:00:00")))) } test("handling null field by date_part") { val input = Seq(Date.valueOf("2019-09-20")).toDF("d") Seq("date_part(null, d)", "date_part(null, date'2019-09-20')").foreach { expr => val df = input.selectExpr(expr) assert(df.schema.headOption.get.dataType == DoubleType) checkAnswer(df, Row(null)) } } test("to_utc_timestamp with literal zone") { val df = Seq( (Timestamp.valueOf("2015-07-24 00:00:00"), "2015-07-24 00:00:00"), (Timestamp.valueOf("2015-07-25 00:00:00"), "2015-07-25 00:00:00") ).toDF("a", "b") checkAnswer( df.select(to_utc_timestamp(col("a"), LA.getId)), Seq( Row(Timestamp.valueOf("2015-07-24 07:00:00")), Row(Timestamp.valueOf("2015-07-25 07:00:00")))) checkAnswer( df.select(to_utc_timestamp(col("b"), LA.getId)), Seq( Row(Timestamp.valueOf("2015-07-24 07:00:00")), Row(Timestamp.valueOf("2015-07-25 07:00:00")))) } test("to_utc_timestamp with column zone") { val df = Seq( (Timestamp.valueOf("2015-07-24 00:00:00"), "2015-07-24 00:00:00", LA.getId), (Timestamp.valueOf("2015-07-25 00:00:00"), "2015-07-25 00:00:00", CEST.getId) ).toDF("a", "b", "c") checkAnswer( df.select(to_utc_timestamp(col("a"), col("c"))), Seq( Row(Timestamp.valueOf("2015-07-24 
07:00:00")), Row(Timestamp.valueOf("2015-07-24 22:00:00")))) checkAnswer( df.select(to_utc_timestamp(col("b"), col("c"))), Seq( Row(Timestamp.valueOf("2015-07-24 07:00:00")), Row(Timestamp.valueOf("2015-07-24 22:00:00")))) } test("SPARK-30668: use legacy timestamp parser in to_timestamp") { val confKey = SQLConf.LEGACY_TIME_PARSER_POLICY.key val df = Seq("2020-01-27T20:06:11.847-0800").toDF("ts") withSQLConf(confKey -> "legacy") { val expected = Timestamp.valueOf("2020-01-27 20:06:11.847") checkAnswer(df.select(to_timestamp(col("ts"), "yyyy-MM-dd'T'HH:mm:ss.SSSz")), Row(expected)) } withSQLConf(confKey -> "corrected") { checkAnswer(df.select(to_timestamp(col("ts"), "yyyy-MM-dd'T'HH:mm:ss.SSSz")), Row(null)) } withSQLConf(confKey -> "exception") { checkExceptionMessage(df.select(to_timestamp(col("ts"), "yyyy-MM-dd'T'HH:mm:ss.SSSz"))) } } test("SPARK-30752: convert time zones on a daylight saving day") { val systemTz = LA.getId val sessionTz = "UTC" val fromTz = "Asia/Hong_Kong" val fromTs = "2019-11-03T12:00:00" // daylight saving date in America/Los_Angeles val utsTs = "2019-11-03T04:00:00" val defaultTz = TimeZone.getDefault try { TimeZone.setDefault(DateTimeUtils.getTimeZone(systemTz)) withSQLConf( SQLConf.DATETIME_JAVA8API_ENABLED.key -> "true", SQLConf.SESSION_LOCAL_TIMEZONE.key -> sessionTz) { val expected = LocalDateTime.parse(utsTs) .atZone(DateTimeUtils.getZoneId(sessionTz)) .toInstant val df = Seq(fromTs).toDF("localTs") checkAnswer( df.select(to_utc_timestamp(col("localTs"), fromTz)), Row(expected)) } } finally { TimeZone.setDefault(defaultTz) } } test("SPARK-30766: date_trunc of old timestamps to hours and days") { def checkTrunc(level: String, expected: String): Unit = { val df = Seq("0010-01-01 01:02:03.123456") .toDF() .select($"value".cast("timestamp").as("ts")) .select(date_trunc(level, $"ts").cast("string")) checkAnswer(df, Row(expected)) } checkTrunc("HOUR", "0010-01-01 01:00:00") checkTrunc("DAY", "0010-01-01 00:00:00") } test("SPARK-30793: truncate timestamps before the epoch to seconds and minutes") { def checkTrunc(level: String, expected: String): Unit = { val df = Seq("1961-04-12 00:01:02.345") .toDF() .select($"value".cast("timestamp").as("ts")) .select(date_trunc(level, $"ts").cast("string")) checkAnswer(df, Row(expected)) } checkTrunc("SECOND", "1961-04-12 00:01:02") checkTrunc("MINUTE", "1961-04-12 00:01:00") } }
maropu/spark
sql/core/src/test/scala/org/apache/spark/sql/DateFunctionsSuite.scala
Scala
apache-2.0
37,164
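A hedged standalone sketch (not part of the suite above), in spark-shell style, of the same column functions the tests exercise. It assumes an existing SparkSession named spark; the expected values in the comments follow the behaviour checked by the tests.

import org.apache.spark.sql.functions._
import spark.implicits._

val df = Seq(java.sql.Timestamp.valueOf("2015-07-22 10:01:40.123")).toDF("t")
df.select(
  date_trunc("HOUR", $"t"),                // 2015-07-22 10:00:00
  add_months($"t".cast("date"), 1),        // 2015-08-22
  months_between(lit("2015-09-22"), $"t")  // 2.0 (same day of month, so time of day is ignored)
).show(false)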
package dotty.tools package dotc package fromtasty import core._ import Decorators._ import Contexts.Context import Symbols.{Symbol, ClassSymbol} import SymDenotations.ClassDenotation import NameOps._ import ast.Trees.Tree import Phases.Phase /** Load trees from TASTY files */ class ReadTasty extends Phase { def phaseName: String = "readTasty" override def isRunnable(implicit ctx: Context): Boolean = ctx.settings.fromTasty.value override def runOn(units: List[CompilationUnit])(implicit ctx: Context): List[CompilationUnit] = units.flatMap(readTASTY(_)(ctx.addMode(Mode.ReadPositions))) def readTASTY(unit: CompilationUnit)(implicit ctx: Context): Option[CompilationUnit] = unit match { case unit: TASTYCompilationUnit => val className = unit.className.toTypeName def cannotUnpickle(reason: String): None.type = { ctx.error(s"class $className cannot be unpickled because $reason") None } def compilationUnit(cls: Symbol): Option[CompilationUnit] = cls match { case cls: ClassSymbol => (cls.rootTreeOrProvider: @unchecked) match { case unpickler: tasty.DottyUnpickler => if (cls.rootTree.isEmpty) None else { val unit = CompilationUnit(cls, cls.rootTree, forceTrees = true) unit.pickled += (cls -> unpickler.unpickler.bytes) Some(unit) } case tree: Tree[?] => cls.denot.infoOrCompleter match { case _: NoLoader => Some(Scala2CompilationUnit(cls.fullName.toString)) case _ if cls.flags.is(Flags.JavaDefined) => Some(JavaCompilationUnit(cls.fullName.toString)) case _ => Some(AlreadyLoadedCompilationUnit(cls.denot.fullName.toString)) } case _ => cannotUnpickle(s"its class file does not have a TASTY attribute") } case _ => None } // The TASTY section in a/b/C.class may either contain a class a.b.C, an object a.b.C, or both. // We first try to load the class and fallback to loading the object if the class doesn't exist. // Note that if both the class and the object are present, then loading the class will also load // the object, this is why we use orElse here, otherwise we could load the object twice and // create ambiguities! ctx.base.staticRef(className) match { case clsd: ClassDenotation => clsd.infoOrCompleter match { case info: ClassfileLoader => info.load(clsd) // sets cls.rootTreeOrProvider and cls.moduleClass.treeProvider as a side-effect case _ => } def moduleClass = clsd.owner.info.member(className.moduleClassName).symbol compilationUnit(clsd.classSymbol).orElse(compilationUnit(moduleClass)) case _ => cannotUnpickle(s"no class file was found") } case unit => Some(unit) } def run(implicit ctx: Context): Unit = unsupported("run") }
som-snytt/dotty
compiler/src/dotty/tools/dotc/fromtasty/ReadTasty.scala
Scala
apache-2.0
3,049
package info.hupel.isabelle.tests import java.nio.file.Files import org.specs2.Specification import org.specs2.specification.core.Env import info.hupel.isabelle.Platform import info.hupel.isabelle.setup._ class SetupSpec(val specs2Env: Env) extends Specification with DefaultSetup { def is = s2""" Isabelle setup Isabelle setup detection should handle absent setups $absent should handle corrupted setups $corrupted""" def absent = { val dir = Files.createTempDirectory("libisabelle_test") val platform = Platform.genericPlatform(dir) Files.createDirectories(platform.setupStorage) Setup.detect(platform, version, false) must beLeft(Setup.Absent) } def corrupted = { val dir = Files.createTempDirectory("libisabelle_test") val platform = Platform.genericPlatform(dir) Files.createDirectories(platform.setupStorage(version, true)) Setup.detect(platform, version, false) must beLeft.like { case Setup.Corrupted(_) => true } } }
larsrh/libisabelle
tests/offline/src/test/scala/SetupSpec.scala
Scala
apache-2.0
1,004
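A hedged sketch of one more case in the same style as the spec above, pattern-matching directly on Setup.detect's result instead of using the beLeft matcher. success and failure(...) are assumed to be specs2's standard results; version, Platform.genericPlatform, setupStorage and the detect call are the same ones used in the examples above.

  def absentViaMatch = {
    val dir = Files.createTempDirectory("libisabelle_probe")
    val platform = Platform.genericPlatform(dir)
    Files.createDirectories(platform.setupStorage)
    Setup.detect(platform, version, false) match {
      case Left(Setup.Absent)       => success
      case Left(Setup.Corrupted(_)) => failure("storage exists but is marked corrupted")
      case other                    => failure(s"unexpected detection result: $other")
    }
  }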
/* * Copyright © 2014 TU Berlin ([email protected]) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.emmalanguage package ast import scala.reflect.ClassTag import scala.reflect.api.Universe import scala.reflect.macros.Attachments /** * Implements various utility functions that mitigate and/or workaround deficiencies in Scala's * macros and runtime reflection APIs, e.g. non-idempotent type checking, lack of hygiene, * capture-avoiding substitution, fully-qualified names, fresh name generation, identifying * closures, etc. * * This trait has to be instantiated with a [[scala.reflect.api.Universe]] type and works for both * runtime and compile time reflection. */ trait CommonAST { val u: Universe /** Syntax sugar for partial functions. */ type =?>[-A, +B] = PartialFunction[A, B] import u._ import definitions._ import internal._ import Flag._ // ---------------------------------------------------------------------------------------------- // Abstract methods // ---------------------------------------------------------------------------------------------- /** Meta information (attachments). */ trait Meta { def all: Attachments def apply[T: ClassTag]: T = get[T].get def contains[T: ClassTag]: Boolean = all.contains[T] def get[T: ClassTag]: Option[T] = all.get[T] def remove[T: ClassTag](): Unit def update[T: ClassTag](att: T): Unit } private[ast] def freshNameSuffix: Char /** Returns `tpt` with its original field set. */ private[ast] def setOriginal(tpt: TypeTree, original: Tree): TypeTree /** Returns the meta information associated with `sym`. */ def meta(sym: Symbol): Meta /** Returns the meta information associated with `tree`. */ def meta(tree: Tree): Meta /** Returns the enclosing named entity (class, method, value, etc). */ def enclosingOwner: Symbol /** Infers an implicit value from the enclosing context (if possible). */ def inferImplicit(tpe: Type): Option[Tree] /** Raises a compiler warning. */ def warning(msg: String, pos: Position = NoPosition): Unit /** Raises an error and terminates compilation. */ def abort(msg: String, pos: Position = NoPosition): Nothing /** Parses a snippet of source code and returns the AST. */ def parse(code: String): Tree /** Type-checks a `tree` (use `typeMode=true` for type-trees). */ def typeCheck(tree: Tree, typeMode: Boolean = false): Tree /** Removes all type and symbol attributes from a `tree`. */ // FIXME: Replace with `c.untypecheck` or `tb.untypecheck` once SI-5464 is resolved. def unTypeCheck(tree: Tree): Tree = parse(showCode(tree, printRootPkg = true)) /** * Evaluates a snippet of code and returns a value of type `T`. * Note: this can be called on type--checked trees (as opposed to the eval method in ToolBox). */ def eval[T](code: Tree): T // ---------------------------------------------------------------------------------------------- // Property checks // ---------------------------------------------------------------------------------------------- /** Constant limits. */ object Max { /** Maximum number of lambda arguments. 
*/ val FunParams = FunctionClass.seq.size /** Maximum number of tuple elements. */ val TupleElems = TupleClass.seq.size } object is { /** Does `sym` have the property encoded as flag(s)? */ def apply(property: FlagSet, sym: Symbol): Boolean = are(property, flags(sym)) /** The opposite of `is(property, sym)`. */ def not(property: FlagSet, sym: Symbol): Boolean = are.not(property, flags(sym)) /** Is `name` non-degenerate? */ def defined(name: Name): Boolean = name != null && name.toString.nonEmpty /** Is `pos` non-degenerate? */ def defined(pos: Position): Boolean = pos != null && pos != NoPosition /** Is `sym` non-degenerate? */ def defined(sym: Symbol): Boolean = sym != null && sym != NoSymbol /** Is `tree` non-degenerate? */ def defined(tree: Tree): Boolean = tree != null && tree.nonEmpty /** Is `tpe` non-degenerate? */ def defined(tpe: Type): Boolean = tpe != null && tpe != NoType /** Is `tpe` the type of a case class? */ def caseClass(tpe: u.Type): Boolean = tpe.typeSymbol match { case cls: u.ClassSymbol => cls.isCaseClass case _ => false } /** Is `name` a legal identifier (i.e. consisting only of word `\\w+` characters)? */ def encoded(name: Name): Boolean = name == name.encodedName /** Is `sym` a legal identifier (i.e. having an `encoded` name)? */ def encoded(sym: Symbol): Boolean = is.encoded(sym.name) /** Is `sym` local (its owner being a binding or a method)? */ def local(sym: Symbol): Boolean = { val owner = sym.owner !owner.isPackage && !owner.isClass && !owner.isModule } /** Is `sym` overloaded (i.e. having variants with different type signatures). */ def overloaded(sym: Symbol): Boolean = sym.isTerm && sym.asTerm.isOverloaded /** Is `sym` the `_root_` symbol? */ def root(sym: Symbol): Boolean = // This is tricky for Java classes sym.name == termNames.ROOTPKG || sym.name == rootMirror.RootClass.name /** Is `sym` a binding symbol (i.e. value, variable or parameter)? */ def binding(sym: Symbol): Boolean = sym.isTerm && { val term = sym.asTerm term.isVal || term.isVar || term.isParameter } /** Is `sym` a value (`val`) symbol? */ def value(sym: Symbol): Boolean = sym.isTerm && !sym.isParameter && sym.asTerm.isVal /** Is `sym` a variable (`var`) symbol? */ def variable(sym: Symbol): Boolean = sym.isTerm && sym.asTerm.isVar /** Is `sym` a label (loop) symbol? */ def label(sym: Symbol): Boolean = sym.isMethod && is(CONTRAVARIANT, sym) /** Is `sym` a method (`def`) symbol? */ def method(sym: Symbol): Boolean = sym.isMethod && is.not(CONTRAVARIANT, sym) /** Is `sym` a by-name parameter? */ def byName(sym: Symbol): Boolean = sym.isTerm && sym.asTerm.isByNameParam /** Is `sym` a stable identifier? */ def stable(sym: Symbol): Boolean = sym.isTerm && sym.asTerm.isStable /** Is `tpe` legal for a term (i.e. not of a higher kind or method)? */ def result(tpe: Type): Boolean = !tpe.takesTypeArgs && !is.method(tpe) /** Is `tpe` the type of a method (illegal for a term)? */ def method(tpe: Type): Boolean = !(tpe =:= tpe.finalResultType) /** Is `tpe` a stable path? */ def stable(tpe: Type): Boolean = tpe match { case u.ThisType(_) => true case u.SuperType(_, _) => true case u.SingleType(_, _) => true case _ => false } /** Does `tree` define a symbol that owns the children of `tree`? */ def owner(tree: Tree): Boolean = tree match { case _: Bind => false case _: Function => true case _: LabelDef => false case _ => tree.isDef } /** Is `tree` a statement? 
*/ def stat(tree: Tree): Boolean = tree match { case _: Assign => true case _: Bind => false case _: LabelDef => true case _ => tree.isDef } /** Is `tree` a term? */ def term(tree: Tree): Boolean = tree match { case id: Ident => is.defined(id.symbol) && id.symbol.isTerm && is.result(id.tpe) case sel: Select => is.defined(sel.symbol) && sel.symbol.isTerm && is.result(sel.tpe) case app: Apply => is.result(app.tpe) case tapp: TypeApply => is.result(tapp.tpe) case _: Assign => false case _: Bind => false case _: LabelDef => false case _: New => false case _ => tree.isTerm } /** Is `tree` a valid pattern? */ def pattern(tree: Tree): Boolean = tree match { case Ident(termNames.WILDCARD) => true case id: Ident => is.stable(id.symbol) && is.result(id.tpe) case Apply(target, args) => target.isType && args.nonEmpty && is.result(tree.tpe) case _: Alternative => true case _: Bind => true case _: Literal => true case _: Typed => true case _: UnApply => true case _ => false } } object are { /** Are `flags` a superset of `property`? */ def apply(property: FlagSet, flags: FlagSet): Boolean = (flags | property) == flags /** The opposite of `are(property, flags)`. */ def not(property: FlagSet, flags: FlagSet): Boolean = (flags | property) != flags /** Are all `trees` non-degenerate? */ def defined(trees: Traversable[Tree]): Boolean = trees.forall(is.defined) /** Are all `trees` statements? */ def stats(trees: Traversable[Tree]): Boolean = trees.forall(is.stat) /** Are all `trees` terms? */ def terms(trees: Traversable[Tree]): Boolean = trees.forall(is.term) /** Are all `trees` valid patterns? */ def patterns(trees: Traversable[Tree]): Boolean = trees.forall(is.pattern) } object has { /** Does `sym` have an owner? */ def own(sym: Symbol): Boolean = is.defined(sym.owner) /** Does `sym` have a name? */ def nme(sym: Symbol): Boolean = is.defined(sym.name) /** Does `sym` have a type? */ def tpe(sym: Symbol): Boolean = is.defined(sym.info) /** Does `sym` have a position? */ def pos(sym: Symbol): Boolean = is.defined(sym.pos) /** Does `tree` reference a symbol? */ def sym(tree: Tree): Boolean = is.defined(tree.symbol) /** Does `tree` have a type? */ def tpe(tree: Tree): Boolean = is.defined(tree.tpe) /** Does `tree` have a position? */ def pos(tree: Tree): Boolean = is.defined(tree.pos) } object have { /** Do all `symbols` have an owner? */ def own(symbols: Traversable[Symbol]): Boolean = symbols.forall(has.own) /** Do all `symbols` have a name? */ def nme(symbols: Traversable[Symbol]): Boolean = symbols.forall(has.nme) /** Do all `trees` have a symbol? */ def sym(trees: Traversable[Tree]): Boolean = trees.forall(has.sym) /** Do all `trees` have a type? */ def tpe(trees: Traversable[Tree]): Boolean = trees.forall(has.tpe) /** Do all `trees` have a position? */ def pos(trees: Traversable[Tree]): Boolean = trees.forall(has.pos) } // ---------------------------------------------------------------------------------------------- // Flags // ---------------------------------------------------------------------------------------------- lazy val FlagsNoSynthetic = Flags - Flag.SYNTHETIC /** All explicit flags. 
*/ lazy val Flags = Set( Flag.ABSOVERRIDE, Flag.ABSTRACT, Flag.ARTIFACT, Flag.BYNAMEPARAM, Flag.CASE, Flag.CASEACCESSOR, Flag.CONTRAVARIANT, Flag.COVARIANT, Flag.DEFAULTINIT, Flag.DEFAULTPARAM, Flag.DEFERRED, Flag.ENUM, Flag.FINAL, Flag.IMPLICIT, Flag.INTERFACE, Flag.LAZY, Flag.LOCAL, Flag.MACRO, Flag.MUTABLE, Flag.OVERRIDE, Flag.PARAM, Flag.PARAMACCESSOR, Flag.PRIVATE, Flag.PROTECTED, Flag.SEALED, Flag.STABLE, Flag.SYNTHETIC, Flag.TRAIT) // ---------------------------------------------------------------------------------------------- // Virtual nodes // ---------------------------------------------------------------------------------------------- /** Common parent for all virtual AST nodes. */ trait Node { override def toString: String = getClass.getSimpleName.dropRight(1) } }
aalexandrov/emma
emma-language/src/main/scala/org/emmalanguage/ast/CommonAST.scala
Scala
apache-2.0
12,237
/* * Copyright 2011 Delving B.V. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package controllers import java.io.{ PrintWriter, StringWriter } import play.api.Logger import play.api.Play.current import play.api.mvc.{ Controller, Results, Result } import models.OrganizationConfiguration import extensions.Email import util.Quotes /** * Unified logging for controllers * * @author Manuel Bernhardt <[email protected]> */ trait Logging extends Secured { self: Controller with MultitenancySupport => protected val log = Logger("CultureHub") import ErrorReporter._ def Forbidden[A](implicit request: MultitenantRequest[A]): Result = { warning("Forbidden") Results.Forbidden } def Forbidden[A](why: String)(implicit request: MultitenantRequest[A]) = { warning(why) Results.Forbidden(why) } def NotFound[A](why: String)(implicit request: MultitenantRequest[A]) = { info(why) Results.NotFound(views.html.errors.notFound(request, why, None)) } def Error[A](implicit request: MultitenantRequest[A]) = { log.error(withContext("Internal server error")) reportError(request, "Internal server error") Results.InternalServerError(views.html.errors.error(None, None)) } def Error[A](why: String)(implicit request: MultitenantRequest[A]) = { log.error(withContext(why)) reportError(request, why) Results.InternalServerError(views.html.errors.error(None, Some(why))) } def Error[A](why: String, t: Throwable)(implicit request: MultitenantRequest[A]) = { log.error(withContext(why), t) reportError(request, t, why) Results.InternalServerError(views.html.errors.error(None, Some(why))) } // ~~~ Logger wrappers, with more context def info[A](message: String, args: String*)(implicit request: MultitenantRequest[A]) { log.info(withContext(m(message, args))) } def warning[A](message: String, args: String*)(implicit request: MultitenantRequest[A]) { log.warn(withContext(m(message, args))) } def logError[A](message: String, args: String*)(implicit request: MultitenantRequest[A], configuration: OrganizationConfiguration) { log.error(withContext(m(message, args))) reportError(request, if (message != null) message.format(args) else "") } def logError[A](e: Throwable, message: String, args: String*)(implicit request: MultitenantRequest[A], configuration: OrganizationConfiguration) { log.error(withContext(m(message, args)), e) reportError(request, if (message != null) message.format(args) else "") } def reportSecurity[A](message: String)(implicit request: MultitenantRequest[A]) { log.error("Attempted security breach: " + message) ErrorReporter.reportError(securitySubject, toReport(message, request)) } private def m(message: String, args: Seq[String]) = { if (args.length > 0) { message.format(args: _*) } else { message } } private def withContext[A](msg: String)(implicit request: MultitenantRequest[A]) = { "[%s] While accessing %s %s: %s".format(request.session.get("userName").getOrElse("Unknown") + "@" + configuration.orgId, request.method, request.uri, msg) } private def securitySubject[A](implicit request: MultitenantRequest[A]) = "***[CultureHub] 
Security alert on %s".format(request.domain) } object ErrorReporter { def reportError[A](request: MultitenancySupport#MultitenantRequest[A], message: String)(implicit configuration: OrganizationConfiguration) { reportError(subject(request), toReport(message, request)) } def reportError[A](request: MultitenancySupport#MultitenantRequest[A], e: Throwable, message: String)(implicit configuration: OrganizationConfiguration) { reportError(subject(request), toReport(message, e, request)) } def reportError(job: String, t: Throwable, message: String)(implicit configuration: OrganizationConfiguration) { reportError("[CultureHub] An error occured on node %s".format(configuration.commonsService.nodeName), toReport(job, message, t)) } def reportError(subject: String, report: String)(implicit configuration: OrganizationConfiguration) { Email(configuration.emailTarget.systemFrom, subject) .to(configuration.emailTarget.exceptionTo) .withContent( """ |Master, | |an error has happened: | |%s | | |---- |%s """.stripMargin.format(report, Quotes.randomQuote())) .send() } private def getUser[A](request: MultitenancySupport#MultitenantRequest[A]) = request.session.get("userName").getOrElse("Unknown") private def subject[A](request: MultitenancySupport#MultitenantRequest[A]) = "[CultureHub] An error occured on %s".format(request.domain) // port? def toReport(job: String, m: String, t: Throwable): String = { val sw = new StringWriter() val pw = new PrintWriter(sw) t.printStackTrace(pw) """ ~~~ Job ~~~ %s ~~~ Message ~~~ %s ~~~ Throwable message ~~~ %s ~~~ Stacktrace~~~ %s """.format(job, m, t.getMessage, sw.toString) } def toReport[A](m: String, request: MultitenancySupport#MultitenantRequest[A]) = { """ ~~~~ User ~~~ %s ~~~ Message ~~~ %s ~~~ Request context ~~~ %s""".format(getUser(request), m, fullContext(request)) } def toReport[A](m: String, t: Throwable, request: MultitenancySupport#MultitenantRequest[A]): String = { val sw = new StringWriter() val pw = new PrintWriter(sw) t.printStackTrace(pw) """ ~~~ User ~~~ %s ~~~ Message ~~~ %s ~~~ Throwable message ~~~ %s ~~~ Stacktrace~~~ %s ~~~ Request context ~~~ %s""".format(getUser(request), m, t.getMessage, sw.toString, fullContext(request)) } private def fullContext[A](request: MultitenancySupport#MultitenantRequest[A]) = { """| |URL: %s |METHOD: %s |HTTP PARAMS: |%s |HTTP HEADERS: |%s""".stripMargin.format( request.uri, request.method, request.queryString.map(pair => " " + pair._1 + ": " + pair._2.mkString(", ")).mkString("\\n"), request.headers.toMap.map(pair => " " + pair._1 + ": " + pair._2.mkString(", ")).mkString("\\n")) } }
delving/culture-hub
web-core/app/controllers/Logger.scala
Scala
apache-2.0
6,776
package com.github.wakfudecrypt.types.data import com.github.wakfudecrypt._ @BinaryDecoder case class WorldLootList( _0_int32: Int, _1_float64: Double, _2_int16: Short, _3_int16: Short, _4_str: String, _5_int16: Short, _6_array_composite: Array[WorldLootList_6_array_composite] ) object WorldLootList extends BinaryDataCompanion[WorldLootList] { override val dataId = 75 } @BinaryDecoder case class WorldLootList_6_array_composite( _0_int32: Int, _1_float64: Double, _2_str: String, _3_int16: Short, _4_int16: Short, _5_int16: Short, _6_int16: Short, _7_int16: Short, _8_bool: Boolean )
jac3km4/wakfudecrypt
types/src/main/scala/com/github/wakfudecrypt/types/data/WorldLootList.scala
Scala
mit
625
package autolift.algebird import autolift.LiftFlatMap import com.twitter.algebird.{Functor, Monad} trait AlgeLiftFlatMap[Obj, Fn] extends LiftFlatMap[Obj, Fn] object AlgeLiftFlatMap extends LowPriorityAlgeLiftFlatMap { def apply[Obj, Fn](implicit lift: AlgeLiftFlatMap[Obj, Fn]): Aux[Obj, Fn, lift.Out] = lift implicit def base[M[_], A, C >: A, B](implicit fm: Monad[M]): Aux[M[A], C => M[B], M[B]] = new AlgeLiftFlatMap[M[A], C => M[B]]{ type Out = M[B] def apply(fa: M[A], f: C => M[B]) = fm.flatMap(fa)(f) } } trait LowPriorityAlgeLiftFlatMap{ type Aux[Obj, Fn, Out0] = AlgeLiftFlatMap[Obj, Fn]{ type Out = Out0 } implicit def recur[F[_], G, Fn](implicit functor: Functor[F], lift: LiftFlatMap[G, Fn]): Aux[F[G], Fn, F[lift.Out]] = new AlgeLiftFlatMap[F[G], Fn]{ type Out = F[lift.Out] def apply(fg: F[G], f: Fn) = functor.map(fg){ g: G => lift(g, f) } } } final class LiftedFlatMap[A, B, M[_]](protected val f: A => M[B])(implicit fm: Monad[M]){ def andThen[C >: B, D](that: LiftedFlatMap[C, D, M]) = new LiftedFlatMap({ x: A => fm.flatMap(f(x))(that.f) }) def compose[C, D <: A](that: LiftedFlatMap[C, D, M]) = that andThen this def map[C](g: B => C): LiftedFlatMap[A, C, M] = new LiftedFlatMap({ x: A => fm.map(f(x))(g) }) def apply[That](that: That)(implicit lift: LiftFlatMap[That, A => M[B]]): lift.Out = lift(that, f) } trait LiftedFlatMapImplicits{ implicit def liftedFlatMapFunctor[A, M[_]] = new Functor[LiftedFlatMap[A, ?, M]]{ def map[B, C](lb: LiftedFlatMap[A, B, M])(f: B => C) = lb map f } } trait LiftFlatMapContext{ def liftFlatMap[A, B, M[_]](f: A => M[B])(implicit fm: Monad[M]) = new LiftedFlatMap(f) }
wheaties/AutoLifts
autolift-algebird/src/main/scala/autolift/algebird/LiftFlatMap.scala
Scala
apache-2.0
1,707
package pl.touk.nussknacker.engine.flink.util.transformer.aggregate import org.apache.flink.api.common.functions.RuntimeContext import org.apache.flink.configuration.Configuration import org.apache.flink.streaming.api.scala.function.ProcessWindowFunction import org.apache.flink.streaming.api.windowing.windows.TimeWindow import org.apache.flink.util.Collector import pl.touk.nussknacker.engine.api.runtimecontext.{ContextIdGenerator, EngineRuntimeContext} import pl.touk.nussknacker.engine.api.{ValueWithContext, Context => NkContext} import pl.touk.nussknacker.engine.flink.api.process.FlinkCustomNodeContext import pl.touk.nussknacker.engine.flink.util.keyed.KeyEnricher class EnrichingWithKeyFunction(convertToEngineRuntimeContext: RuntimeContext => EngineRuntimeContext, nodeId: String) extends ProcessWindowFunction[AnyRef, ValueWithContext[AnyRef], String, TimeWindow] { @transient private var contextIdGenerator: ContextIdGenerator = _ override def open(parameters: Configuration): Unit = { contextIdGenerator = convertToEngineRuntimeContext(getRuntimeContext).contextIdGenerator(nodeId) } override def process(key: String, context: Context, values: Iterable[AnyRef], out: Collector[ValueWithContext[AnyRef]]): Unit = { values.foreach { value => out.collect(ValueWithContext(value, KeyEnricher.enrichWithKey(NkContext(contextIdGenerator.nextContextId()), key))) } } } object EnrichingWithKeyFunction { def apply(fctx: FlinkCustomNodeContext): EnrichingWithKeyFunction = new EnrichingWithKeyFunction(fctx.convertToEngineRuntimeContext, fctx.nodeId) }
TouK/nussknacker
engine/flink/components/base/src/main/scala/pl/touk/nussknacker/engine/flink/util/transformer/aggregate/EnrichingWithKeyFunction.scala
Scala
apache-2.0
1,595
object GraphSonatype extends Sonatype { def projectUrl = "http://subversion.assembla.com/svn/scala-graph/trunk" def licenseName = "The Apache Software License, Version 2.0" def licenseUrl = "http://www.apache.org/licenses/LICENSE-2.0.txt" def developerId = "peter" def developerName = "Peter Empen" def developerUrl = "http://www.assembla.com/spaces/scala-graph/wiki" override val isSvn = true }
Calavoow/scala-graph
project/GraphSonatype.scala
Scala
bsd-3-clause
428
package com.artclod.mathml import org.junit.runner.RunWith import org.scalatestplus.play._ import play.api.test.Helpers._ import scala.xml._ import play.api.test._ import com.artclod.mathml.scalar._ import com.artclod.mathml.scalar.apply._ import com.artclod.mathml.scalar.apply.{ApplyLn => ln} import com.artclod.mathml.scalar.apply.{ApplyLog => log} import com.artclod.mathml.Match._ import org.scalatest.junit.JUnitRunner class MathMLCheckEqExponentialSpec extends PlaySpec { "Checking equality between symbolic differentiation and manual derivative " should { "confirm e ^ x' = e ^ x" in { val f = (e ^ x) dx val g = e ^ x (f ?= g) mustBe(Yes) } "confirm e ^ 10*x' = 10 * e ^ 10*x" in { val f = (e ^ (`10` * x)) dx val g = `10` * (e ^ (`10` * x)) (f ?= g) mustBe(Yes) } "confirm e ^ 10*x' != 11* e ^ 10*x" in { val f = (e ^ (`10` * x)) dx val g = `11` * (e ^ (`10` * x)) (f ?= g) mustBe(No) } "confirm e ^ 100*x' = 100 * e ^ 100*x" in { val f = (e ^ (`100` * x)) dx val g = `100` * (e ^ (`100` * x)) (f ?= g) mustBe(Yes) } "confirm e ^ 100*x' != 101* e ^ 100*x" in { val f = (e ^ (`100` * x)) dx val g = `101` * (e ^ (`100` * x)) (f ?= g) mustBe(No) } "confirm e ^ 1000*x' = 1000 * e ^ 1000*x" in { val f = (e ^ (`1000` * x)) dx val g = `1000` * (e ^ (`1000` * x)) (f ?= g) mustBe(Yes) } "confirm e ^ 1000*x' != 1001* e ^ 1000*x" in { val f = (e ^ (`1000` * x)) dx val g = `1001` * (e ^ (`1000` * x)) (f ?= g) mustBe(No) } "confirm e ^ -100*x' = -100 * e ^ -100*x" in { val f = (e ^ (`-100` * x)) dx val g = `-100` * (e ^ (`-100` * x)) (f ?= g) mustBe(Yes) } "confirm e ^ -100*x' != -101 * e ^ -100*x" in { val f = (e ^ (`-100` * x)) dx val g = `-101` * (e ^ (`-100` * x)) (f ?= g) mustBe(No) } } }
kristiankime/calc-tutor
test/com/artclod/mathml/MathMLCheckEqExponentialSpec.scala
Scala
mit
1,897
package com.robocubs4205.oauth.grant import java.time.Instant /** * Created by trevor on 9/16/17. */ case class Grant(accessToken:String, refreshToken:Option[String], scopes:Seq[String], expires:Option[Instant])
robocubs4205/cubscout-server
api-server/app/com/robocubs4205/oauth/grant/Grant.scala
Scala
mit
269
package com.github.aselab.activerecord import com.github.aselab.activerecord.dsl._ package models { case class Model1(value: Int) extends ActiveRecord object Model1 extends ActiveRecordCompanion[Model1] case class Model2(value: Int) extends ActiveRecord object Model2 extends ActiveRecordCompanion[Model2] object Tables1 extends ActiveRecordTables { val models = table[Model1] } object Tables2 extends ActiveRecordTables { val models = table[Model2] } } class Model1Spec extends ActiveRecordSpecification { import models._ override def schema = com.github.aselab.activerecord.models.Tables1 override def config = Map( "jdbcurl" -> "jdbc:h2:mem:tables1" ) "ActiveRecordSpecification" should { "run.mode is test" >> { System.getProperty("run.mode") mustEqual "test" } "can override config" >> { Model1(1).create Model1.count mustEqual 1 } } } class Model2Spec extends ActiveRecordSpecification { import models._ override def schema = com.github.aselab.activerecord.models.Tables2 override def config = Map( "jdbcurl" -> "jdbc:h2:mem:tables2" ) "ActiveRecordSpecification" should { "can override config" >> { Model2(1).create Model2.count mustEqual 1 } } }
aselab/scala-activerecord
specs/src/test/scala/ActiveRecordSpecificationSpec.scala
Scala
mit
1,278
/* * Copyright (C) Lightbend Inc. <https://www.lightbend.com> */ package play.sbt import sbt._ import xsbti.Problem import play.api._ object PlayExceptions { private def filterAnnoyingErrorMessages(message: String): String = { val overloaded = """(?s)overloaded method value (.*) with alternatives:(.*)cannot be applied to(.*)""".r message match { case overloaded(method, _, signature) => s"Overloaded method value [$method] cannot be applied to $signature" case _ => message } } case class UnexpectedException(message: Option[String] = None, unexpected: Option[Throwable] = None) extends PlayException( "Unexpected exception", message.getOrElse(unexpected.fold("")(t => s"${t.getClass.getSimpleName}: ${t.getMessage}")), unexpected.orNull ) case class CompilationException(problem: Problem) extends PlayException.ExceptionSource("Compilation error", filterAnnoyingErrorMessages(problem.message)) { def line = problem.position.line.asScala.orNull def position = problem.position.pointer.asScala.orNull def input = problem.position.sourceFile.asScala.map(sf => IO.read(convertSbtVirtualFile(sf))).orNull def sourceName = problem.position.sourceFile.asScala.map(convertSbtVirtualFile(_).getAbsolutePath).orNull private def convertSbtVirtualFile(sourceFile: File) = { val sfPath = sourceFile.getPath if (sfPath.startsWith("${")) { // check for ${BASE} or similar (in case it changes) // Like: ${BASE}/app/controllers/MyController.scala new File(sfPath.substring(sfPath.indexOf("}") + 2)).getAbsoluteFile } else { // A file outside of the base project folder or using sbt <1.4 sourceFile } } } }
mkurz/playframework
dev-mode/sbt-plugin/src/main/scala/play/sbt/PlayExceptions.scala
Scala
apache-2.0
1,799
package com.draugrsoft.integration.helper.constants private [integration] object JobStatus { sealed abstract class JobStatusEnum(name:String){ override def toString = name } case object INITIALIZING extends JobStatusEnum("INITIALIZING") // Initial State when Integration boots up case object RUNNING extends JobStatusEnum("RUNNING") case object STOPPED extends JobStatusEnum("STOPPED") case object COMPLETED extends JobStatusEnum("COMPLETED") implicit def convertJobStatusToString(jse:JobStatusEnum):String = jse.toString implicit def convertStringToJobStatus(str:String):JobStatusEnum = { str match{ case "RUNNING" => RUNNING case "STOPPED" => STOPPED case "INITIALIZING" => INITIALIZING case "COMPLETED" => COMPLETED } } }
yfayman/integration-helper-scala
core/src/main/scala/com/draugrsoft/integration/helper/constants/JobStatus.scala
Scala
mit
800
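The JobStatus object above relies on implicit conversions in both directions between String and JobStatusEnum. A minimal usage sketch follows; it is not part of the repository, the demo package and object names are hypothetical, and it must live under com.draugrsoft.integration because JobStatus is private[integration]:

package com.draugrsoft.integration.example

import com.draugrsoft.integration.helper.constants.JobStatus._

object JobStatusDemo extends App {
  // JobStatusEnum -> String via convertJobStatusToString
  val asString: String = RUNNING
  // String -> JobStatusEnum via convertStringToJobStatus (throws MatchError on unknown input)
  val asStatus: JobStatusEnum = "COMPLETED"
  println(s"$asString / $asStatus") // prints "RUNNING / COMPLETED"
}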
package scodec package examples import scalaz.\\/- import scalaz.std.anyVal.unitInstance import shapeless._ import scodec.bits.{ BitVector, ByteVector } import scodec.codecs._ // Define MPEG codecs object MpegCodecs { // Define case classes that describe MPEG packets and define an HList iso for each case class TransportStreamHeader( transportStringIndicator: Boolean, payloadUnitStartIndicator: Boolean, transportPriority: Boolean, pid: Int, scramblingControl: Int, adaptationFieldControl: Int, continuityCounter: Int ) { def adaptationFieldIncluded: Boolean = adaptationFieldControl >= 2 def payloadIncluded: Boolean = adaptationFieldControl == 1 || adaptationFieldControl == 3 } case class AdaptationFieldFlags( discontinuity: Boolean, randomAccess: Boolean, priority: Boolean, pcrFlag: Boolean, opcrFlag: Boolean, splicingPointFlag: Boolean, transportPrivateDataFlag: Boolean, adaptationFieldExtension: Boolean) case class AdaptationField( flags: AdaptationFieldFlags, pcr: Option[BitVector], opcr: Option[BitVector], spliceCountdown: Option[Int] ) case class MpegPacket( header: TransportStreamHeader, adaptationField: Option[AdaptationField], payload: Option[ByteVector] ) implicit val transportStreamHeader: Codec[TransportStreamHeader] = fixedSizeBytes(4, { ("syncByte" | constant(0x47) ) :~>: ("transportStringIndicator" | bool ) :: ("payloadUnitStartIndicator" | bool ) :: ("transportPriority" | bool ) :: ("pid" | uint(13) ) :: ("scramblingControl" | uint2 ) :: ("adaptationFieldControl" | uint2 ) :: ("continuityCounter" | uint4 ) }).as[TransportStreamHeader] implicit val adaptationFieldFlags: Codec[AdaptationFieldFlags] = fixedSizeBytes(1, { ("discontinuity" | bool ) :: ("randomAccess" | bool ) :: ("priority" | bool ) :: ("pcrFlag" | bool ) :: ("opcrFlag" | bool ) :: ("splicingPointFlag" | bool ) :: ("transportPrivateDataFlag" | bool ) :: ("adaptationFieldExtension" | bool ) }).as[AdaptationFieldFlags] implicit val adaptationField: Codec[AdaptationField] = { ("adaptation_flags" | adaptationFieldFlags ) >>:~ { flags => ("pcr" | conditional(flags.pcrFlag, bits(48)) ) :: ("opcr" | conditional(flags.opcrFlag, bits(48)) ) :: ("spliceCountdown" | conditional(flags.splicingPointFlag, int8) ) }}.as[AdaptationField] implicit val mpegPacket: Codec[MpegPacket] = { ("header" | transportStreamHeader ) >>:~ { hdr => ("adaptation_field" | conditional(hdr.adaptationFieldIncluded, adaptationField) ) :: ("payload" | conditional(hdr.payloadIncluded, bytes(184)) ) }}.as[MpegPacket] } class MpegPacketExample extends CodecSuite { import MpegCodecs._ test("manually roundtripping a packet") { val pkt = MpegPacket(TransportStreamHeader(false, true, false, 0, 0, 1, 15), None, Some(BitVector.low(184 * 8).toByteVector)) val encoded = Codec.encodeValid(pkt) val decoded = Codec.decodeValidValue[MpegPacket](encoded) decoded shouldBe pkt } }
ceedubs/scodec
src/test/scala/scodec/examples/MpegPacketExample.scala
Scala
bsd-3-clause
3,741
/* * Copyright (c) Microsoft. All rights reserved. * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ package org.apache.spark.csharp import org.apache.spark.internal.Logging import org.scalatest.{FunSuite, Outcome} /** * Base abstract class for all unit tests in SparkCLR for handling common functionality. */ abstract class SparkCLRFunSuite extends FunSuite with Logging { /** * Log the suite name and the test name before and after each test. * * Subclasses should never override this method. If they wish to run * custom code before and after each test, they should mix in * the {{org.scalatest.BeforeAndAfter}} trait instead. */ final protected override def withFixture(test: NoArgTest): Outcome = { val testName = test.text val suiteName = this.getClass.getName val shortSuiteName = suiteName.replaceAll("org.apache.spark", "o.a.s") try { logInfo(s"\\n\\n===== TEST OUTPUT FOR $shortSuiteName: '$testName' =====\\n") test() } finally { logInfo(s"\\n\\n===== FINISHED $shortSuiteName: '$testName' =====\\n") } } }
hebinhuang/Mobius
scala/src/test/scala/org/apache/spark/csharp/SparkCLRFunSuite.scala
Scala
mit
1,148
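A hedged sketch of how a concrete suite would build on the base class above; the suite name and assertion are hypothetical, and any test body automatically gets the before/after logging provided by withFixture:

package org.apache.spark.csharp

// Hypothetical example suite; not part of the Mobius repository.
class ExampleLoggingSuite extends SparkCLRFunSuite {
  test("suite and test names are logged around this body") {
    assert(1 + 1 == 2)
  }
}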
package controllers import play.api._ import play.api.mvc._ /** * Project IntelliJ IDEA * Module controllers * User: Gyuhyeon * Date: 2014. 2. 6. * Time: 2:14 AM */ object Authentication extends Controller { }
wingleess/EZOne-server
app/controllers/Authentication.scala
Scala
lgpl-3.0
223
// Generated by the Scala Plugin for the Protocol Buffer Compiler. // Do not edit! // // Protofile syntax: PROTO3 package scalapb.perf.protos /** @param rep * [(scalapb.field).collection_type="List"]; */ @SerialVersionUID(0L) final case class MessageContainer( opt: _root_.scala.Option[scalapb.perf.protos.SimpleMessage] = _root_.scala.None, rep: _root_.scala.Seq[scalapb.perf.protos.SimpleMessage] = _root_.scala.Seq.empty, unknownFields: _root_.scalapb.UnknownFieldSet = _root_.scalapb.UnknownFieldSet.empty ) extends scalapb.GeneratedMessage with scalapb.lenses.Updatable[MessageContainer] { @transient private[this] var __serializedSizeMemoized: _root_.scala.Int = 0 private[this] def __computeSerializedSize(): _root_.scala.Int = { var __size = 0 if (opt.isDefined) { val __value = opt.get __size += 1 + _root_.com.google.protobuf.CodedOutputStream .computeUInt32SizeNoTag(__value.serializedSize) + __value.serializedSize }; rep.foreach { __item => val __value = __item __size += 1 + _root_.com.google.protobuf.CodedOutputStream .computeUInt32SizeNoTag(__value.serializedSize) + __value.serializedSize } __size += unknownFields.serializedSize __size } override def serializedSize: _root_.scala.Int = { var __size = __serializedSizeMemoized if (__size == 0) { __size = __computeSerializedSize() + 1 __serializedSizeMemoized = __size } __size - 1 } def writeTo(`_output__`: _root_.com.google.protobuf.CodedOutputStream): _root_.scala.Unit = { opt.foreach { __v => val __m = __v _output__.writeTag(1, 2) _output__.writeUInt32NoTag(__m.serializedSize) __m.writeTo(_output__) }; rep.foreach { __v => val __m = __v _output__.writeTag(2, 2) _output__.writeUInt32NoTag(__m.serializedSize) __m.writeTo(_output__) }; unknownFields.writeTo(_output__) } def getOpt: scalapb.perf.protos.SimpleMessage = opt.getOrElse(scalapb.perf.protos.SimpleMessage.defaultInstance) def clearOpt: MessageContainer = copy(opt = _root_.scala.None) def withOpt(__v: scalapb.perf.protos.SimpleMessage): MessageContainer = copy(opt = Option(__v)) def clearRep = copy(rep = _root_.scala.Seq.empty) def addRep(__vs: scalapb.perf.protos.SimpleMessage*): MessageContainer = addAllRep(__vs) def addAllRep(__vs: Iterable[scalapb.perf.protos.SimpleMessage]): MessageContainer = copy(rep = rep ++ __vs) def withRep(__v: _root_.scala.Seq[scalapb.perf.protos.SimpleMessage]): MessageContainer = copy(rep = __v) def withUnknownFields(__v: _root_.scalapb.UnknownFieldSet) = copy(unknownFields = __v) def discardUnknownFields = copy(unknownFields = _root_.scalapb.UnknownFieldSet.empty) def getFieldByNumber(__fieldNumber: _root_.scala.Int): _root_.scala.Any = { (__fieldNumber: @ _root_.scala.unchecked) match { case 1 => opt.orNull case 2 => rep } } def getField( __field: _root_.scalapb.descriptors.FieldDescriptor ): _root_.scalapb.descriptors.PValue = { _root_.scala.Predef.require(__field.containingMessage eq companion.scalaDescriptor) (__field.number: @ _root_.scala.unchecked) match { case 1 => opt.map(_.toPMessage).getOrElse(_root_.scalapb.descriptors.PEmpty) case 2 => _root_.scalapb.descriptors.PRepeated(rep.iterator.map(_.toPMessage).toVector) } } def toProtoString: _root_.scala.Predef.String = _root_.scalapb.TextFormat.printToUnicodeString(this) def companion: scalapb.perf.protos.MessageContainer.type = scalapb.perf.protos.MessageContainer // @@protoc_insertion_point(GeneratedMessage[scalapb.perf.MessageContainer]) } object MessageContainer extends scalapb.GeneratedMessageCompanion[scalapb.perf.protos.MessageContainer] { implicit def messageCompanion : 
scalapb.GeneratedMessageCompanion[scalapb.perf.protos.MessageContainer] = this def parseFrom( `_input__`: _root_.com.google.protobuf.CodedInputStream ): scalapb.perf.protos.MessageContainer = { var __opt: _root_.scala.Option[scalapb.perf.protos.SimpleMessage] = _root_.scala.None val __rep: _root_.scala.collection.immutable.VectorBuilder[scalapb.perf.protos.SimpleMessage] = new _root_.scala.collection.immutable.VectorBuilder[scalapb.perf.protos.SimpleMessage] var `_unknownFields__` : _root_.scalapb.UnknownFieldSet.Builder = null var _done__ = false while (!_done__) { val _tag__ = _input__.readTag() _tag__ match { case 0 => _done__ = true case 10 => __opt = Option( __opt.fold( _root_.scalapb.LiteParser.readMessage[scalapb.perf.protos.SimpleMessage](_input__) )(_root_.scalapb.LiteParser.readMessage(_input__, _)) ) case 18 => __rep += _root_.scalapb.LiteParser .readMessage[scalapb.perf.protos.SimpleMessage](_input__) case tag => if (_unknownFields__ == null) { _unknownFields__ = new _root_.scalapb.UnknownFieldSet.Builder() } _unknownFields__.parseField(tag, _input__) } } scalapb.perf.protos.MessageContainer( opt = __opt, rep = __rep.result(), unknownFields = if (_unknownFields__ == null) _root_.scalapb.UnknownFieldSet.empty else _unknownFields__.result() ) } implicit def messageReads : _root_.scalapb.descriptors.Reads[scalapb.perf.protos.MessageContainer] = _root_.scalapb.descriptors.Reads { case _root_.scalapb.descriptors.PMessage(__fieldsMap) => _root_.scala.Predef.require( __fieldsMap.keys.forall(_.containingMessage eq scalaDescriptor), "FieldDescriptor does not match message type." ) scalapb.perf.protos.MessageContainer( opt = __fieldsMap .get(scalaDescriptor.findFieldByNumber(1).get) .flatMap(_.as[_root_.scala.Option[scalapb.perf.protos.SimpleMessage]]), rep = __fieldsMap .get(scalaDescriptor.findFieldByNumber(2).get) .map(_.as[_root_.scala.Seq[scalapb.perf.protos.SimpleMessage]]) .getOrElse(_root_.scala.Seq.empty) ) case _ => throw new RuntimeException("Expected PMessage") } def javaDescriptor: _root_.com.google.protobuf.Descriptors.Descriptor = ProtosProto.javaDescriptor.getMessageTypes().get(1) def scalaDescriptor: _root_.scalapb.descriptors.Descriptor = ProtosProto.scalaDescriptor.messages(1) def messageCompanionForFieldNumber( __number: _root_.scala.Int ): _root_.scalapb.GeneratedMessageCompanion[_] = { var __out: _root_.scalapb.GeneratedMessageCompanion[_] = null (__number: @ _root_.scala.unchecked) match { case 1 => __out = scalapb.perf.protos.SimpleMessage case 2 => __out = scalapb.perf.protos.SimpleMessage } __out } lazy val nestedMessagesCompanions : Seq[_root_.scalapb.GeneratedMessageCompanion[_ <: _root_.scalapb.GeneratedMessage]] = Seq.empty def enumCompanionForFieldNumber( __fieldNumber: _root_.scala.Int ): _root_.scalapb.GeneratedEnumCompanion[_] = throw new MatchError(__fieldNumber) lazy val defaultInstance = scalapb.perf.protos.MessageContainer( opt = _root_.scala.None, rep = _root_.scala.Seq.empty ) implicit class MessageContainerLens[UpperPB]( _l: _root_.scalapb.lenses.Lens[UpperPB, scalapb.perf.protos.MessageContainer] ) extends _root_.scalapb.lenses.ObjectLens[UpperPB, scalapb.perf.protos.MessageContainer](_l) { def opt: _root_.scalapb.lenses.Lens[UpperPB, scalapb.perf.protos.SimpleMessage] = field(_.getOpt)((c_, f_) => c_.copy(opt = Option(f_))) def optionalOpt: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Option[ scalapb.perf.protos.SimpleMessage ]] = field(_.opt)((c_, f_) => c_.copy(opt = f_)) def rep : _root_.scalapb.lenses.Lens[UpperPB, 
_root_.scala.Seq[scalapb.perf.protos.SimpleMessage]] = field(_.rep)((c_, f_) => c_.copy(rep = f_)) } final val OPT_FIELD_NUMBER = 1 final val REP_FIELD_NUMBER = 2 def of( opt: _root_.scala.Option[scalapb.perf.protos.SimpleMessage], rep: _root_.scala.Seq[scalapb.perf.protos.SimpleMessage] ): _root_.scalapb.perf.protos.MessageContainer = _root_.scalapb.perf.protos.MessageContainer( opt, rep ) // @@protoc_insertion_point(GeneratedMessageCompanion[scalapb.perf.MessageContainer]) }
scalapb/ScalaPB
docs/src/main/scala/generated/scalapb/perf/protos/MessageContainer.scala
Scala
apache-2.0
8,508
/** * The MIT License (MIT) * * Copyright (c) 2018 Israel Freitas([email protected]) * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * */ package ifreitas.scalaaiml.elements case class TopicStar(index: Index) extends TemplateExpression { def toXml = <topicstar>{ index.toXml }</topicstar> }
ifreitas/AimlToXml
src/main/scala/ifreitas/scalaaiml/elements/TopicStar.scala
Scala
mit
1,339
package taczombie.model.util import scala.language.implicitConversions import spray.json.DefaultJsonProtocol import spray.json.JsValue import spray.json.pimpAny import taczombie.model.Game import taczombie.model.GameFieldCell import taczombie.model.Human import taczombie.model.Zombie object JsonHelper { case class Data(cmd : String, gameMessage : String, gameData : JsValue, cells : JsValue, log : List[String], humanTokens : JsValue) case class Error(cmd : String = "error", message : String) case class GameData(gameState : String, currentPlayer : Char, lifes : Int, movesRemaining : Int, coins : Int, score : Int, powerUp : Int, levelWidth : Int, levelHeight : Int, frozenTime : Int, deadTokens : Int, totalTokens : Int) case class Cell(x : Int, y : Int, token : Char, isHighlighted : Boolean) case class HumanTokens(x : Int, y : Int, powerUp : Boolean) object ErrorJsonProtocol extends DefaultJsonProtocol { implicit val ErrorFormat = jsonFormat2(Error) } object GameDataJsonProtocol extends DefaultJsonProtocol { implicit val GameDataFormat = jsonFormat12(GameData) implicit val CellFormat = jsonFormat4(Cell) implicit val HumanTokensFormat = jsonFormat3(HumanTokens) implicit val DataFormat = jsonFormat6(Data) } trait Type case object All extends Type case object Updated extends Type var prevAllowedMoves : List[(Int, Int)] = List.empty class Game2JsonHelper(g : Game) { def toJson(command : Type) : String = { var lastUpdatedGameFieldCells : List[GameFieldCell] = List[GameFieldCell]() var cmd = "" command match { case All => cmd = "all" lastUpdatedGameFieldCells = { for { cell <- g.gameField.gameFieldCells } yield cell._2 }.toList case Updated => cmd = "updated" lastUpdatedGameFieldCells = g.gameField.lastUpdatedGameFieldCells } val gameState = g.gameState.toString val currentPlayer = g.players.currentPlayer val frozenTime = currentPlayer.currentToken(g.gameField).frozenTime val deadTokenCount = currentPlayer.deadTokenCount(g.gameField) val totalTokens = currentPlayer.totalTokens var currentPlayerTokenAsChar = ' ' var lifes = 0 val movesRemaining = currentPlayer.movesRemaining val coins = currentPlayer.coins(g.gameField) val score = currentPlayer.score(g.gameField) val width = g.gameField.levelWidth var powerUp = 0 val height = g.gameField.levelHeight currentPlayer match { case h : Human => currentPlayerTokenAsChar = 'H' powerUp = h.currentToken(g.gameField).powerupTime lifes = h.lifes case z : Zombie => currentPlayerTokenAsChar = 'Z' } // Collect and simplify changed game cells import taczombie.model.util.CoordinateHelper._ val allowedMoves = currentPlayer.currentToken(g.gameField).coords.calculateAllowedMoves(movesRemaining, g) val gameFieldCellsFromAllowedMoves = g.gameField.gameFieldCells.filter(x => allowedMoves.contains(x._1._1, x._1._2)) // Get current gameFieldCells which were highlighted, but now arent highlighted anymore. val gameFieldCellsFromPrevAllowedMoves = g.gameField.gameFieldCells.filter(x => prevAllowedMoves.filter( y => !allowedMoves.contains(y)).contains(x._1._1, x._1._2)) // Updated lastAllowedMoves prevAllowedMoves = allowedMoves // Add highlighted Cells. var highlightedCells = { for { gameFieldCell <- gameFieldCellsFromAllowedMoves cell = Cell(gameFieldCell._1._1, gameFieldCell._1._2, getChar(gameFieldCell._2), true) } yield cell }.toList // Add updated Cells. 
var updatedCells = highlightedCells ::: { if (lastUpdatedGameFieldCells != null) for { gameFieldCell <- lastUpdatedGameFieldCells.filter(x => !gameFieldCellsFromAllowedMoves.contains(x.coords)) cell = Cell(gameFieldCell.coords._1, gameFieldCell.coords._2, getChar(gameFieldCell), false) } yield cell else Nil } // Add unhighlighted Cells. (because they arent counted as updated cells). var cells = updatedCells ::: { for { gameFieldCell <- gameFieldCellsFromPrevAllowedMoves cell = Cell(gameFieldCell._1._1, gameFieldCell._1._2, getChar(gameFieldCell._2), false) } yield cell }.toList var logList = g.logger.get val humanTokens = for { token <- g.gameField.findHumanTokens hToken = HumanTokens(token.coords._1, token.coords._2, token.powerupTime > 0) } yield hToken import GameDataJsonProtocol._ val gameData = GameData(gameState, currentPlayerTokenAsChar, lifes, movesRemaining, coins, score, powerUp, width, height, frozenTime, deadTokenCount, totalTokens) val gameDataJson = gameData.toJson val data = Data(cmd.toString(), g.lastGameMessage, gameDataJson, cells.toJson, logList, humanTokens.toJson) data.toJson.toString } private def getChar(gameFieldCell : GameFieldCell) : Char = { val char : Char = { if (gameFieldCell == null) 'N' else if (gameFieldCell.containsWall) 'W' else if (gameFieldCell.containsLivingZombieToken) 'Z' else if (gameFieldCell.containsLivingHumanToken) 'H' else if (gameFieldCell.containsCoin) 'C' else if (gameFieldCell.containsPowerup) 'P' else 'N' } (char) } } class String2ErrorJsonHelper(str : String) { def toErrorJson : String = { import ErrorJsonProtocol._ Error(message = str).toJson.toString } } implicit def game2JsonWrapper(game : Game) = new Game2JsonHelper(game) implicit def string2ErrorJsonWrapper(str : String) = new String2ErrorJsonHelper(str) }
mahieke/TacZombie
model/src/main/scala/taczombie/model/util/JsonHelper.scala
Scala
gpl-2.0
5,955
/* * Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com> */ package play.api.inject import java.util.concurrent.Executor import javax.inject.{ Inject, Provider, Singleton } import akka.actor.ActorSystem import akka.stream.Materializer import com.typesafe.config.Config import play.api._ import play.api.http.HttpConfiguration._ import play.api.http._ import play.api.libs.Files.TemporaryFileReaperConfiguration.TemporaryFileReaperConfigurationProvider import play.api.libs.Files._ import play.api.libs.concurrent._ import play.api.mvc._ import play.api.mvc.request.{ DefaultRequestFactory, RequestFactory } import play.api.routing.Router import play.core.j.JavaRouterAdapter import play.libs.concurrent.HttpExecutionContext import scala.concurrent.{ ExecutionContext, ExecutionContextExecutor } /** * The Play BuiltinModule. * * Provides all the core components of a Play application. This is typically automatically enabled by Play for an * application. */ class BuiltinModule extends SimpleModule((env, conf) => { def dynamicBindings(factories: ((Environment, Configuration) => Seq[Binding[_]])*) = { factories.flatMap(_ (env, conf)) } Seq( bind[Environment] to env, bind[ConfigurationProvider].to(new ConfigurationProvider(conf)), bind[Configuration].toProvider[ConfigurationProvider], bind[Config].toProvider[ConfigProvider], bind[HttpConfiguration].toProvider[HttpConfigurationProvider], bind[ParserConfiguration].toProvider[ParserConfigurationProvider], bind[CookiesConfiguration].toProvider[CookiesConfigurationProvider], bind[FlashConfiguration].toProvider[FlashConfigurationProvider], bind[SessionConfiguration].toProvider[SessionConfigurationProvider], bind[ActionCompositionConfiguration].toProvider[ActionCompositionConfigurationProvider], bind[FileMimeTypesConfiguration].toProvider[FileMimeTypesConfigurationProvider], bind[SecretConfiguration].toProvider[SecretConfigurationProvider], bind[TemporaryFileReaperConfiguration].toProvider[TemporaryFileReaperConfigurationProvider], bind[CookieHeaderEncoding].to[DefaultCookieHeaderEncoding], bind[RequestFactory].to[DefaultRequestFactory], bind[TemporaryFileReaper].to[DefaultTemporaryFileReaper], bind[TemporaryFileCreator].to[DefaultTemporaryFileCreator], bind[PlayBodyParsers].to[DefaultPlayBodyParsers], bind[BodyParsers.Default].toSelf, bind[DefaultActionBuilder].to[DefaultActionBuilderImpl], bind[ControllerComponents].to[DefaultControllerComponents], bind[Futures].to[DefaultFutures], // Application lifecycle, bound both to the interface, and its implementation, so that Application can access it // to shut it down. 
bind[DefaultApplicationLifecycle].toSelf, bind[ApplicationLifecycle].to(bind[DefaultApplicationLifecycle]), bind[Application].to[DefaultApplication], bind[play.Application].to[play.DefaultApplication], bind[Router].toProvider[RoutesProvider], bind[play.routing.Router].to[JavaRouterAdapter], bind[ActorSystem].toProvider[ActorSystemProvider], bind[Materializer].toProvider[MaterializerProvider], bind[ExecutionContextExecutor].toProvider[ExecutionContextProvider], bind[ExecutionContext].to[ExecutionContextExecutor], bind[Executor].to[ExecutionContextExecutor], bind[HttpExecutionContext].toSelf, bind[FileMimeTypes].toProvider[DefaultFileMimeTypesProvider] ) ++ dynamicBindings( HttpErrorHandler.bindingsFromConfiguration, HttpFilters.bindingsFromConfiguration, HttpRequestHandler.bindingsFromConfiguration, ActionCreator.bindingsFromConfiguration ) }) // This allows us to access the original configuration via this // provider while overriding the binding for Configuration itself. class ConfigurationProvider(val get: Configuration) extends Provider[Configuration] class ConfigProvider @Inject() (configuration: Configuration) extends Provider[Config] { override def get() = configuration.underlying } @Singleton class RoutesProvider @Inject() (injector: Injector, environment: Environment, configuration: Configuration, httpConfig: HttpConfiguration) extends Provider[Router] { lazy val get = { val prefix = httpConfig.context val router = Router.load(environment, configuration) .fold[Router](Router.empty)(injector.instanceOf(_)) router.withPrefix(prefix) } }
hagl/playframework
framework/src/play/src/main/scala/play/api/inject/BuiltinModule.scala
Scala
apache-2.0
4,403
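For contrast with the built-in bindings above, here is a minimal sketch of a user-defined module written in the same SimpleModule style; Greeting, EnglishGreeting, and GreetingModule are hypothetical application classes, not Play APIs:

package com.example.modules

import play.api.{Configuration, Environment}
import play.api.inject._

// Hypothetical application types used only for illustration.
trait Greeting { def hello: String }
class EnglishGreeting extends Greeting { def hello = "hello" }

// Registers the binding the same way BuiltinModule does, via a bindings function.
class GreetingModule extends SimpleModule(
  (_: Environment, _: Configuration) => Seq(
    bind[Greeting].to[EnglishGreeting]
  )
)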
package com.stefansavev.core.string2id import java.io.{InputStream, OutputStream} import com.stefansavev.core.serialization.{StringSerializer, IntSerializer, TypedSerializer} object String2IdHasherSerialization { implicit object String2IdHasherSerializer extends TypedSerializer[String2IdHasher] { def toBinary(outputStream: OutputStream, string2Id: String2IdHasher): Unit = { if (string2Id == null){ throw new IllegalStateException("string2IdHasher cannot be null") } val settings = string2Id.getSettings() IntSerializer.write(outputStream, settings.maxValues) IntSerializer.write(outputStream, settings.avgStringLen) IntSerializer.write(outputStream, settings.toleratedNumberOfCollisions) var id = 0 val numStrings = string2Id.numberOfUniqueStrings() IntSerializer.write(outputStream, numStrings) while(id < numStrings){ val str = string2Id.getStringAtInternalIndex(id).get StringSerializer.write(outputStream, str) id += 1 } } def fromBinary(inputStream: InputStream): String2IdHasher = { /*just print memory usage val maxValues = IntSerializer.read(inputStream) val avgStringLen = IntSerializer.read(inputStream) val numCollisions = IntSerializer.read(inputStream) val numStrings = IntSerializer.read(inputStream) var size = 0L var i = 0 while(i < numStrings){ val str = StringSerializer.read(inputStream) size += (1 + str.length*2) i += 1 } println("Strings size in MB: " + (size/(1024*1024))) null */ val maxValues = IntSerializer.read(inputStream) val avgStringLen = IntSerializer.read(inputStream) val numCollisions = IntSerializer.read(inputStream) val settings = new StringIdHasherSettings(maxValues, avgStringLen, numCollisions) val string2IdHasher = new String2IdHasher(settings) val numStrings = IntSerializer.read(inputStream) var i = 0 while(i < numStrings){ val str = StringSerializer.read(inputStream) val handle = string2IdHasher.add(str) val index = string2IdHasher.getInternalId(handle) if (index != i){ throw new IllegalStateException("Internal error while reading hashed strings") } i += 1 } string2IdHasher } def name: String = "String2IdHasherSerializer" } }
stefansavev/random-projections-at-berlinbuzzwords
src/main/scala/com/stefansavev/core/string2id/String2IdHasherSerializer.scala
Scala
apache-2.0
2,486
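A minimal round-trip sketch for the serializer above, assuming only the constructors and methods visible in that source (StringIdHasherSettings(maxValues, avgStringLen, toleratedCollisions), String2IdHasher(settings), add, numberOfUniqueStrings); the settings values and the demo package/object names are illustrative:

package com.stefansavev.examples

import java.io.{ByteArrayInputStream, ByteArrayOutputStream}

import com.stefansavev.core.string2id.{String2IdHasher, StringIdHasherSettings}
import com.stefansavev.core.string2id.String2IdHasherSerialization.String2IdHasherSerializer

object String2IdHasherRoundTrip extends App {
  // Illustrative settings: capacity, average string length, tolerated collisions.
  val hasher = new String2IdHasher(new StringIdHasherSettings(1024, 16, 4))
  Seq("alpha", "beta", "gamma").foreach(s => hasher.add(s))

  // Serialize to an in-memory buffer and read it back.
  val out = new ByteArrayOutputStream()
  String2IdHasherSerializer.toBinary(out, hasher)
  val restored = String2IdHasherSerializer.fromBinary(new ByteArrayInputStream(out.toByteArray))

  println(restored.numberOfUniqueStrings()) // expected: 3
}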
package controllers import com.thetestpeople.trt.service.Service import com.thetestpeople.trt.utils.HasLogger import play.api.data.Form import play.api.mvc._ import routes.ImportSpecController import viewModel._ import views.html /** * Controller for TeamCity config screen */ class TeamCityController(service: Service) extends AbstractController(service) with HasLogger { def teamCityConfig() = Action { implicit request ⇒ Ok(html.teamCityConfig(getTeamCityConfigurationForm)) } def updateTeamCityConfig() = Action { implicit request ⇒ TeamCityConfigurationForm.form.bindFromRequest().fold( formWithErrors ⇒ { logger.debug("updateTeamCityConfig() errors: " + formWithErrors.errorsAsJson) BadRequest(html.teamCityConfig(formWithErrors)) }, configuration ⇒ { val newConfig = configuration.asTeamCityConfiguration service.updateTeamCityConfiguration(newConfig) logger.debug(s"New TeamCity config: $newConfig") Redirect(routes.TeamCityController.teamCityConfig).flashing("success" -> "Updated configuration") }) } private def getTeamCityConfigurationForm: Form[EditableTeamCityConfiguration] = { val editableConfig = EditableTeamCityConfiguration(service.getTeamCityConfiguration) TeamCityConfigurationForm.form.fill(editableConfig) } }
thetestpeople/trt
app/controllers/TeamCityController.scala
Scala
mit
1,350
package org.bitcoins.core.script.interpreter import org.bitcoins.core.consensus.Consensus import org.bitcoins.core.crypto.{BaseTransactionSignatureComponent, WitnessV0TransactionSignatureComponent} import org.bitcoins.core.currency.{CurrencyUnit, CurrencyUnits} import org.bitcoins.core.protocol.CompactSizeUInt import org.bitcoins.core.protocol.script._ import org.bitcoins.core.protocol.transaction.{BaseTransaction, EmptyTransactionOutPoint, Transaction, WitnessTransaction} import org.bitcoins.core.script._ import org.bitcoins.core.script.arithmetic._ import org.bitcoins.core.script.bitwise._ import org.bitcoins.core.script.constant.{ScriptToken, _} import org.bitcoins.core.script.control._ import org.bitcoins.core.script.crypto._ import org.bitcoins.core.script.flag._ import org.bitcoins.core.script.locktime.{LockTimeInterpreter, OP_CHECKLOCKTIMEVERIFY, OP_CHECKSEQUENCEVERIFY} import org.bitcoins.core.script.reserved._ import org.bitcoins.core.script.result._ import org.bitcoins.core.script.splice._ import org.bitcoins.core.script.stack._ import org.bitcoins.core.util.{BitcoinSLogger, BitcoinSUtil, BitcoinScriptUtil} import scala.annotation.tailrec /** * Created by chris on 1/6/16. */ trait ScriptInterpreter extends CryptoInterpreter with StackInterpreter with ControlOperationsInterpreter with BitwiseInterpreter with ConstantInterpreter with ArithmeticInterpreter with SpliceInterpreter with LockTimeInterpreter with BitcoinSLogger { /** * Currently bitcoin core limits the maximum number of non-push operations per script * to 201 */ private lazy val maxScriptOps = 201 /** We cannot push an element larger than 520 bytes onto the stack */ private lazy val maxPushSize = 520 /** * Runs an entire script though our script programming language and * returns a [[ScriptResult]] indicating if the script was valid, or if not what error it encountered */ def run(program : PreExecutionScriptProgram) : ScriptResult = { val scriptSig = program.txSignatureComponent.scriptSignature val scriptPubKey = program.txSignatureComponent.scriptPubKey val flags = program.flags val p2shEnabled = ScriptFlagUtil.p2shEnabled(flags) val segwitEnabled = ScriptFlagUtil.segWitEnabled(flags) val executedProgram : ExecutedScriptProgram = if (ScriptFlagUtil.requirePushOnly(flags) && !BitcoinScriptUtil.isPushOnly(program.script)) { logger.error("We can only have push operations inside of the script sig when the SIGPUSHONLY flag is set") ScriptProgram(program,ScriptErrorSigPushOnly) } else if (scriptSig.isInstanceOf[P2SHScriptSignature] && p2shEnabled && !BitcoinScriptUtil.isPushOnly(scriptSig.asm)) { logger.error("P2SH scriptSigs are required to be push only by definition - see BIP16, got: " + scriptSig.asm) ScriptProgram(program,ScriptErrorSigPushOnly) } else { val scriptSigExecutedProgram = loop(program,0) val t = scriptSigExecutedProgram.txSignatureComponent val scriptPubKeyProgram = ScriptProgram(t, scriptSigExecutedProgram.stack, t.scriptPubKey.asm, t.scriptPubKey.asm) val scriptPubKeyExecutedProgram : ExecutedScriptProgram = loop(scriptPubKeyProgram,0) if (scriptSigExecutedProgram.error.isDefined) { scriptSigExecutedProgram } else if (scriptPubKeyExecutedProgram.error.isDefined || scriptPubKeyExecutedProgram.stackTopIsFalse) { scriptPubKeyExecutedProgram } else { scriptPubKey match { case witness : WitnessScriptPubKey => if (segwitEnabled) executeSegWitScript(scriptPubKeyExecutedProgram,witness) else scriptPubKeyExecutedProgram case p2sh : P2SHScriptPubKey => if (p2shEnabled) executeP2shScript(scriptSigExecutedProgram, 
program, p2sh) else scriptPubKeyExecutedProgram case _ : P2PKHScriptPubKey | _: P2PKScriptPubKey | _: MultiSignatureScriptPubKey | _: CSVScriptPubKey | _ : CLTVScriptPubKey | _ : NonStandardScriptPubKey | _ : WitnessCommitment | EmptyScriptPubKey => scriptPubKeyExecutedProgram } } } logger.debug("Executed Script Program: " + executedProgram) if (executedProgram.error.isDefined) executedProgram.error.get else if (hasUnexpectedWitness(program)) { //note: the 'program' value we pass above is intentional, we need to check the original program //as the 'executedProgram' may have had the scriptPubKey value changed to the rebuilt ScriptPubKey of the witness program ScriptErrorWitnessUnexpected } else if (executedProgram.stackTopIsTrue && flags.contains(ScriptVerifyCleanStack)) { //require that the stack after execution has exactly one element on it if (executedProgram.stack.size == 1) ScriptOk else ScriptErrorCleanStack } else if (executedProgram.stackTopIsTrue) ScriptOk else ScriptErrorEvalFalse } /** * P2SH scripts are unique in their evaluation, first the scriptSignature must be added to the stack, next the * p2sh scriptPubKey must be run to make sure the serialized redeem script hashes to the value found in the p2sh * scriptPubKey, then finally the serialized redeemScript is decoded and run with the arguments in the p2sh script signature * a p2sh script returns true if both of those intermediate steps evaluate to true * * @param scriptPubKeyExecutedProgram the program with the script signature pushed onto the stack * @param originalProgram the original program, used for setting errors & checking that the original script signature contains push only tokens * @param p2shScriptPubKey the p2sh scriptPubKey that contains the value the redeemScript must hash to * @return the executed program */ private def executeP2shScript(scriptPubKeyExecutedProgram : ExecutedScriptProgram, originalProgram : ScriptProgram, p2shScriptPubKey : P2SHScriptPubKey) : ExecutedScriptProgram = { /** Helper function to actually run a p2sh script */ def run(p: ExecutedScriptProgram, stack : Seq[ScriptToken], s: ScriptPubKey): ExecutedScriptProgram = { logger.debug("Running p2sh script: " + stack) val p2shRedeemScriptProgram = ScriptProgram(p.txSignatureComponent,stack.tail, s.asm) if (ScriptFlagUtil.requirePushOnly(p2shRedeemScriptProgram.flags) && !BitcoinScriptUtil.isPushOnly(s.asm)) { logger.error("p2sh redeem script must be push only operations whe SIGPUSHONLY flag is set") ScriptProgram(p2shRedeemScriptProgram,ScriptErrorSigPushOnly) } else loop(p2shRedeemScriptProgram,0) } val scriptSig = scriptPubKeyExecutedProgram.txSignatureComponent.scriptSignature val scriptSigAsm : Seq[ScriptToken] = scriptSig.asm //need to check if the scriptSig is push only as required by bitcoin core //https://github.com/bitcoin/bitcoin/blob/528472111b4965b1a99c4bcf08ac5ec93d87f10f/src/script/interpreter.cpp#L1419 if (!BitcoinScriptUtil.isPushOnly(scriptSigAsm)) { ScriptProgram(scriptPubKeyExecutedProgram,ScriptErrorSigPushOnly) } else if (scriptPubKeyExecutedProgram.error.isDefined) { scriptPubKeyExecutedProgram } else { scriptPubKeyExecutedProgram.stackTopIsTrue match { case true => logger.debug("Hashes matched between the p2shScriptSignature & the p2shScriptPubKey") //we need to run the deserialized redeemScript & the scriptSignature without the serialized redeemScript val stack = scriptPubKeyExecutedProgram.stack val redeemScriptBytes = stack.head.bytes val c = CompactSizeUInt.calculateCompactSizeUInt(redeemScriptBytes) val redeemScript = 
ScriptPubKey(c.bytes ++ redeemScriptBytes) redeemScript match { case w : WitnessScriptPubKey => val pushOp = BitcoinScriptUtil.calculatePushOp(redeemScriptBytes) val expectedScriptBytes = pushOp.flatMap(_.bytes) ++ redeemScriptBytes val flags = scriptPubKeyExecutedProgram.flags val segwitEnabled = ScriptFlagUtil.segWitEnabled(flags) if (segwitEnabled && (scriptSig.asmBytes == expectedScriptBytes)) { // The scriptSig must be _exactly_ a single push of the redeemScript. Otherwise we // reintroduce malleability. logger.info("redeem script was witness script pubkey, segwit was enabled, scriptSig was single push of redeemScript") executeSegWitScript(scriptPubKeyExecutedProgram,w) } else if (segwitEnabled && (scriptSig.asmBytes != expectedScriptBytes)) { logger.error("Segwit was enabled, but p2sh redeem script was malleated") logger.error("ScriptSig bytes: " + scriptSig.hex) logger.error("expected scriptsig bytes: " + BitcoinSUtil.encodeHex(expectedScriptBytes)) ScriptProgram(scriptPubKeyExecutedProgram, ScriptErrorWitnessMalleatedP2SH) } else { logger.warn("redeem script was witness script pubkey, segwit was NOT enabled") //treat the segwit scriptpubkey as any other redeem script run(scriptPubKeyExecutedProgram,stack,w) } case s @ (_ : P2SHScriptPubKey | _ : P2PKHScriptPubKey | _ : P2PKScriptPubKey | _ : MultiSignatureScriptPubKey | _ : CLTVScriptPubKey | _ : CSVScriptPubKey | _: NonStandardScriptPubKey | _ : WitnessCommitment | EmptyScriptPubKey) => logger.debug("redeemScript: " + s.asm) run(scriptPubKeyExecutedProgram,stack,s) } case false => logger.warn("P2SH scriptPubKey hash did not match the hash for the serialized redeemScript") scriptPubKeyExecutedProgram } } } /** Runs a segwit script through our interpreter, mimics this functionality in bitcoin core: * [[https://github.com/bitcoin/bitcoin/blob/528472111b4965b1a99c4bcf08ac5ec93d87f10f/src/script/interpreter.cpp#L1441-L1452]] * @param scriptPubKeyExecutedProgram the program with the [[ScriptPubKey]] executed * @return */ private def executeSegWitScript(scriptPubKeyExecutedProgram: ExecutedScriptProgram, witnessScriptPubKey: WitnessScriptPubKey): ExecutedScriptProgram = { scriptPubKeyExecutedProgram.txSignatureComponent match { case b : BaseTransactionSignatureComponent => logger.error("Cannot verify witness program with a BaseTransactionSignatureComponent") ScriptProgram(scriptPubKeyExecutedProgram,ScriptErrorWitnessProgramWitnessEmpty) case w : WitnessV0TransactionSignatureComponent => val scriptSig = scriptPubKeyExecutedProgram.txSignatureComponent.scriptSignature val (witnessVersion,witnessProgram) = (witnessScriptPubKey.witnessVersion, witnessScriptPubKey.witnessProgram) val witness = w.witness //scriptsig must be empty if we have raw p2wsh //if script pubkey is a P2SHScriptPubKey then we have P2SH(P2WSH) if (scriptSig != EmptyScriptSignature && !w.scriptPubKey.isInstanceOf[P2SHScriptPubKey]) ScriptProgram(scriptPubKeyExecutedProgram,ScriptErrorWitnessMalleated) else if (witness.stack.exists(_.size > maxPushSize)) ScriptProgram(scriptPubKeyExecutedProgram, ScriptErrorPushSize) else verifyWitnessProgram(witnessVersion, witness, witnessProgram, w) } } /** Verifies a segregated witness program by running it through the interpreter * [[https://github.com/bitcoin/bitcoin/blob/f8528134fc188abc5c7175a19680206964a8fade/src/script/interpreter.cpp#L1302]]*/ private def verifyWitnessProgram(witnessVersion: WitnessVersion, scriptWitness: ScriptWitness, witnessProgram: Seq[ScriptToken], witnessTxSigComponent: 
WitnessV0TransactionSignatureComponent): ExecutedScriptProgram = { /** Helper function to run the post segwit execution checks */ def postSegWitProgramChecks(evaluated: ExecutedScriptProgram): ExecutedScriptProgram = { logger.debug("Stack after evaluating witness: " + evaluated.stack) if (evaluated.error.isDefined) evaluated else if (evaluated.stack.size != 1 || evaluated.stackTopIsFalse) ScriptProgram(evaluated,ScriptErrorEvalFalse) else evaluated } witnessVersion match { case WitnessVersion0 => val either: Either[(Seq[ScriptToken], ScriptPubKey),ScriptError] = witnessVersion.rebuild(scriptWitness, witnessProgram) either match { case Left((stack,scriptPubKey)) => val w = witnessTxSigComponent val newProgram = ScriptProgram(w.transaction,scriptPubKey,w.inputIndex,stack,scriptPubKey.asm, scriptPubKey.asm,Nil, w.flags,w.sigVersion,w.amount) val evaluated = loop(newProgram,0) postSegWitProgramChecks(evaluated) case Right(err) => val program = ScriptProgram(witnessTxSigComponent) ScriptProgram(program,err) } case UnassignedWitness => logger.warn("Unassigned witness inside of witness script pubkey") val w = witnessTxSigComponent val flags = w.flags val discourageUpgradableWitnessVersion = ScriptFlagUtil.discourageUpgradableWitnessProgram(flags) val program = ScriptProgram(w.transaction,w.scriptPubKey,w.inputIndex,Nil,Nil, w.scriptPubKey.asm,Nil, w.flags,w.sigVersion,w.amount) if (discourageUpgradableWitnessVersion) { ScriptProgram(program,UnassignedWitness.rebuild(scriptWitness, witnessProgram).right.get) } else { //if we are not discouraging upgradable ops, we just trivially return the program with an OP_TRUE on the stack //see: https://github.com/bitcoin/bitcoin/blob/b83264d9c7a8ddb79f64bd9540caddc8632ef31f/src/script/interpreter.cpp#L1386-L1389 val evaluated = loop(ScriptProgram(program,Seq(OP_TRUE),ScriptProgram.Stack),0) evaluated } } } /** * The execution loop for a script * * @param program the program whose script needs to be evaluated * @return program the final state of the program after being evaluated by the interpreter */ @tailrec private def loop(program : ScriptProgram, opCount: Int) : ExecutedScriptProgram = { logger.debug("Stack: " + program.stack) logger.debug("Script: " + program.script) if (opCount > maxScriptOps && !program.isInstanceOf[ExecutedScriptProgram]) { logger.error("We have reached the maximum amount of script operations allowed") logger.error("Here are the remaining operations in the script: " + program.script) loop(ScriptProgram(program,ScriptErrorOpCount),opCount) } else if (program.script.flatMap(_.bytes).size > 10000 && !program.isInstanceOf[ExecutedScriptProgram]) { logger.error("We cannot run a script that is larger than 10,000 bytes") program match { case p : PreExecutionScriptProgram => loop(ScriptProgram(ScriptProgram.toExecutionInProgress(p), ScriptErrorScriptSize),opCount) case _ : ExecutionInProgressScriptProgram | _ : ExecutedScriptProgram => loop(ScriptProgram(program, ScriptErrorScriptSize),opCount) } } else { program match { case p : PreExecutionScriptProgram => loop(ScriptProgram.toExecutionInProgress(p,Some(p.stack)),opCount) case p : ExecutedScriptProgram => val countedOps = program.originalScript.map(BitcoinScriptUtil.countsTowardsScriptOpLimit(_)).count(_ == true) logger.info("Counted ops: " + countedOps) if (countedOps > maxScriptOps && p.error.isEmpty) { loop(ScriptProgram(p,ScriptErrorOpCount),opCount) } else p case p : ExecutionInProgressScriptProgram => p.script match { //if at any time we see that the program is not valid //cease script 
execution case _ if p.script.intersect(Seq(OP_VERIF, OP_VERNOTIF)).nonEmpty => logger.error("Script is invalid even when a OP_VERIF or OP_VERNOTIF occurs in an unexecuted OP_IF branch") loop(ScriptProgram(p, ScriptErrorBadOpCode),opCount) //disabled splice operation case _ if p.script.intersect(Seq(OP_CAT, OP_SUBSTR, OP_LEFT, OP_RIGHT)).nonEmpty => logger.error("Script is invalid because it contains a disabled splice operation") loop(ScriptProgram(p, ScriptErrorDisabledOpCode),opCount) //disabled bitwise operations case _ if p.script.intersect(Seq(OP_INVERT, OP_AND, OP_OR, OP_XOR)).nonEmpty => logger.error("Script is invalid because it contains a disabled bitwise operation") loop(ScriptProgram(p, ScriptErrorDisabledOpCode),opCount) //disabled arithmetic operations case _ if p.script.intersect(Seq(OP_MUL, OP_2MUL, OP_DIV, OP_2DIV, OP_MOD, OP_LSHIFT, OP_RSHIFT)).nonEmpty => logger.error("Script is invalid because it contains a disabled arithmetic operation") loop(ScriptProgram(p, ScriptErrorDisabledOpCode),opCount) //program cannot contain a push operation > 520 bytes case _ if (p.script.exists(token => token.bytes.size > maxPushSize)) => logger.error("We have a script constant that is larger than 520 bytes, this is illegal: " + p.script) loop(ScriptProgram(p, ScriptErrorPushSize),opCount) //program stack size cannot be greater than 1000 elements case _ if ((p.stack.size + p.altStack.size) > 1000) => logger.error("We cannot have a stack + alt stack size larger than 1000 elements") loop(ScriptProgram(p, ScriptErrorStackSize),opCount) //stack operations case OP_DUP :: t => loop(opDup(p),calcOpCount(opCount,OP_DUP)) case OP_DEPTH :: t => loop(opDepth(p),calcOpCount(opCount,OP_DEPTH)) case OP_TOALTSTACK :: t => loop(opToAltStack(p),calcOpCount(opCount,OP_TOALTSTACK)) case OP_FROMALTSTACK :: t => loop(opFromAltStack(p),calcOpCount(opCount,OP_FROMALTSTACK)) case OP_DROP :: t => loop(opDrop(p),calcOpCount(opCount,OP_DROP)) case OP_IFDUP :: t => loop(opIfDup(p),calcOpCount(opCount,OP_IFDUP)) case OP_NIP :: t => loop(opNip(p),calcOpCount(opCount,OP_NIP)) case OP_OVER :: t => loop(opOver(p),calcOpCount(opCount,OP_OVER)) case OP_PICK :: t => loop(opPick(p),calcOpCount(opCount,OP_PICK)) case OP_ROLL :: t => loop(opRoll(p),calcOpCount(opCount,OP_ROLL)) case OP_ROT :: t => loop(opRot(p),calcOpCount(opCount,OP_ROT)) case OP_2ROT :: t => loop(op2Rot(p),calcOpCount(opCount,OP_2ROT)) case OP_2DROP :: t => loop(op2Drop(p),calcOpCount(opCount,OP_2DROP)) case OP_SWAP :: t => loop(opSwap(p),calcOpCount(opCount,OP_SWAP)) case OP_TUCK :: t => loop(opTuck(p),calcOpCount(opCount,OP_TUCK)) case OP_2DUP :: t => loop(op2Dup(p),calcOpCount(opCount,OP_2DUP)) case OP_3DUP :: t => loop(op3Dup(p),calcOpCount(opCount,OP_3DUP)) case OP_2OVER :: t => loop(op2Over(p),calcOpCount(opCount,OP_2OVER)) case OP_2SWAP :: t => loop(op2Swap(p),calcOpCount(opCount,OP_2SWAP)) //arithmetic operations case OP_ADD :: t => loop(opAdd(p),calcOpCount(opCount,OP_ADD)) case OP_1ADD :: t => loop(op1Add(p),calcOpCount(opCount,OP_1ADD)) case OP_1SUB :: t => loop(op1Sub(p),calcOpCount(opCount,OP_1SUB)) case OP_SUB :: t => loop(opSub(p),calcOpCount(opCount,OP_SUB)) case OP_ABS :: t => loop(opAbs(p),calcOpCount(opCount,OP_ABS)) case OP_NEGATE :: t => loop(opNegate(p),calcOpCount(opCount,OP_NEGATE)) case OP_NOT :: t => loop(opNot(p),calcOpCount(opCount,OP_NOT)) case OP_0NOTEQUAL :: t => loop(op0NotEqual(p),calcOpCount(opCount,OP_0NOTEQUAL)) case OP_BOOLAND :: t => loop(opBoolAnd(p),calcOpCount(opCount,OP_BOOLAND)) case OP_BOOLOR :: t => 
loop(opBoolOr(p),calcOpCount(opCount,OP_BOOLOR)) case OP_NUMEQUAL :: t => loop(opNumEqual(p),calcOpCount(opCount,OP_NUMEQUAL)) case OP_NUMEQUALVERIFY :: t => loop(opNumEqualVerify(p),calcOpCount(opCount,OP_NUMEQUALVERIFY)) case OP_NUMNOTEQUAL :: t => loop(opNumNotEqual(p),calcOpCount(opCount,OP_NUMNOTEQUAL)) case OP_LESSTHAN :: t => loop(opLessThan(p),calcOpCount(opCount,OP_LESSTHAN)) case OP_GREATERTHAN :: t => loop(opGreaterThan(p),calcOpCount(opCount,OP_GREATERTHAN)) case OP_LESSTHANOREQUAL :: t => loop(opLessThanOrEqual(p),calcOpCount(opCount,OP_LESSTHANOREQUAL)) case OP_GREATERTHANOREQUAL :: t => loop(opGreaterThanOrEqual(p),calcOpCount(opCount,OP_GREATERTHANOREQUAL)) case OP_MIN :: t => loop(opMin(p),calcOpCount(opCount,OP_MIN)) case OP_MAX :: t => loop(opMax(p),calcOpCount(opCount,OP_MAX)) case OP_WITHIN :: t => loop(opWithin(p),calcOpCount(opCount,OP_WITHIN)) //bitwise operations case OP_EQUAL :: t => loop(opEqual(p),calcOpCount(opCount,OP_EQUAL)) case OP_EQUALVERIFY :: t => loop(opEqualVerify(p),calcOpCount(opCount,OP_EQUALVERIFY)) case OP_0 :: t => loop(ScriptProgram(p, ScriptNumber.zero :: p.stack, t),calcOpCount(opCount,OP_0)) case (scriptNumberOp : ScriptNumberOperation) :: t => loop(ScriptProgram(p, ScriptNumber(scriptNumberOp.underlying) :: p.stack, t),calcOpCount(opCount,scriptNumberOp)) case (bytesToPushOntoStack: BytesToPushOntoStack) :: t => loop(pushScriptNumberBytesToStack(p),calcOpCount(opCount,bytesToPushOntoStack)) case (scriptNumber: ScriptNumber) :: t => loop(ScriptProgram(p, scriptNumber :: p.stack, t),calcOpCount(opCount,scriptNumber)) case OP_PUSHDATA1 :: t => loop(opPushData1(p),calcOpCount(opCount,OP_PUSHDATA1)) case OP_PUSHDATA2 :: t => loop(opPushData2(p),calcOpCount(opCount,OP_PUSHDATA2)) case OP_PUSHDATA4 :: t => loop(opPushData4(p),calcOpCount(opCount,OP_PUSHDATA4)) case (x : ScriptConstant) :: t => loop(ScriptProgram(p, x :: p.stack, t),calcOpCount(opCount,x)) //control operations case OP_IF :: t => loop(opIf(p),calcOpCount(opCount,OP_IF)) case OP_NOTIF :: t => loop(opNotIf(p),calcOpCount(opCount,OP_NOTIF)) case OP_ELSE :: t => loop(opElse(p),calcOpCount(opCount,OP_ELSE)) case OP_ENDIF :: t => loop(opEndIf(p),calcOpCount(opCount,OP_ENDIF)) case OP_RETURN :: t => loop(opReturn(p),calcOpCount(opCount,OP_RETURN)) case OP_VERIFY :: t => loop(opVerify(p),calcOpCount(opCount,OP_VERIFY)) //crypto operations case OP_HASH160 :: t => loop(opHash160(p),calcOpCount(opCount,OP_HASH160)) case OP_CHECKSIG :: t => loop(opCheckSig(p),calcOpCount(opCount,OP_CHECKSIG)) case OP_CHECKSIGVERIFY :: t => loop(opCheckSigVerify(p),calcOpCount(opCount,OP_CHECKSIGVERIFY)) case OP_SHA1 :: t => loop(opSha1(p),calcOpCount(opCount,OP_SHA1)) case OP_RIPEMD160 :: t => loop(opRipeMd160(p),calcOpCount(opCount,OP_RIPEMD160)) case OP_SHA256 :: t => loop(opSha256(p),calcOpCount(opCount,OP_SHA256)) case OP_HASH256 :: t => loop(opHash256(p),calcOpCount(opCount,OP_HASH256)) case OP_CODESEPARATOR :: t => loop(opCodeSeparator(p),calcOpCount(opCount,OP_CODESEPARATOR)) case OP_CHECKMULTISIG :: t => opCheckMultiSig(p) match { case newProgram : ExecutedScriptProgram => //script was marked invalid for other reasons, don't need to update the opcount loop(newProgram,opCount) case newProgram @ (_ : ExecutionInProgressScriptProgram | _ : PreExecutionScriptProgram) => val newOpCount = calcOpCount(opCount,OP_CHECKMULTISIG) + BitcoinScriptUtil.numPossibleSignaturesOnStack(program).toInt loop(newProgram,newOpCount) } case OP_CHECKMULTISIGVERIFY :: t => opCheckMultiSigVerify(p) match { case newProgram : 
ExecutedScriptProgram => //script was marked invalid for other reasons, don't need to update the opcount loop(newProgram,opCount) case newProgram @ (_ : ExecutionInProgressScriptProgram | _ : PreExecutionScriptProgram) => val newOpCount = calcOpCount(opCount,OP_CHECKMULTISIGVERIFY) + BitcoinScriptUtil.numPossibleSignaturesOnStack(program).toInt loop(newProgram,newOpCount) } case OP_WITHDRAWPROOFVERIFY :: t => loop(opWithdrawProofVerify(p), calcOpCount(opCount, OP_WITHDRAWPROOFVERIFY)) case OP_REORGPROOFVERIFY :: t => loop(opReorgProofVerify(p),calcOpCount(opCount, OP_REORGPROOFVERIFY)) //reserved operations case OP_NOP :: t => //script discourage upgradeable flag does not apply to an OP_NOP loop(ScriptProgram(p, p.stack, t),calcOpCount(opCount,OP_NOP)) //if we see an OP_NOP and the DISCOURAGE_UPGRADABLE_OP_NOPS flag is set we must fail our program case (nop: NOP) :: t if ScriptFlagUtil.discourageUpgradableNOPs(p.flags) => logger.error("We cannot execute a NOP when the ScriptVerifyDiscourageUpgradableNOPs is set") loop(ScriptProgram(p, ScriptErrorDiscourageUpgradableNOPs),calcOpCount(opCount,nop)) case (nop: NOP) :: t => loop(ScriptProgram(p, p.stack, t),calcOpCount(opCount,nop)) case OP_RESERVED :: t => logger.error("OP_RESERVED automatically marks transaction invalid") loop(ScriptProgram(p,ScriptErrorBadOpCode),calcOpCount(opCount,OP_RESERVED)) case OP_VER :: t => logger.error("Transaction is invalid when executing OP_VER") loop(ScriptProgram(p,ScriptErrorBadOpCode),calcOpCount(opCount,OP_VER)) case OP_RESERVED1 :: t => logger.error("Transaction is invalid when executing OP_RESERVED1") loop(ScriptProgram(p,ScriptErrorBadOpCode),calcOpCount(opCount,OP_RESERVED1)) case OP_RESERVED2 :: t => logger.error("Transaction is invalid when executing OP_RESERVED2") loop(ScriptProgram(p,ScriptErrorBadOpCode),calcOpCount(opCount,OP_RESERVED2)) case (reservedOperation : ReservedOperation) :: t => logger.error("Undefined operation found which automatically fails the script: " + reservedOperation) loop(ScriptProgram(p,ScriptErrorBadOpCode),calcOpCount(opCount,reservedOperation)) //splice operations case OP_SIZE :: t => loop(opSize(p),calcOpCount(opCount,OP_SIZE)) //locktime operations case OP_CHECKLOCKTIMEVERIFY :: t => //check if CLTV is enforced yet if (ScriptFlagUtil.checkLockTimeVerifyEnabled(p.flags)) loop(opCheckLockTimeVerify(p),calcOpCount(opCount,OP_CHECKLOCKTIMEVERIFY)) //if not, check to see if we should discourage p else if (ScriptFlagUtil.discourageUpgradableNOPs(p.flags)) { logger.error("We cannot execute a NOP when the ScriptVerifyDiscourageUpgradableNOPs is set") loop(ScriptProgram(p, ScriptErrorDiscourageUpgradableNOPs),calcOpCount(opCount,OP_CHECKLOCKTIMEVERIFY)) } //in this case, just treat OP_CLTV like a NOP and remove it from the stack else loop(ScriptProgram(p, p.script.tail, ScriptProgram.Script),calcOpCount(opCount,OP_CHECKLOCKTIMEVERIFY)) case OP_CHECKSEQUENCEVERIFY :: t => //check if CSV is enforced yet if (ScriptFlagUtil.checkSequenceVerifyEnabled(p.flags)) loop(opCheckSequenceVerify(p),calcOpCount(opCount,OP_CHECKSEQUENCEVERIFY)) //if not, check to see if we should discourage p else if (ScriptFlagUtil.discourageUpgradableNOPs(p.flags)) { logger.error("We cannot execute a NOP when the ScriptVerifyDiscourageUpgradableNOPs is set") loop(ScriptProgram(p, ScriptErrorDiscourageUpgradableNOPs),calcOpCount(opCount,OP_CHECKSEQUENCEVERIFY)) } //in this case, just treat OP_CSV like a NOP and remove it from the stack else loop(ScriptProgram(p, p.script.tail, 
ScriptProgram.Script),calcOpCount(opCount,OP_CHECKSEQUENCEVERIFY)) //no more script operations to run, return whether the program is valid and the final state of the program case Nil => loop(ScriptProgram.toExecutedProgram(p),opCount) case h :: t => throw new RuntimeException(h + " was unmatched") } } } } /** Checks the validity of a transaction in accordance with bitcoin core's CheckTransaction function * https://github.com/bitcoin/bitcoin/blob/f7a21dae5dbf71d5bc00485215e84e6f2b309d0a/src/main.cpp#L939. */ def checkTransaction(transaction : Transaction) : Boolean = { val inputOutputsNotZero = !(transaction.inputs.isEmpty || transaction.outputs.isEmpty) val txNotLargerThanBlock = transaction.bytes.size < Consensus.maxBlockSize val outputsSpendValidAmountsOfMoney = !transaction.outputs.exists(o => o.value < CurrencyUnits.zero || o.value > Consensus.maxMoney) val outputValues = transaction.outputs.map(_.value) val totalSpentByOutputs : CurrencyUnit = outputValues.fold(CurrencyUnits.zero)(_ + _) val allOutputsValidMoneyRange = validMoneyRange(totalSpentByOutputs) val prevOutputTxIds = transaction.inputs.map(_.previousOutput.txId) val noDuplicateInputs = prevOutputTxIds.distinct.size == prevOutputTxIds.size val isValidScriptSigForCoinbaseTx = transaction.isCoinbase match { case true => transaction.inputs.head.scriptSignature.asmBytes.size >= 2 && transaction.inputs.head.scriptSignature.asmBytes.size <= 100 case false => //since this is not a coinbase tx we cannot have any empty previous outs inside of inputs !transaction.inputs.exists(_.previousOutput == EmptyTransactionOutPoint) } inputOutputsNotZero && txNotLargerThanBlock && outputsSpendValidAmountsOfMoney && noDuplicateInputs && allOutputsValidMoneyRange && isValidScriptSigForCoinbaseTx } /** Determines if the given currency unit is within the valid range for the system */ def validMoneyRange(currencyUnit : CurrencyUnit) : Boolean = { currencyUnit >= CurrencyUnits.zero && currencyUnit <= Consensus.maxMoney } /** Calculates the new op count after the execution of the given [[ScriptToken]] */ private def calcOpCount(oldOpCount: Int, token: ScriptToken):Int = BitcoinScriptUtil.countsTowardsScriptOpLimit(token) match { case true => oldOpCount + 1 case false => oldOpCount } /** Checks if the transaction contained a witness that we did not use * [[https://github.com/bitcoin/bitcoin/blob/528472111b4965b1a99c4bcf08ac5ec93d87f10f/src/script/interpreter.cpp#L1515-L1523]] * Return true if witness was NOT used, return false if witness was used. 
*/ private def hasUnexpectedWitness(program: ScriptProgram): Boolean = { val txSigComponent = program.txSignatureComponent logger.debug("TxSigComponent: " + txSigComponent) val unexpectedWitness = txSigComponent match { case b : BaseTransactionSignatureComponent => b.transaction match { case wtx : WitnessTransaction => wtx.witness.witnesses(txSigComponent.inputIndex.toInt).stack.nonEmpty case _ : BaseTransaction => false } case w : WitnessV0TransactionSignatureComponent => val witnessUsed = w.scriptPubKey match { case _ : WitnessScriptPubKey => true case _ : P2SHScriptPubKey => val p2shScriptSig = P2SHScriptSignature(txSigComponent.scriptSignature.bytes) p2shScriptSig.redeemScript.isInstanceOf[WitnessScriptPubKey] case _ : CLTVScriptPubKey | _ : CSVScriptPubKey | _ : MultiSignatureScriptPubKey | _ : NonStandardScriptPubKey | _ : P2PKScriptPubKey | _ : P2PKHScriptPubKey | _ : WitnessCommitment | EmptyScriptPubKey => w.witness.stack.isEmpty } !witnessUsed } if (unexpectedWitness) logger.error("Found unexpected witness that was not used by the ScriptProgram: " + program) unexpectedWitness } } object ScriptInterpreter extends ScriptInterpreter
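// The sketch below is a minimal, self-contained illustration (not part of bitcoin-s) of the
// structural sanity checks performed by checkTransaction above: non-empty inputs and outputs,
// per-output and total value ranges, and no duplicated previous outpoints. All types in it
// (SimpleOutPoint, SimpleInput, SimpleOutput, SimpleTx) are hypothetical stand-ins for the real
// Transaction classes, and maxMoney is hard-coded rather than read from Consensus.
object CheckTransactionSketch {
  val maxMoney: Long = 21000000L * 100000000L // 21 million BTC expressed in satoshis

  case class SimpleOutPoint(txId: String, vout: Int)
  case class SimpleInput(previousOutput: SimpleOutPoint)
  case class SimpleOutput(value: Long)
  case class SimpleTx(inputs: Seq[SimpleInput], outputs: Seq[SimpleOutput])

  // Mirrors validMoneyRange: a value must be non-negative and no larger than maxMoney.
  def validMoneyRange(value: Long): Boolean = value >= 0 && value <= maxMoney

  def checkTransaction(tx: SimpleTx): Boolean = {
    val inputOutputsNotZero = tx.inputs.nonEmpty && tx.outputs.nonEmpty
    val outputsInRange = tx.outputs.forall(o => validMoneyRange(o.value))
    val totalInRange = validMoneyRange(tx.outputs.map(_.value).sum)
    val prevOuts = tx.inputs.map(_.previousOutput)
    val noDuplicateInputs = prevOuts.distinct.size == prevOuts.size
    inputOutputsNotZero && outputsInRange && totalInRange && noDuplicateInputs
  }

  def main(args: Array[String]): Unit = {
    val tx = SimpleTx(
      inputs = Seq(SimpleInput(SimpleOutPoint("aa" * 32, 0))),
      outputs = Seq(SimpleOutput(50000L), SimpleOutput(25000L)))
    println(checkTransaction(tx)) // true: one input, two in-range outputs, no duplicate outpoints
  }
}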
SuredBits/bitcoin-s-sidechains
src/main/scala/org/bitcoins/core/script/interpreter/ScriptInterpreter.scala
Scala
mit
32,742
/* * Copyright 2017 HM Revenue & Customs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package uk.gov.hmrc.ct.computations.formats import play.api.libs.json.Json import uk.gov.hmrc.ct.computations.LEC01 object Cars { def lec01FromJsonString(json: String): LEC01 = Json.fromJson[LEC01](Json.parse(json)).get def toJsonString(lec01: LEC01): String = Json.toJson(lec01).toString() def asBoxString(lec01: LEC01): Option[String] = Some(toJsonString(lec01)) }
pncampbell/ct-calculations
src/main/scala/uk/gov/hmrc/ct/computations/formats/Cars.scala
Scala
apache-2.0
984
package views package html.puzzle import controllers.routes import lila.api.Context import lila.app.templating.Environment._ import lila.app.ui.ScalatagsTemplate._ import lila.puzzle.PuzzleTheme object theme { def list(themes: List[(lila.i18n.I18nKey, List[PuzzleTheme.WithCount])])(implicit ctx: Context) = views.html.base.layout( title = "Puzzle themes", moreCss = cssTag("puzzle.page") )( main(cls := "page-menu")( bits.pageMenu("themes"), div(cls := "page-menu__content box")( h1(trans.puzzle.puzzleThemes()), div(cls := "puzzle-themes")( themes map { case (cat, themes) => frag( h2(cat()), div( cls := List( "puzzle-themes__list" -> true, cat.key.replace(":", "-") -> true ) )( themes.map { pt => val url = if (pt.theme == PuzzleTheme.mix) routes.Puzzle.home else routes.Puzzle.show(pt.theme.key.value) a(cls := "puzzle-themes__link", href := (pt.count > 0).option(url.url))( span( h3( pt.theme.name(), em(pt.count.localize) ), span(pt.theme.description()) ) ) }, cat.key == "puzzle:origin" option a(cls := "puzzle-themes__link", href := routes.Puzzle.ofPlayer())( span( h3(trans.puzzleTheme.playerGames()), span(trans.puzzleTheme.playerGamesDescription()) ) ) ) ) }, p(cls := "puzzle-themes__db text", dataIcon := "")( trans.puzzleTheme.puzzleDownloadInformation( a(href := "https://database.lichess.org/")("database.lichess.org") ) ) ) ) ) ) }
luanlv/lila
app/views/puzzle/theme.scala
Scala
mit
2,172
package sgl package analytics import sgl.util.LoggingProvider /** A GameStateComponent that automates analytics. * * This extends the default game state implementation with * an implementation that automatically tracks game screen * navigation. * * If you want to use it, make sure to mix it in AFTER mixing in the * standard App trait, since that trait provides the default * GameStateComponent, and you want this one to override it. * * We offer this as a separate component, so that clients can * choose not to use analytics (no dependency on analytics in * the default GameStateComponent) or can choose a more * fine-grained way to track game screens, if necessary. */ trait GameStateAutoAnalyticsComponent extends GameStateComponent { this: GraphicsProvider with SystemProvider with LoggingProvider with AnalyticsProvider => override val gameState: GameState = new GameStateAutoAnalytics class GameStateAutoAnalytics extends GameState { override def pushScreen(screen: GameScreen): Unit = { Analytics.setGameScreen(screen) super.pushScreen(screen) } override def newScreen(screen: GameScreen): Unit = { Analytics.setGameScreen(screen) super.newScreen(screen) } } }
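// A self-contained sketch (hypothetical traits, not the real sgl API) of why the mixin order
// described in the comment above matters: when two traits define the same member, the trait
// mixed in last wins under Scala's linearization, which is how GameStateAutoAnalyticsComponent's
// gameState override replaces the default one provided by the App trait.
object MixinOrderSketch {
  trait GameStateComponentLike { // stands in for the default GameStateComponent
    def gameState: String = "default game state"
  }
  trait AutoAnalyticsLike extends GameStateComponentLike { // stands in for GameStateAutoAnalyticsComponent
    override def gameState: String = "analytics-tracking game state"
  }

  object WithoutAnalytics extends GameStateComponentLike
  object WithAnalytics extends GameStateComponentLike with AutoAnalyticsLike // mixed in last, so it wins

  def main(args: Array[String]): Unit = {
    println(WithoutAnalytics.gameState) // default game state
    println(WithAnalytics.gameState)    // analytics-tracking game state
  }
}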
regb/scala-game-library
core/src/main/scala/sgl/analytics/GameStateAutoAnalyticsComponent.scala
Scala
mit
1,240
/* * ____ ____ _____ ____ ___ ____ * | _ \\ | _ \\ | ____| / ___| / _/ / ___| Precog (R) * | |_) | | |_) | | _| | | | | /| | | _ Advanced Analytics Engine for NoSQL Data * | __/ | _ < | |___ | |___ |/ _| | | |_| | Copyright (C) 2010 - 2013 SlamData, Inc. * |_| |_| \\_\\ |_____| \\____| /__/ \\____| All Rights Reserved. * * This program is free software: you can redistribute it and/or modify it under the terms of the * GNU Affero General Public License as published by the Free Software Foundation, either version * 3 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License along with this * program. If not, see <http://www.gnu.org/licenses/>. * */ package com.precog.yggdrasil package actor import metadata._ import com.precog.util._ import com.precog.common._ import com.precog.common.ingest._ import com.precog.util.PrecogUnit import com.precog.yggdrasil.table._ import com.precog.yggdrasil.util._ import akka.actor.Actor import akka.actor.Props import akka.actor.Scheduler import akka.actor.ActorRef import akka.actor.PoisonPill import akka.dispatch.Future import akka.dispatch.ExecutionContext import akka.pattern.ask import akka.util.{Duration, DurationLong, Timeout} import akka.util.duration._ import blueeyes.json._ import blueeyes.persistence.cache.Cache import blueeyes.persistence.cache.CacheSettings import blueeyes.persistence.cache.ExpirationPolicy import org.slf4j._ import java.io.File import java.util.concurrent.TimeUnit import scala.annotation.tailrec import scala.collection.mutable import scalaz._ import scalaz.Validation._ import scalaz.effect._ import scalaz.std.list._ import scalaz.std.option._ import scalaz.syntax.id._ import scalaz.syntax.show._ import scalaz.syntax.std.option._ import scalaz.syntax.traverse._ case class InsertComplete(path: Path) case class ArchiveComplete(path: Path)
precog/platform
yggdrasil/src/main/scala/com/precog/yggdrasil/actor/ProjectionsActor.scala
Scala
agpl-3.0
2,252
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.mllib.stat import java.util.Random import org.scalatest.FunSuite import org.apache.spark.SparkException import org.apache.spark.mllib.linalg.{DenseVector, Matrices, Vectors} import org.apache.spark.mllib.regression.LabeledPoint import org.apache.spark.mllib.stat.test.ChiSqTest import org.apache.spark.mllib.util.MLlibTestSparkContext import org.apache.spark.mllib.util.TestingUtils._ class HypothesisTestSuite extends FunSuite with MLlibTestSparkContext { test("chi squared pearson goodness of fit") { val observed = new DenseVector(Array[Double](4, 6, 5)) val pearson = Statistics.chiSqTest(observed) // Results validated against the R command `chisq.test(c(4, 6, 5), p=c(1/3, 1/3, 1/3))` assert(pearson.statistic === 0.4) assert(pearson.degreesOfFreedom === 2) assert(pearson.pValue ~== 0.8187 relTol 1e-4) assert(pearson.method === ChiSqTest.PEARSON.name) assert(pearson.nullHypothesis === ChiSqTest.NullHypothesis.goodnessOfFit.toString) // different expected and observed sum val observed1 = new DenseVector(Array[Double](21, 38, 43, 80)) val expected1 = new DenseVector(Array[Double](3, 5, 7, 20)) val pearson1 = Statistics.chiSqTest(observed1, expected1) // Results validated against the R command // `chisq.test(c(21, 38, 43, 80), p=c(3/35, 1/7, 1/5, 4/7))` assert(pearson1.statistic ~== 14.1429 relTol 1e-4) assert(pearson1.degreesOfFreedom === 3) assert(pearson1.pValue ~== 0.002717 relTol 1e-4) assert(pearson1.method === ChiSqTest.PEARSON.name) assert(pearson1.nullHypothesis === ChiSqTest.NullHypothesis.goodnessOfFit.toString) // Vectors with different sizes val observed3 = new DenseVector(Array(1.0, 2.0, 3.0)) val expected3 = new DenseVector(Array(1.0, 2.0, 3.0, 4.0)) intercept[IllegalArgumentException](Statistics.chiSqTest(observed3, expected3)) // negative counts in observed val negObs = new DenseVector(Array(1.0, 2.0, 3.0, -4.0)) intercept[IllegalArgumentException](Statistics.chiSqTest(negObs, expected1)) // count = 0.0 in expected but not observed val zeroExpected = new DenseVector(Array(1.0, 0.0, 3.0)) val inf = Statistics.chiSqTest(observed, zeroExpected) assert(inf.statistic === Double.PositiveInfinity) assert(inf.degreesOfFreedom === 2) assert(inf.pValue === 0.0) assert(inf.method === ChiSqTest.PEARSON.name) assert(inf.nullHypothesis === ChiSqTest.NullHypothesis.goodnessOfFit.toString) // 0.0 in expected and observed simultaneously val zeroObserved = new DenseVector(Array(2.0, 0.0, 1.0)) intercept[IllegalArgumentException](Statistics.chiSqTest(zeroObserved, zeroExpected)) } test("chi squared pearson matrix independence") { val data = Array(40.0, 24.0, 29.0, 56.0, 32.0, 42.0, 31.0, 10.0, 0.0, 30.0, 15.0, 12.0) // [[40.0, 56.0, 31.0, 30.0], // [24.0, 32.0, 10.0, 15.0], // [29.0, 42.0, 0.0, 12.0]] val 
chi = Statistics.chiSqTest(Matrices.dense(3, 4, data)) // Results validated against R command // `chisq.test(rbind(c(40, 56, 31, 30),c(24, 32, 10, 15), c(29, 42, 0, 12)))` assert(chi.statistic ~== 21.9958 relTol 1e-4) assert(chi.degreesOfFreedom === 6) assert(chi.pValue ~== 0.001213 relTol 1e-4) assert(chi.method === ChiSqTest.PEARSON.name) assert(chi.nullHypothesis === ChiSqTest.NullHypothesis.independence.toString) // Negative counts val negCounts = Array(4.0, 5.0, 3.0, -3.0) intercept[IllegalArgumentException](Statistics.chiSqTest(Matrices.dense(2, 2, negCounts))) // Row sum = 0.0 val rowZero = Array(0.0, 1.0, 0.0, 2.0) intercept[IllegalArgumentException](Statistics.chiSqTest(Matrices.dense(2, 2, rowZero))) // Column sum = 0.0 val colZero = Array(0.0, 0.0, 2.0, 2.0) // IllegalArgumentException thrown here since it's thrown on driver, not inside a task intercept[IllegalArgumentException](Statistics.chiSqTest(Matrices.dense(2, 2, colZero))) } test("chi squared pearson RDD[LabeledPoint]") { // labels: 1.0 (2 / 6), 0.0 (4 / 6) // feature1: 0.5 (1 / 6), 1.5 (2 / 6), 3.5 (3 / 6) // feature2: 10.0 (1 / 6), 20.0 (1 / 6), 30.0 (2 / 6), 40.0 (2 / 6) val data = Seq( LabeledPoint(0.0, Vectors.dense(0.5, 10.0)), LabeledPoint(0.0, Vectors.dense(1.5, 20.0)), LabeledPoint(1.0, Vectors.dense(1.5, 30.0)), LabeledPoint(0.0, Vectors.dense(3.5, 30.0)), LabeledPoint(0.0, Vectors.dense(3.5, 40.0)), LabeledPoint(1.0, Vectors.dense(3.5, 40.0))) for (numParts <- List(2, 4, 6, 8)) { val chi = Statistics.chiSqTest(sc.parallelize(data, numParts)) val feature1 = chi(0) assert(feature1.statistic === 0.75) assert(feature1.degreesOfFreedom === 2) assert(feature1.pValue ~== 0.6873 relTol 1e-4) assert(feature1.method === ChiSqTest.PEARSON.name) assert(feature1.nullHypothesis === ChiSqTest.NullHypothesis.independence.toString) val feature2 = chi(1) assert(feature2.statistic === 1.5) assert(feature2.degreesOfFreedom === 3) assert(feature2.pValue ~== 0.6823 relTol 1e-4) assert(feature2.method === ChiSqTest.PEARSON.name) assert(feature2.nullHypothesis === ChiSqTest.NullHypothesis.independence.toString) } // Test that the right number of results is returned val numCols = 1001 val sparseData = Array( new LabeledPoint(0.0, Vectors.sparse(numCols, Seq((100, 2.0)))), new LabeledPoint(0.1, Vectors.sparse(numCols, Seq((200, 1.0))))) val chi = Statistics.chiSqTest(sc.parallelize(sparseData)) assert(chi.size === numCols) assert(chi(1000) != null) // SPARK-3087 // Detect continous features or labels val random = new Random(11L) val continuousLabel = Seq.fill(100000)(LabeledPoint(random.nextDouble(), Vectors.dense(random.nextInt(2)))) intercept[SparkException] { Statistics.chiSqTest(sc.parallelize(continuousLabel, 2)) } val continuousFeature = Seq.fill(100000)(LabeledPoint(random.nextInt(2), Vectors.dense(random.nextDouble()))) intercept[SparkException] { Statistics.chiSqTest(sc.parallelize(continuousFeature, 2)) } } }
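// A small standalone sketch (plain Scala, no Spark dependency) that reproduces the Pearson
// goodness-of-fit statistic asserted in the first test above: observed counts (4, 6, 5) against
// a uniform expectation give a chi-squared statistic of 0.4 with 3 - 1 = 2 degrees of freedom.
object PearsonGoodnessOfFitSketch {
  /** Pearson chi-squared statistic: the sum over categories of (observed - expected)^2 / expected,
    * where the expected counts are the expected proportions scaled to the observed total. */
  def chiSqStatistic(observed: Seq[Double], expectedProportions: Seq[Double]): Double = {
    require(observed.size == expectedProportions.size, "vectors must have the same length")
    val total = observed.sum
    val expectedCounts = expectedProportions.map(_ * total)
    observed.zip(expectedCounts).map { case (o, e) => (o - e) * (o - e) / e }.sum
  }

  def main(args: Array[String]): Unit = {
    val stat = chiSqStatistic(Seq(4.0, 6.0, 5.0), Seq(1.0 / 3, 1.0 / 3, 1.0 / 3))
    println(stat) // ~0.4, matching the `pearson.statistic === 0.4` assertion above
  }
}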
trueyao/spark-lever
mllib/src/test/scala/org/apache/spark/mllib/stat/HypothesisTestSuite.scala
Scala
apache-2.0
7,001
/* * Copyright (C) 2005 - 2019 Schlichtherle IT Services. * All rights reserved. Use is subject to license terms. */ package global.namespace.truelicense.tests.swing import global.namespace.truelicense.api.{ConsumerLicenseManager, License, LicenseManagementException} import global.namespace.truelicense.swing.LicenseManagementWizard import global.namespace.truelicense.tests.core.TestContext import global.namespace.truelicense.tests.swing.LicenseManagementWizardITLike._ import global.namespace.truelicense.ui.LicenseWizardMessage import global.namespace.truelicense.ui.LicenseWizardMessage._ import global.namespace.truelicense.ui.wizard.WizardMessage._ import org.netbeans.jemmy._ import org.netbeans.jemmy.operators._ import org.netbeans.jemmy.util._ import org.scalactic.source.Position import org.scalatest.BeforeAndAfter import org.scalatest.matchers.should.Matchers._ import org.scalatest.wordspec.AnyWordSpecLike import java.awt.{Component, EventQueue, GraphicsEnvironment} import java.util.Date import javax.swing._ trait LicenseManagementWizardITLike extends AnyWordSpecLike with BeforeAndAfter { this: TestContext => private val laf = UIManager.getLookAndFeel private var installed: License = _ private var manager: ConsumerLicenseManager = _ private var wizard: LicenseManagementWizard = _ private var dialog: JDialogOperator = _ private var cancelButton, backButton, nextButton: AbstractButtonOperator = _ JemmyProperties.setCurrentOutput(TestOut.getNullOutput) // shut up! before { new State { installed = (vendorManager generateKeyFrom licenseBean saveTo consumerStore).license manager = consumerManager EventQueue invokeLater (() => { UIManager setLookAndFeel UIManager.getSystemLookAndFeelClassName wizard = newLicenseManagementWizard(manager) wizard.showModalDialog() }) dialog = new JDialogOperator() cancelButton = waitButton(dialog, wizard_cancel) backButton = waitButton(dialog, wizard_back) nextButton = waitButton(dialog, wizard_next) // Defer test execution to allow asynchronous license certificate // verification to complete. 
Thread sleep 100 } } after { cancelButton.doClick() dialog.isVisible shouldBe false wizard.getReturnCode shouldBe LicenseManagementWizard.CANCEL_RETURN_CODE UIManager setLookAndFeel laf } "A license wizard" when { "using a consumer license manager with an installed license key" when { "showing" should { "be modal" ifNotHeadless { dialog.isModal shouldBe true } "have a title which includes the licensing management subject" ifNotHeadless { dialog.getTitle should include(managementContext.subject) } "have its back button disabled" ifNotHeadless { backButton.isEnabled shouldBe false } "have its next button enabled" ifNotHeadless { nextButton.isEnabled shouldBe true } "have its cancel button enabled" ifNotHeadless { cancelButton.isEnabled shouldBe true } "show its welcome panel and hide the other panels" ifNotHeadless { welcomePanel.isVisible shouldBe true installPanel.isVisible shouldBe false displayPanel.isVisible shouldBe false uninstallPanel.isVisible shouldBe false } "have a visible, non-empty prompt on its welcome panel" ifNotHeadless { dialog.getQueueTool.waitEmpty() val prompt = waitTextComponent(welcomePanel, welcome_prompt) prompt.isVisible shouldBe true prompt.getText.isEmpty shouldBe false } "have both the install and display buttons enabled" ifNotHeadless { val installSelector = waitButton(welcomePanel, welcome_install) val displaySelector = waitButton(welcomePanel, welcome_display) installSelector.isEnabled shouldBe true displaySelector.isEnabled shouldBe true } "switch to the install panel when requested" ifNotHeadless { val installSelector = waitButton(welcomePanel, welcome_install) installSelector.isVisible shouldBe true installSelector.isEnabled shouldBe true installSelector.isSelected shouldBe false installSelector.doClick() nextButton.doClick() welcomePanel.isVisible shouldBe false installPanel.isVisible shouldBe true waitButton(installPanel, install_install).isEnabled shouldBe false } "switch to the display panel by default and display the license content" ifNotHeadless { val displaySelector = waitButton(welcomePanel, welcome_display) displaySelector.isVisible shouldBe true displaySelector.isEnabled shouldBe true displaySelector.isSelected shouldBe true nextButton.doClick() welcomePanel.isVisible shouldBe false displayPanel.isVisible shouldBe true def waitText(key: LicenseWizardMessage) = waitTextComponent(displayPanel, key).getText def format(date: Date) = display_dateTimeFormat(managementContext.subject, date) waitText(display_holder) shouldBe toString(installed.getHolder) waitText(display_subject) shouldBe toString(installed.getSubject) waitText(display_consumer) shouldBe (toString(installed.getConsumerType) + " / " + installed.getConsumerAmount) waitText(display_notBefore) shouldBe format(installed.getNotBefore) waitText(display_notAfter) shouldBe format(installed.getNotAfter) waitText(display_issuer) shouldBe toString(installed.getIssuer) waitText(display_issued) shouldBe format(installed.getIssued) waitText(display_info) shouldBe toString(installed.getInfo) } "switch to the uninstall panel when requested" ifNotHeadless { val uninstallSelector = waitButton(welcomePanel, welcome_uninstall) uninstallSelector.isVisible shouldBe true uninstallSelector.isEnabled shouldBe true uninstallSelector.isSelected shouldBe false uninstallSelector.doClick() nextButton.doClick() welcomePanel.isVisible shouldBe false uninstallPanel.isVisible shouldBe true waitButton(uninstallPanel, uninstall_uninstall).doClick() Thread sleep 100 intercept[LicenseManagementException](manager.load()) } } 
} } private implicit class WithText(text: String) { def ifNotHeadless(block: => Any)(implicit pos: Position): Unit = { if (GraphicsEnvironment.isHeadless) { text ignore block } else { text in block } } } private def welcomePanel = waitPanel("WelcomePanel") private def installPanel = waitPanel("InstallPanel") private def displayPanel = waitPanel("DisplayPanel") private def uninstallPanel = waitPanel("UninstallPanel") private def waitPanel(name: String) = new JComponentOperator(dialog, new ComponentChooser { val delegate = new NameComponentChooser(name) def checkComponent(comp: Component): Boolean = comp match { case panel: JPanel => delegate checkComponent panel case _ => false } def getDescription = "Chooses a JPanel by its name." }) private def toString(obj: AnyRef) = { if (null ne obj) { obj.toString } else { "" } } } object LicenseManagementWizardITLike { private def newLicenseManagementWizard(manager: ConsumerLicenseManager) = { val wizard = new LicenseManagementWizard(manager) wizard.isUninstallButtonVisible shouldBe false wizard setUninstallButtonVisible true wizard.isUninstallButtonVisible shouldBe true wizard } private def waitButton(cont: ContainerOperator, key: Enum[_]) = new AbstractButtonOperator(cont, new NameComponentChooser(key.name)) private def waitTextComponent(cont: ContainerOperator, key: Enum[_]) = new JTextComponentOperator(cont, new NameComponentChooser(key.name)) }
christian-schlichtherle/truelicense
tests/src/test/scala/global/namespace/truelicense/tests/swing/LicenseManagementWizardITLike.scala
Scala
apache-2.0
8,144