| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 5-1M | stringlengths 5-109 | stringlengths 6-208 | stringclasses (1 value) | stringclasses (15 values) | int64 5-1M |
/*
* Copyright 2018 Analytics Zoo Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.zoo.pipeline.api.keras.layers
import com.intel.analytics.bigdl.nn.keras.{Masking => BigDLMasking}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.Shape
import com.intel.analytics.zoo.pipeline.api.Net
import scala.reflect.ClassTag
/**
* Masks a sequence by using a mask value to skip timesteps.
*
* When you use this layer as the first layer of a model, you need to provide the argument
* inputShape (a Single Shape, does not include the batch dimension).
*
* @param maskValue Double, mask value.
* For each timestep in the input (the second dimension),
* if all the values in the input at that timestep are equal to 'maskValue',
* then the timestep will be masked (skipped) in all downstream layers.
* @param inputShape A Single Shape, does not include the batch dimension.
* @tparam T Numeric type of parameters (e.g. weight, bias). Only Float and Double are supported for now.
*/
class Masking[T: ClassTag](
override val maskValue: Double = 0.0,
override val inputShape: Shape = null)(implicit ev: TensorNumeric[T])
extends BigDLMasking[T](
maskValue, inputShape) with Net {
}
object Masking {
def apply[@specialized(Float, Double) T: ClassTag](
maskValue: Double = 0.0,
inputShape: Shape = null)(implicit ev: TensorNumeric[T]): Masking[T] = {
new Masking[T](maskValue, inputShape)
}
}
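For orientation, here is a minimal usage sketch for the layer above. The `Masking` call itself uses the factory defined in this file; the `Sequential` model and `LSTM` layer are assumed to come from the Analytics Zoo Keras-style API and are not part of this file.

```scala
// Hedged usage sketch -- Sequential and LSTM are assumed from the Analytics Zoo
// Keras-style API; only Masking is defined in the file above.
import com.intel.analytics.bigdl.utils.Shape
import com.intel.analytics.zoo.pipeline.api.keras.layers.{LSTM, Masking}
import com.intel.analytics.zoo.pipeline.api.keras.models.Sequential

val model = Sequential[Float]()
// Input: 10 timesteps x 32 features. Any timestep whose 32 values are all 0.0
// is skipped by downstream layers such as the LSTM below.
model.add(Masking[Float](maskValue = 0.0, inputShape = Shape(10, 32)))
model.add(LSTM[Float](16))
```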
| intel-analytics/analytics-zoo | zoo/src/main/scala/com/intel/analytics/zoo/pipeline/api/keras/layers/Masking.scala | Scala | apache-2.0 | 2,118 |
package org.jetbrains.plugins.scala.lang.psi.stubs.elements.signatures
import com.intellij.lang.ASTNode
import com.intellij.psi.PsiElement
import com.intellij.psi.stubs.{StubElement, StubInputStream, StubOutputStream}
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScParameterClause
import org.jetbrains.plugins.scala.lang.psi.impl.statements.params.ScParameterClauseImpl
import org.jetbrains.plugins.scala.lang.psi.stubs.ScParamClauseStub
import org.jetbrains.plugins.scala.lang.psi.stubs.elements.ScStubElementType
import org.jetbrains.plugins.scala.lang.psi.stubs.impl.ScParamClauseStubImpl
/**
* User: Alexander Podkhalyuzin
* Date: 19.10.2008
*/
class ScParamClauseElementType extends ScStubElementType[ScParamClauseStub, ScParameterClause]("parameter clause") {
override def serialize(stub: ScParamClauseStub, dataStream: StubOutputStream): Unit = {
dataStream.writeBoolean(stub.isImplicit)
}
override def deserialize(dataStream: StubInputStream, parentStub: StubElement[_ <: PsiElement]): ScParamClauseStub =
new ScParamClauseStubImpl(parentStub, this,
isImplicit = dataStream.readBoolean)
override def createStubImpl(parameterClause: ScParameterClause, parentStub: StubElement[_ <: PsiElement]): ScParamClauseStub =
new ScParamClauseStubImpl(parentStub, this,
isImplicit = parameterClause.isImplicit)
override def createElement(node: ASTNode): ScParameterClause = new ScParameterClauseImpl(node)
override def createPsi(stub: ScParamClauseStub): ScParameterClause = new ScParameterClauseImpl(stub)
}
| gtache/intellij-lsp | intellij-lsp-dotty/src/org/jetbrains/plugins/scala/lang/psi/stubs/elements/signatures/ScParamClauseElementType.scala | Scala | apache-2.0 | 1,573 |
package memnets.fx.fx3d
import memnets.model.Element
import memnets.ui.TickableUI
import scalafx.scene.Node
trait Tickable3DFX extends TickableUI[Node] {
def element: Element = {
if (node.isDefined)
node.get.userData.asInstanceOf[Element]
else
null
}
def element_=(r: Element): Unit = {
if (node.isDefined)
node.get.userData = r
}
}
| MemoryNetworks/memnets | fx/src/main/scala/memnets/fx/fx3d/Tickable3DFX.scala | Scala | apache-2.0 | 373 |
import swing.{BorderPanel, BoxPanel, FlowPanel, GridPanel, Panel, Button, Label, Swing, ProgressBar, Orientation, TextField, Table}
import swing.event._
import swing.BorderPanel.Position._
import scala.collection.JavaConversions._
import scala.collection.mutable.ListBuffer
import java.awt.event.{MouseEvent, KeyEvent, ActionEvent}
import javax.swing.{BorderFactory, JPanel, JComponent, KeyStroke, AbstractAction}
import javax.swing.plaf.basic._
import java.awt.{Toolkit, Font, Color, FileDialog, Dimension}
import java.awt.datatransfer.Clipboard
import java.awt.datatransfer.StringSelection
import scala.concurrent._
import ExecutionContext.Implicits.global
import java.util.concurrent.atomic.AtomicReference
import org.scilab.forge.jlatexmath.{TeXFormula, TeXConstants, TeXIcon}
/*calc_import-BEGIN*/
import DEAK._
/*calc_import-END*/
import Parser._
import PrintCalc._
import Proofsearch.derTree
class ProofSearchPopup(locale : List[Locale] = List(Empty()), seq : Sequent, depth : Int = 5, useRules : List[Rule] = ruleList) extends BorderPanel with Popup {
// the following code (interruptableFuture) is from http://stackoverflow.com/questions/16020964/cancellation-with-future-and-promise-in-scala
def interruptableFuture[T](fun: () => T)(implicit ex: ExecutionContext): (Future[T], () => Boolean) = {
val p = Promise[T]()
val f = p.future
val aref = new AtomicReference[Thread](null)
p tryCompleteWith Future {
val thread = Thread.currentThread
aref.synchronized { aref.set(thread) }
try fun() finally {
val wasInterrupted = (aref.synchronized { aref getAndSet null }) ne thread
// Deal with the interrupted flag of this thread in the desired way
}
}
(f, () => {
aref.synchronized { Option(aref getAndSet null) foreach { _.interrupt() } }
p.tryFailure(new CancellationException)
})
}
var pt:Option[Prooftree] = None
var cancel :() => Boolean = {() => true}
var cancelled = false
def open() = {
focusable = true
requestFocus
val (f, c) = interruptableFuture[Option[Prooftree]] { () =>
derTree(depth, locale, seq, 0, useRules)
}
cancel = c
f.onSuccess {
case result =>
pt = result
close()
}
f.onFailure {
case ex =>
println(ex.getClass)
close()
}
}
override def close():Unit = {
cancel()
visible = false
}
background = new Color(238,238,238)
border = Swing.EmptyBorder(15,15,15,15)
layout(new BoxPanel(Orientation.Vertical) {
contents += new Label("PROOF SEARCH") {
font = new Font("Roboto-Bold", Font.BOLD, 16)
foreground = new Color(33,33,33)
}
contents += new Label("Searching for a prooftree") {
font = new Font("Roboto-Light", Font.PLAIN, 12)
border = Swing.EmptyBorder(15,0,15,0)
foreground = new Color(33,33,33)
}
contents += new ProgressBar{
border = Swing.EmptyBorder(0,5,0,5)
indeterminate = true
}
opaque = false
}) = Center
val cancelButton = new Button(swing.Action("Cancel popup") { cancelled = true; close() }) {
text = "CANCEL"
font = new Font("Roboto-Bold", Font.BOLD, 12)
border = Swing.EmptyBorder(15, 0, 0, 0)
foreground = new Color(66,133,244)
}
listenTo(keys)
reactions += {
case KeyReleased(_, Key.Escape, _, _) =>
cancelled = true; close()
}
focusable = true
layout(new FlowPanel(FlowPanel.Alignment.Right)( cancelButton ){opaque = false}) = South
}
class ErrorPopup(message:String) extends BorderPanel with Popup {
override def close() = {
visible = false
}
background = new Color(238,238,238)
border = Swing.EmptyBorder(15,15,15,15)
layout(new BoxPanel(Orientation.Vertical) {
//border = Swing.EmptyBorder(5,5,5,5)
contents += new Label("ERROR") {
font = new Font("Roboto-Bold", Font.BOLD, 16)
foreground = new Color(33,33,33)
}
contents += new Label(message) {
font = new Font("Roboto-Light", Font.PLAIN, 12)
border = Swing.EmptyBorder(15,0,15,0)
foreground = new Color(33,33,33)
}
opaque = false
}) = Center
val okButton = new Button(swing.Action("Close popup") { close() }) {
text = "OK"
font = new Font("Roboto-Bold", Font.BOLD, 12)
border = Swing.EmptyBorder(0, 0, 0, 0)
foreground = new Color(66,133,244)
}
listenTo(keys)
reactions += {
case KeyReleased(_, Key.Escape, _, _) | KeyReleased(_, Key.Enter, _, _) =>
close()
}
focusable = true
layout(new FlowPanel(FlowPanel.Alignment.Right)( okButton ){opaque = false}) = South //{ cancel(); close() } )) = South
}
class ParsePopup[T](parser:String => Option[T], toStr:T => String, t:String) extends BorderPanel with Popup {
override def close() = {
visible = false
}
val parsedBottomBarColor = new Color(51,172,113)
val unParsedBottomBarColor = new Color(255,139,129)
background = parsedBottomBarColor
border = Swing.EmptyBorder(15,15,15,15)
val inputField = new TextField {
columns = 25
opaque = false
font = new Font("Roboto-Bold", Font.BOLD, 12)
foreground = Color.white
//minimumSize = new Dimension(350, 50)
//maximumSize = new Dimension(800, 50)
//preferredSize = new Dimension(800, 50)
border = BorderFactory.createMatteBorder(0, 0, 1, 0, Color.white)
focusable = true
}
val parsedStrIcon = new Label {
icon = new TeXFormula("").createTeXIcon(TeXConstants.STYLE_DISPLAY, 15)
foreground = Color.white
}
val parsedStrIcon_aux = new BorderPanel{
layout (parsedStrIcon) = Center
background = parsedBottomBarColor
}
val parsedStrIconScrollPane = new PrettyScrollPane(parsedStrIcon_aux){
preferredSize = new Dimension(300, 50)
}
var parsed:Option[T] = None
listenTo(inputField.keys, keys)
reactions += {
case KeyReleased(_, Key.Escape, _, _) =>
parsed = None
close
case KeyReleased(`inputField`, k, _, _) =>
parser(inputField.text) match {
case Some(r) =>
parsedStrIcon.icon = new TeXFormula(toStr(r)).createTeXIcon(TeXConstants.STYLE_DISPLAY, 15)
background = parsedBottomBarColor
parsedStrIcon_aux.background = parsedBottomBarColor
parsed = Some(r)
if(k == Key.Enter) close()
case None =>
parsedStrIcon.icon = new TeXFormula(inputField.text).createTeXIcon(TeXConstants.STYLE_DISPLAY, 15)
background = unParsedBottomBarColor
parsedStrIcon_aux.background = unParsedBottomBarColor
parsed = None
}
}
lazy val bottomBar = new GridPanel(1,2) {
contents += new BorderPanel {
layout(inputField) = Center
opaque = false
border = Swing.EmptyBorder(12, 5, 12, 5)
}
contents += parsedStrIconScrollPane
// layout (parsedStrIconScrollPane) = East
opaque = false
}
layout(new FlowPanel(FlowPanel.Alignment.Left)(new Label("INPUT " + t) {
font = new Font("Roboto-Bold", Font.BOLD, 16)
foreground = Color.white
}){opaque = false}) = North
layout(bottomBar) = Center
val okButton = new Button(swing.Action("Close popup") { close() }) {
text = "OK"
font = new Font("Roboto-Bold", Font.BOLD, 12)
border = Swing.EmptyBorder(0, 0, 0, 0)
foreground = Color.white
}
val cancelButton = new Button(swing.Action("Cancel popup") { parsed = None; close() }) {
text = "CANCEL"
font = new Font("Roboto-Bold", Font.BOLD, 12)
border = Swing.EmptyBorder(0, 0, 0, 10)
foreground = Color.white
}
layout(new FlowPanel(FlowPanel.Alignment.Right)( cancelButton, okButton ){opaque = false}) = South //{ cancel(); close() } )) = South
}
class RelAKAParsePopup extends BorderPanel with Popup {
override def close() = {
visible = false
}
val parsedBottomBarColor = new Color(51,172,113)
val unParsedBottomBarColor = new Color(255,139,129)
background = parsedBottomBarColor
border = Swing.EmptyBorder(15,15,15,15)
val inputFieldA = new TextField {
columns = 25
opaque = false
font = new Font("Roboto-Bold", Font.BOLD, 12)
foreground = Color.white
border = BorderFactory.createMatteBorder(0, 0, 1, 0, Color.white)
}
val inputFieldAg = new TextField {
columns = 25
opaque = false
font = new Font("Roboto-Bold", Font.BOLD, 12)
foreground = Color.white
border = BorderFactory.createMatteBorder(0, 0, 1, 0, Color.white)
}
val inputFieldA2 = new TextField {
columns = 25
opaque = false
font = new Font("Roboto-Bold", Font.BOLD, 12)
foreground = Color.white
border = BorderFactory.createMatteBorder(0, 0, 1, 0, Color.white)
}
lazy val barA = new BoxPanel(Orientation.Horizontal) {
contents += new Label("Action: ") {
font = new Font("Roboto-Bold", Font.BOLD, 12)
foreground = Color.white
border = Swing.EmptyBorder(0, 0, 0, 10)
}
contents += inputFieldA
//opaque = false
background = parsedBottomBarColor
border = Swing.EmptyBorder(10, 0, 10, 0)
}
lazy val barAg = new BoxPanel(Orientation.Horizontal) {
contents += new Label("Agent: ") {
font = new Font("Roboto-Bold", Font.BOLD, 12)
foreground = Color.white
border = Swing.EmptyBorder(0, 0, 0, 10)
}
contents += inputFieldAg
background = parsedBottomBarColor
border = Swing.EmptyBorder(10, 0, 10, 0)
}
lazy val barA2 = new BoxPanel(Orientation.Horizontal) {
contents += new Label("Action: ") {
font = new Font("Roboto-Bold", Font.BOLD, 12)
foreground = Color.white
border = Swing.EmptyBorder(0, 0, 0, 10)
}
contents += inputFieldA2
background = parsedBottomBarColor
border = Swing.EmptyBorder(10, 0, 10, 0)
}
var parsedA:Option[Action] = None
var parsedAg:Option[Agent] = None
var parsedA2:Option[Action] = None
listenTo(inputFieldA.keys, inputFieldAg.keys, inputFieldA2.keys)
reactions += {
case KeyReleased(`inputFieldA`, k, _, _) =>
parseAction(inputFieldA.text) match {
case Some(r) =>
parsedA = Some(r)
barA.background = parsedBottomBarColor
if(k == Key.Enter && parsedAg != None && parsedA2 != None) close()
case None =>
barA.background = unParsedBottomBarColor
parsedA = None
}
case KeyReleased(`inputFieldAg`, k, _, _) =>
parseAgent(inputFieldAg.text) match {
case Some(r) =>
parsedAg = Some(r)
barAg.background = parsedBottomBarColor
if(k == Key.Enter && parsedA != None && parsedA2 != None) close()
case None =>
barAg.background = unParsedBottomBarColor
parsedAg = None
}
case KeyReleased(`inputFieldA2`, k, _, _) =>
parseAction(inputFieldA2.text) match {
case Some(r) =>
parsedA2 = Some(r)
barA2.background = parsedBottomBarColor
if(k == Key.Enter && parsedAg != None && parsedA != None) close()
case None =>
barA2.background = unParsedBottomBarColor
parsedA2 = None
}
}
layout(new FlowPanel(FlowPanel.Alignment.Left)(new Label("INPUT A RELAKA") {
font = new Font("Roboto-Bold", Font.BOLD, 16)
foreground = Color.white
}){opaque = false}) = North
layout(new BoxPanel(Orientation.Vertical) {
contents += barA
contents += barAg
contents += barA2
opaque = false
}) = Center
val okButton = new Button(swing.Action("Close popup") { close() }) {
text = "OK"
font = new Font("Roboto-Bold", Font.BOLD, 12)
border = Swing.EmptyBorder(0, 0, 0, 0)
foreground = Color.white
}
val cancelButton = new Button(swing.Action("Cancel popup") {
parsedA = None
parsedAg = None
parsedA2 = None
close()
}) {
text = "CANCEL"
font = new Font("Roboto-Bold", Font.BOLD, 12)
border = Swing.EmptyBorder(0, 0, 0, 10)
foreground = Color.white
}
layout(new FlowPanel(FlowPanel.Alignment.Right)( cancelButton, okButton ){opaque = false}) = South //{ cancel(); close() } )) = South
}
class PreFormParsePopup extends BorderPanel with Popup {
override def close() = {
visible = false
}
val parsedBottomBarColor = new Color(51,172,113)
val unParsedBottomBarColor = new Color(255,139,129)
background = parsedBottomBarColor
border = Swing.EmptyBorder(15,15,15,15)
val inputFieldA = new TextField {
columns = 25
opaque = false
font = new Font("Roboto-Bold", Font.BOLD, 12)
foreground = Color.white
border = BorderFactory.createMatteBorder(0, 0, 1, 0, Color.white)
}
val inputFieldF = new TextField {
columns = 25
opaque = false
font = new Font("Roboto-Bold", Font.BOLD, 12)
foreground = Color.white
border = BorderFactory.createMatteBorder(0, 0, 1, 0, Color.white)
}
lazy val barA = new BoxPanel(Orientation.Horizontal) {
contents += new Label("Action: ") {
font = new Font("Roboto-Bold", Font.BOLD, 12)
foreground = Color.white
border = Swing.EmptyBorder(0, 0, 0, 10)
}
contents += inputFieldA
//opaque = false
background = parsedBottomBarColor
border = Swing.EmptyBorder(10, 0, 10, 0)
}
lazy val barF = new BoxPanel(Orientation.Horizontal) {
contents += new Label("Formula: ") {
font = new Font("Roboto-Bold", Font.BOLD, 12)
foreground = Color.white
border = Swing.EmptyBorder(0, 0, 0, 10)
}
contents += inputFieldF
background = parsedBottomBarColor
border = Swing.EmptyBorder(10, 0, 10, 0)
}
var parsedA:Option[Action] = None
var parsedF:Option[Formula] = None
listenTo(inputFieldA.keys, inputFieldF.keys)
reactions += {
case KeyReleased(`inputFieldA`, k, _, _) =>
parseAction(inputFieldA.text) match {
case Some(r) =>
parsedA = Some(r)
barA.background = parsedBottomBarColor
if(k == Key.Enter && parsedF != None) close()
case None =>
barA.background = unParsedBottomBarColor
parsedA = None
}
case KeyReleased(`inputFieldF`, k, _, _) =>
parseFormula(inputFieldF.text) match {
case Some(r) =>
parsedF = Some(r)
barF.background = parsedBottomBarColor
if(k == Key.Enter && parsedA != None) close()
case None =>
barF.background = unParsedBottomBarColor
parsedF = None
}
}
layout(new FlowPanel(FlowPanel.Alignment.Left)(new Label("INPUT A PREFORM") {
font = new Font("Roboto-Bold", Font.BOLD, 16)
foreground = Color.white
}){opaque = false}) = North
layout(new BoxPanel(Orientation.Vertical) {
contents += barA
contents += barF
opaque = false
}) = Center
val okButton = new Button(swing.Action("Close popup") { close() }) {
text = "OK"
font = new Font("Roboto-Bold", Font.BOLD, 12)
border = Swing.EmptyBorder(0, 0, 0, 0)
foreground = Color.white
}
val cancelButton = new Button(swing.Action("Cancel popup") {
parsedA = None
parsedF = None
close()
}) {
text = "CANCEL"
font = new Font("Roboto-Bold", Font.BOLD, 12)
border = Swing.EmptyBorder(0, 0, 0, 10)
foreground = Color.white
}
layout(new FlowPanel(FlowPanel.Alignment.Right)( cancelButton, okButton ){opaque = false}) = South //{ cancel(); close() } )) = South
}
class SequentListPopup(ruleList : List[(Rule, List[Sequent])], session:CalcSession = CalcSession()) extends BorderPanel with Popup {
override def close() = {
visible = false
}
var pair:Option[Tuple2[Rule, List[Sequent]]] = None
val sideBarColorLight = new Color(250,250,250)
def cols:Int = ruleList.map(_._2.length).sorted.reverse.head + 1
def padList(len:Int):List[String] = {
val ret = ListBuffer[String]()
for (i <- 0 to len-1) ret += ""
return ret.toList
}
def padArrayI(len:Int):Array[Int] = {
val ret = ListBuffer[Int]()
for (i <- 0 to len-1) ret += 0
return ret.toArray
}
var ruleColWidth = padArrayI(cols)
var ruleColHeight = 0
def flattenedList:Array[Array[String]] = {
val ret = new ListBuffer[Array[String]]()
val columns = cols-1
ruleList.foreach{
case (a, list) =>
ret += ((ruleToString(a) :: list.map(sequentToString(_))) ++ padList(columns-list.length)).toArray
if (GUIHelper.createTeXIcon(ruleToString(a)).getIconWidth > ruleColWidth(0)) ruleColWidth(0) = GUIHelper.createTeXIcon(ruleToString(a)).getIconWidth
for (i <- 0 to list.length-1) {
// print(i)
// print(" : ")
// println(list(i))
val icon = GUIHelper.createTeXIcon(sequentToString(list(i)))
if (icon.getIconWidth > ruleColWidth(i+1)) ruleColWidth(i+1) = icon.getIconWidth
if (icon.getIconHeight > ruleColHeight) ruleColHeight = icon.getIconHeight
}
println(((ruleToString(a) :: list.map(sequentToString(_))) ++ padList(columns-list.length)))
}
println(ruleColWidth)
return ret.toArray
}
def resizeTable() = {
ruleListTable.peer.setAutoResizeMode( javax.swing.JTable.AUTO_RESIZE_OFF )
print("col count: ")
println(ruleListTable.model.getColumnCount())
for ( c <- 0 to cols-1 ) {
println( ruleColWidth(c) )
val tableColumn = ruleListTable.peer.getColumnModel().getColumn(c)
tableColumn.setMinWidth( ruleColWidth(c) )
}
}
val ruleListTable = new Table(ruleList.length, cols){
selection.elementMode = Table.ElementMode.Row
background = sideBarColorLight
opaque = false
peer.getTableHeader().setOpaque(false)
peer.getTableHeader().setFont(new Font("Roboto-Bold", Font.BOLD, 12))
peer.setDefaultRenderer(classOf[String], new TexRenderer())
val header = List("Rule", "Sequent(s)") ++ padList(cols-2)
model = new MyTableModel( flattenedList , header )
}
background = new Color(238,238,238)
//border = Swing.EmptyBorder(15,15,15,15)
//layout(ruleListTable) = Center
val prettyTable = new PrettyScrollPane(ruleListTable){
scrollPane.peer.getViewport().setBackground(sideBarColorLight)
}
layout(prettyTable) = Center
ruleListTable.peer.getInputMap(JComponent.WHEN_ANCESTOR_OF_FOCUSED_COMPONENT).put(KeyStroke.getKeyStroke(KeyEvent.VK_ENTER, 0), "Enter")
ruleListTable.peer.getActionMap().put("Enter", new AbstractAction() {
override def actionPerformed(ae:ActionEvent) {
val selectedRowIndex = ruleListTable.peer.getSelectedRow
if( 0 <= selectedRowIndex && selectedRowIndex < ruleListTable.model.getRowCount ){
pair = Some(ruleList(selectedRowIndex))
close()
}
else pair = None
}
})
ruleListTable.peer.getInputMap(JComponent.WHEN_ANCESTOR_OF_FOCUSED_COMPONENT).put(KeyStroke.getKeyStroke(KeyEvent.VK_ESCAPE, 0), "Escape")
ruleListTable.peer.getActionMap().put("Escape", new AbstractAction() {
override def actionPerformed(ae:ActionEvent) {
pair = None
close()
}
})
ruleListTable.listenTo(ruleListTable.mouse.clicks)
ruleListTable.reactions += {
case m : MouseClicked if m.clicks == 2 =>
val selectedRowIndex = ruleListTable.peer.getSelectedRow
if( 0 <= selectedRowIndex && selectedRowIndex < ruleListTable.model.getRowCount ){
pair = Some(ruleList(selectedRowIndex))
close()
}
else pair = None
}
class ForcedListSelectionModel extends javax.swing.DefaultListSelectionModel {
setSelectionMode(javax.swing.ListSelectionModel.SINGLE_SELECTION)
override def clearSelection() {}
override def removeSelectionInterval(index0:Int, index1:Int) {}
}
ruleListTable.peer.setSelectionModel(new ForcedListSelectionModel())
layout(new FlowPanel(FlowPanel.Alignment.Left)(new Label("SELECT A RULE TO APPLY") {
font = new Font("Roboto-Bold", Font.BOLD, 16)
foreground = new Color(33,33,33)
border = Swing.EmptyBorder(15, 15, 15, 15)
}){ opaque = false }) = North
val okButton = new Button(swing.Action("Close popup") {
val selectedRowIndex = ruleListTable.peer.getSelectedRow
if( 0 <= selectedRowIndex && selectedRowIndex < ruleListTable.model.getRowCount ){
pair = Some(ruleList(selectedRowIndex))
}
else pair = None
close()
}) {
text = "OK"
font = new Font("Roboto-Bold", Font.BOLD, 12)
border = Swing.EmptyBorder(0, 0, 0, 0)
foreground = new Color(66,133,244)
}
val cancelButton = new Button(swing.Action("Cancel popup") {
pair = None
close()
}) {
text = "CANCEL"
font = new Font("Roboto-Bold", Font.BOLD, 12)
border = Swing.EmptyBorder(0, 0, 0, 10)
foreground = new Color(66,133,244)
}
layout(new FlowPanel(FlowPanel.Alignment.Right)( cancelButton, okButton ){
border = Swing.EmptyBorder(15, 15, 15, 15)
opaque = false
}) = South
resizeTable
//ruleListTable.peer.setFocusable(true)
//peer.requestFocusInWindow
}
trait Popup extends Panel {
def close()
}
class PopupPanel extends Panel {
override lazy val peer = {
val fixedPanel = new JPanel(new java.awt.GridBagLayout())
fixedPanel.setOpaque(false)
fixedPanel
}
var currentPopup:Option[Popup] = None
def add(p:Popup, scaling:Boolean = false) = {
if(scaling){
val c = new java.awt.GridBagConstraints()
c.weightx = 1.0
c.weighty = 1.0
c.fill = java.awt.GridBagConstraints.BOTH
c.insets = new java.awt.Insets(0, 30, 0, 30)
peer.add(p.peer, c)
}
else peer.add(p.peer)
currentPopup = Some(p)
}
def removeAll = {
currentPopup match {
case Some(p) =>
p.close
currentPopup = None
case None => ;
}
peer.removeAll
}
def displayPopup(panel:Popup, scaling:Boolean = false) = {
removeAll
add(panel, scaling)
panel.visible = true
revalidate
repaint
}
}
| goodlyrottenapple/muddy-children | calculus/src/scala/gui/GUIDialogHelper.scala | Scala | mit | 20,579 |
/* ----------------- sse-breaker ----------------- *\
* Licensed under the Apache License, Version 2.0. *
* Author: Spiros Tzavellas *
\* ----------------------------------------------- */
package com.tzavellas.sse
import scala.concurrent.duration._
package object breaker {
val DefaultTestConfiguration = CircuitConfiguration(
maxFailures = 5,
openCircuitTimeout = 10.minutes,
failureCountTimeout = 1.minute,
maxMethodDuration = 1.minute)
}
| sptz45/sse-breaker | src/test/scala/com/tzavellas/sse/breaker/package.scala | Scala | apache-2.0 | 488 |
package com.rbmhtechnology.eventuate.sandbox
import java.util.concurrent.TimeUnit
import com.typesafe.config.Config
import scala.concurrent.duration._
class ReplicationSettings(config: Config) {
val askTimeout =
config.getDuration("sandbox.replication.ask-timeout", TimeUnit.MILLISECONDS).millis
val retryDelay =
config.getDuration("sandbox.replication.retry-delay", TimeUnit.MILLISECONDS).millis
val batchSize =
config.getInt("sandbox.replication.batch-size")
}
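For illustration, a small sketch of how these settings could be instantiated in isolation, e.g. in a test. Only the three keys read above are set; the concrete values (10s, 2s, 64) are made-up examples.

```scala
// Illustrative only -- the keys match those read by ReplicationSettings above;
// the values are arbitrary examples.
import com.typesafe.config.ConfigFactory
import scala.concurrent.duration._

val config = ConfigFactory.parseString(
  """
    |sandbox.replication.ask-timeout = 10s
    |sandbox.replication.retry-delay = 2s
    |sandbox.replication.batch-size = 64
  """.stripMargin)

val settings = new ReplicationSettings(config)
assert(settings.askTimeout == 10.seconds) // parsed via getDuration(..., MILLISECONDS).millis
assert(settings.batchSize == 64)
```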
| RBMHTechnology/eventuate-sandbox | src/main/scala/com/rbmhtechnology/eventuate/sandbox/ReplicationSettings.scala | Scala | apache-2.0 | 487 |
package kuaixue.scala.book.chapter5
object Enum extends Enumeration{
val Red, Yellow, Green = Value
}
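A brief usage sketch for the enumeration above, using the standard `scala.Enumeration` API (the demo object is hypothetical):

```scala
// Hypothetical demo of the Enum object above (standard scala.Enumeration API).
import kuaixue.scala.book.chapter5.Enum

object EnumDemo extends App {
  println(Enum.Red)      // prints "Red"; the field name becomes the value's name
  println(Enum.Red.id)   // prints 0; ids are assigned in declaration order
  Enum.values.foreach(c => println(s"${c.id}: $c"))
}
```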
| slieer/scala-tutorials | src/main/scala/kuaixue/scala/book/chapter5/Enum.scala | Scala | apache-2.0 | 108 |
/*
* Copyright (C) 2015 Red Bull Media House GmbH <http://www.redbullmediahouse.com> - all rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.rbmhtechnology.eventuate.log
import java.io.File
import akka.actor._
import akka.pattern.ask
import akka.testkit.{TestProbe, TestKit}
import akka.util.Timeout
import com.rbmhtechnology.eventuate._
import com.rbmhtechnology.eventuate.ReplicationProtocol._
import com.rbmhtechnology.eventuate.log.EventLogSpec._
import com.rbmhtechnology.eventuate.log.cassandra._
import com.rbmhtechnology.eventuate.log.cassandra.CassandraIndex._
import com.rbmhtechnology.eventuate.log.leveldb.LeveldbEventLog
import com.rbmhtechnology.eventuate.log.leveldb.LeveldbEventLog.ReadResult
import com.typesafe.config.Config
import org.apache.commons.io.FileUtils
import org.cassandraunit.utils.EmbeddedCassandraServerHelper
import org.iq80.leveldb.WriteBatch
import org.scalatest._
import scala.collection.immutable.Seq
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration._
import scala.util._
trait EventLogCleanupLeveldb extends Suite with BeforeAndAfterAll {
def config: Config
def storageLocations: List[File] =
List("eventuate.log.leveldb.dir", "eventuate.snapshot.filesystem.dir").map(s => new File(config.getString(s)))
override def beforeAll(): Unit = {
storageLocations.foreach(FileUtils.deleteDirectory)
storageLocations.foreach(_.mkdirs())
}
override def afterAll(): Unit = {
storageLocations.foreach(FileUtils.deleteDirectory)
}
}
trait EventLogLifecycleLeveldb extends EventLogCleanupLeveldb with BeforeAndAfterEach {
private var _logCtr: Int = 0
private var _log: ActorRef = _
override def afterAll(): Unit = {
TestKit.shutdownActorSystem(system)
super.afterAll()
}
override def beforeEach(): Unit = {
super.beforeEach()
_logCtr += 1
_log = system.actorOf(logProps(logId))
}
def system: ActorSystem
def config: Config =
system.settings.config
def batching: Boolean =
true
def log: ActorRef =
_log
def logId: String =
_logCtr.toString
def logProps(logId: String): Props = {
val logProps = Props(new EventLogLifecycleLeveldb.TestEventLog(logId)).withDispatcher("eventuate.log.leveldb.write-dispatcher")
if (batching) Props(new BatchingEventLog(logProps)) else logProps
}
}
object EventLogLifecycleLeveldb {
class TestEventLog(id: String) extends LeveldbEventLog(id, "log-test") {
override def replay(from: Long, classifier: Int)(f: (DurableEvent) => Unit): Unit =
if (from == -1L) throw boom else super.replay(from, classifier)(f)
override def read(from: Long, max: Int, filter: ReplicationFilter): ReadResult =
if (from == -1L) throw boom else super.read(from, max, filter)
override def write(events: Seq[DurableEvent], batch: WriteBatch): Unit = events match {
case es if es.map(_.payload).contains("boom") => throw boom
case _ => super.write(events, batch)
}
override def unhandled(message: Any): Unit = message match {
case GetSequenceNr =>
sender() ! GetSequenceNrSuccess(generator.sequenceNr)
case GetReplicationProgress =>
sender() ! GetReplicationProgressSuccess(progressMap)
case SetReplicationProgress(logId, sequenceNr) =>
withBatch(batch => replicationProgressMap.writeReplicationProgress(logId, sequenceNr, batch))
case "boom" =>
throw boom
case _ =>
super.unhandled(message)
}
private def progressMap = List(EventLogSpec.remoteLogId, "x", "y").foldLeft[Map[String, Long]](Map.empty) {
case (map, logId) =>
val progress = replicationProgressMap.readReplicationProgress(logId)
if (progress == 0L) map else map + (logId -> progress)
}
}
}
trait EventLogCleanupCassandra extends Suite with BeforeAndAfterAll {
def config: Config
def storageLocations: List[File] =
List("eventuate.snapshot.filesystem.dir").map(s => new File(config.getString(s)))
override def beforeAll(): Unit = {
storageLocations.foreach(FileUtils.deleteDirectory)
storageLocations.foreach(_.mkdirs())
}
override def afterAll(): Unit = {
EmbeddedCassandraServerHelper.cleanEmbeddedCassandra()
storageLocations.foreach(FileUtils.deleteDirectory)
}
}
trait EventLogLifecycleCassandra extends EventLogCleanupCassandra with BeforeAndAfterEach {
import EventLogLifecycleCassandra._
private var _logCtr: Int = 0
private var _log: ActorRef = _
var indexProbe: TestProbe = _
override def beforeEach(): Unit = {
super.beforeEach()
indexProbe = new TestProbe(system)
_logCtr += 1
_log = createLog(TestFailureSpec(), indexProbe.ref)
}
override def beforeAll(): Unit = {
super.beforeAll()
EmbeddedCassandraServerHelper.startEmbeddedCassandra(60000)
}
override def afterAll(): Unit = {
TestKit.shutdownActorSystem(system)
super.afterAll()
}
def createLog(failureSpec: TestFailureSpec, indexProbe: ActorRef): ActorRef =
system.actorOf(logProps(logId, failureSpec, indexProbe))
def system: ActorSystem
def config: Config =
system.settings.config
def batching: Boolean =
true
def log: ActorRef =
_log
def logId: String =
_logCtr.toString
def logProps(logId: String, failureSpec: TestFailureSpec, indexProbe: ActorRef): Props = {
val logProps = Props(new TestEventLog(logId, failureSpec, indexProbe)).withDispatcher("eventuate.log.cassandra.write-dispatcher")
if (batching) Props(new BatchingEventLog(logProps)) else logProps
}
}
object EventLogLifecycleCassandra {
case class TestFailureSpec(
failOnSequenceNrRead: Boolean = false,
failBeforeIndexIncrementWrite: Boolean = false,
failAfterIndexIncrementWrite: Boolean = false)
class TestEventLog(id: String, failureSpec: TestFailureSpec, indexProbe: ActorRef) extends CassandraEventLog(id) {
import context.dispatcher
private var index: ActorRef = _
override def write(partition: Long, events: Seq[DurableEvent]): Unit = events match {
case es if es.map(_.payload).contains("boom") => throw boom
case _ => super.write(partition, events)
}
override def unhandled(message: Any): Unit = message match {
case GetSequenceNr =>
sender() ! GetSequenceNrSuccess(generator.sequenceNr)
case GetReplicationProgress =>
val sdr = sender()
getReplicationProgress(List(EventLogSpec.remoteLogId, "x", "y")) onComplete {
case Success(r) => sdr ! GetReplicationProgressSuccess(r)
case Failure(e) => sdr ! GetReplicationProgressFailure(e)
}
case "boom" =>
throw boom
case _ =>
super.unhandled(message)
}
override private[eventuate] def createReader(cassandra: Cassandra, logId: String) =
new TestEventReader(cassandra, logId)
override private[eventuate] def createIndex(cassandra: Cassandra, eventReader: CassandraEventReader, logId: String): ActorRef = {
index = context.actorOf(Props(new TestIndex(cassandra, eventReader, logId, failureSpec, indexProbe)))
index
}
private def getReplicationProgress(sourceLogIds: Seq[String]): Future[Map[String, Long]] = {
implicit val timeout = Timeout(10.seconds)
Future.sequence(sourceLogIds.map(sourceLogId => index.ask(GetLastSourceLogReadPosition(sourceLogId)).mapTo[GetLastSourceLogReadPositionSuccess])).map { results =>
results.foldLeft[Map[String, Long]](Map.empty) {
case (acc, GetLastSourceLogReadPositionSuccess(logId, snr)) => if (snr == 0L) acc else acc + (logId -> snr)
}
}
}
}
class TestEventReader(cassandra: Cassandra, logId: String) extends CassandraEventReader(cassandra, logId) {
override def replay(from: Long)(f: (DurableEvent) => Unit): Unit =
if (from == -1L) throw boom else super.replay(from)(f)
override def read(from: Long, max: Int, filter: ReplicationFilter, targetLogId: String): CassandraEventReader.ReadResult =
if (from == -1L) throw boom else super.read(from, max, filter, targetLogId)
}
class TestIndex(cassandra: Cassandra, eventReader: CassandraEventReader, logId: String, failureSpec: TestFailureSpec, indexProbe: ActorRef) extends CassandraIndex(cassandra, eventReader, logId) {
val stream = context.system.eventStream
override private[eventuate] def createIndexStore(cassandra: Cassandra, logId: String) =
new TestIndexStore(cassandra, logId, failureSpec)
override def onIndexEvent(event: Any): Unit =
indexProbe ! event
}
class TestIndexStore(cassandra: Cassandra, logId: String, failureSpec: TestFailureSpec) extends CassandraIndexStore(cassandra, logId) {
private var writeIndexIncrementFailed = false
private var readSequenceNumberFailed = false
override def writeAsync(replicationProgress: ReplicationProgress, aggregateEvents: AggregateEvents, sequenceNr: Long)(implicit executor: ExecutionContext): Future[Long] =
if (failureSpec.failBeforeIndexIncrementWrite && !writeIndexIncrementFailed) {
writeIndexIncrementFailed = true
Future.failed(boom)
} else if (failureSpec.failAfterIndexIncrementWrite && !writeIndexIncrementFailed) {
writeIndexIncrementFailed = true
for {
_ <- super.writeAsync(replicationProgress, aggregateEvents, sequenceNr)
r <- Future.failed(boom)
} yield r
} else super.writeAsync(replicationProgress, aggregateEvents, sequenceNr)
override def readSequenceNumberAsync: Future[Long] =
if (failureSpec.failOnSequenceNrRead && !readSequenceNumberFailed) {
readSequenceNumberFailed = true
Future.failed(boom)
} else super.readSequenceNumberAsync
}
}
| linearregression/eventuate | src/it/scala/com/rbmhtechnology/eventuate/log/EventLogLifecycle.scala | Scala | apache-2.0 | 10,291 |
package com.rbmhtechnology.eventuate.chaos
import akka.actor.Props
import com.rbmhtechnology.eventuate.ReplicationEndpoint
import com.rbmhtechnology.eventuate.crdt._
object ChaosCounterLeveldb extends ChaosLeveldbSetup {
implicit val system = getSystem
val endpoint = getEndpoint
val service = new CounterService[Int](name, endpoint.logs(ReplicationEndpoint.DefaultLogName))
system.actorOf(Props(ChaosCounterInterface(service)))
}
object ChaosCounterCassandra extends ChaosCassandraSetup {
implicit val system = getSystem
val endpoint = getEndpoint
val service = new CounterService[Int](name, endpoint.logs(ReplicationEndpoint.DefaultLogName))
system.actorOf(Props(ChaosCounterInterface(service)))
}
object ChaosPureCounterLeveldb extends ChaosLeveldbSetup {
implicit val system = getSystem
val endpoint = getEndpoint
val service = new pure.CounterService[Int](name, endpoint.logs(ReplicationEndpoint.DefaultLogName))
system.actorOf(Props(ChaosCounterInterface(service)))
}
| RBMHTechnology/eventuate-chaos | src/main/scala/com/rbmhtechnology/eventuate/chaos/ChaosCounter.scala | Scala | apache-2.0 | 1,005 |
package com.rasterfoundry.backsplash.server
import com.rasterfoundry.datamodel.AnnotationProject
import com.rasterfoundry.datamodel.{
AuthResult,
Project,
ProjectLayer,
Scene,
ToolRun,
UserWithPlatform
}
import com.rasterfoundry.http4s.{Cache => Http4sUtilCache}
import com.typesafe.scalalogging.LazyLogging
import scalacache._
import scalacache.caffeine._
object Cache extends LazyLogging {
val requestCounter = CaffeineCache[Int]
val authorizationCacheFlags = Flags(
Config.cache.authorizationCacheEnable,
Config.cache.authorizationCacheEnable
)
val caffeineAuthorizationCache: Cache[Boolean] =
CaffeineCache[Boolean]
logger.info(
s"Authorization Cache Status (read/write) ${authorizationCacheFlags}"
)
val caffeineSceneCache: Cache[Scene] =
CaffeineCache[Scene]
val caffeineProjectLayerCache: Cache[ProjectLayer] =
CaffeineCache[ProjectLayer]
val authenticationCacheFlags = Http4sUtilCache.authenticationCacheFlags
val caffeineAuthenticationCache: Cache[Option[UserWithPlatform]] =
CaffeineCache[Option[UserWithPlatform]]
logger.info(
s"Authentication Cache Status, backsplash: ${authenticationCacheFlags}"
)
val sceneAuthCache: Cache[AuthResult[Scene]] =
CaffeineCache[AuthResult[Scene]]
val projectAuthCache: Cache[AuthResult[Project]] =
CaffeineCache[AuthResult[Project]]
val annotationProjectAuthCache: Cache[AuthResult[AnnotationProject]] =
CaffeineCache[AuthResult[AnnotationProject]]
val toolRunAuthCache: Cache[AuthResult[ToolRun]] =
CaffeineCache[AuthResult[ToolRun]]
}
| raster-foundry/raster-foundry | app-backend/backsplash-server/src/main/scala/com/rasterfoundry/backsplash/Cache.scala | Scala | apache-2.0 | 1,587 |
package pl.pholda.malpompaaligxilo.util
abstract class DateCompanion {
def now: Date
// ISO 8601 (YYYY-MM-DD)
def fromString(str: String): Date
}
| pholda/MalpompaAligxilo | core/shared/src/main/scala/pl/pholda/malpompaaligxilo/util/DateCompanion.scala | Scala | gpl-3.0 | 154 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import java.io.File
import java.net.{URL, URLClassLoader}
import java.nio.charset.StandardCharsets
import java.sql.Timestamp
import java.util.Locale
import java.util.concurrent.TimeUnit
import scala.collection.JavaConverters._
import scala.collection.mutable.HashMap
import scala.language.implicitConversions
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hive.common.`type`.HiveDecimal
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.hive.conf.HiveConf.ConfVars
import org.apache.hadoop.hive.ql.session.SessionState
import org.apache.hadoop.hive.serde2.io.{DateWritable, TimestampWritable}
import org.apache.hadoop.util.VersionInfo
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.catalog.CatalogTable
import org.apache.spark.sql.execution.command.DDLUtils
import org.apache.spark.sql.hive.client._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.SQLConf._
import org.apache.spark.sql.internal.StaticSQLConf.{CATALOG_IMPLEMENTATION, WAREHOUSE_PATH}
import org.apache.spark.sql.types._
import org.apache.spark.util.{ChildFirstURLClassLoader, Utils}
private[spark] object HiveUtils extends Logging {
def withHiveExternalCatalog(sc: SparkContext): SparkContext = {
sc.conf.set(CATALOG_IMPLEMENTATION.key, "hive")
sc
}
/** The version of hive used internally by Spark SQL. */
val builtinHiveVersion: String = "1.2.1"
val HIVE_METASTORE_VERSION = buildConf("spark.sql.hive.metastore.version")
.doc("Version of the Hive metastore. Available options are " +
s"<code>0.12.0</code> through <code>2.3.4</code>.")
.stringConf
.createWithDefault(builtinHiveVersion)
// A fake config which is only here for backward compatibility reasons. This config has no effect
// to Spark, just for reporting the builtin Hive version of Spark to existing applications that
// already rely on this config.
val FAKE_HIVE_VERSION = buildConf("spark.sql.hive.version")
.doc(s"deprecated, please use ${HIVE_METASTORE_VERSION.key} to get the Hive version in Spark.")
.stringConf
.createWithDefault(builtinHiveVersion)
val HIVE_METASTORE_JARS = buildConf("spark.sql.hive.metastore.jars")
.doc(s"""
| Location of the jars that should be used to instantiate the HiveMetastoreClient.
| This property can be one of three options:
| 1. "builtin"
| Use Hive ${builtinHiveVersion}, which is bundled with the Spark assembly when
| <code>-Phive</code> is enabled. When this option is chosen,
| <code>spark.sql.hive.metastore.version</code> must be either
| <code>${builtinHiveVersion}</code> or not defined.
| 2. "maven"
| Use Hive jars of specified version downloaded from Maven repositories.
| 3. A classpath in the standard format for both Hive and Hadoop.
""".stripMargin)
.stringConf
.createWithDefault("builtin")
val CONVERT_METASTORE_PARQUET = buildConf("spark.sql.hive.convertMetastoreParquet")
.doc("When set to true, the built-in Parquet reader and writer are used to process " +
"parquet tables created by using the HiveQL syntax, instead of Hive serde.")
.booleanConf
.createWithDefault(true)
val CONVERT_METASTORE_PARQUET_WITH_SCHEMA_MERGING =
buildConf("spark.sql.hive.convertMetastoreParquet.mergeSchema")
.doc("When true, also tries to merge possibly different but compatible Parquet schemas in " +
"different Parquet data files. This configuration is only effective " +
"when \\"spark.sql.hive.convertMetastoreParquet\\" is true.")
.booleanConf
.createWithDefault(false)
val CONVERT_METASTORE_ORC = buildConf("spark.sql.hive.convertMetastoreOrc")
.doc("When set to true, the built-in ORC reader and writer are used to process " +
"ORC tables created by using the HiveQL syntax, instead of Hive serde.")
.booleanConf
.createWithDefault(true)
val CONVERT_METASTORE_CTAS = buildConf("spark.sql.hive.convertMetastoreCtas")
.doc("When set to true, Spark will try to use built-in data source writer " +
"instead of Hive serde in CTAS. This flag is effective only if " +
"`spark.sql.hive.convertMetastoreParquet` or `spark.sql.hive.convertMetastoreOrc` is " +
"enabled respectively for Parquet and ORC formats")
.booleanConf
.createWithDefault(true)
val HIVE_METASTORE_SHARED_PREFIXES = buildConf("spark.sql.hive.metastore.sharedPrefixes")
.doc("A comma separated list of class prefixes that should be loaded using the classloader " +
"that is shared between Spark SQL and a specific version of Hive. An example of classes " +
"that should be shared is JDBC drivers that are needed to talk to the metastore. Other " +
"classes that need to be shared are those that interact with classes that are already " +
"shared. For example, custom appenders that are used by log4j.")
.stringConf
.toSequence
.createWithDefault(jdbcPrefixes)
private def jdbcPrefixes = Seq(
"com.mysql.jdbc", "org.postgresql", "com.microsoft.sqlserver", "oracle.jdbc")
val HIVE_METASTORE_BARRIER_PREFIXES = buildConf("spark.sql.hive.metastore.barrierPrefixes")
.doc("A comma separated list of class prefixes that should explicitly be reloaded for each " +
"version of Hive that Spark SQL is communicating with. For example, Hive UDFs that are " +
"declared in a prefix that typically would be shared (i.e. <code>org.apache.spark.*</code>).")
.stringConf
.toSequence
.createWithDefault(Nil)
val HIVE_THRIFT_SERVER_ASYNC = buildConf("spark.sql.hive.thriftServer.async")
.doc("When set to true, Hive Thrift server executes SQL queries in an asynchronous way.")
.booleanConf
.createWithDefault(true)
/**
* The version of the hive client that will be used to communicate with the metastore. Note that
* this does not necessarily need to be the same version of Hive that is used internally by
* Spark SQL for execution.
*/
private def hiveMetastoreVersion(conf: SQLConf): String = {
conf.getConf(HIVE_METASTORE_VERSION)
}
/**
* The location of the jars that should be used to instantiate the HiveMetastoreClient. This
* property can be one of three options:
* - a classpath in the standard format for both hive and hadoop.
* - builtin - attempt to discover the jars that were used to load Spark SQL and use those. This
* option is only valid when using the execution version of Hive.
* - maven - download the correct version of hive on demand from maven.
*/
private def hiveMetastoreJars(conf: SQLConf): String = {
conf.getConf(HIVE_METASTORE_JARS)
}
/**
* A comma separated list of class prefixes that should be loaded using the classloader that
* is shared between Spark SQL and a specific version of Hive. An example of classes that should
* be shared is JDBC drivers that are needed to talk to the metastore. Other classes that need
* to be shared are those that interact with classes that are already shared. For example,
* custom appenders that are used by log4j.
*/
private def hiveMetastoreSharedPrefixes(conf: SQLConf): Seq[String] = {
conf.getConf(HIVE_METASTORE_SHARED_PREFIXES).filterNot(_ == "")
}
/**
* A comma separated list of class prefixes that should explicitly be reloaded for each version
* of Hive that Spark SQL is communicating with. For example, Hive UDFs that are declared in a
* prefix that typically would be shared (i.e. org.apache.spark.*)
*/
private def hiveMetastoreBarrierPrefixes(conf: SQLConf): Seq[String] = {
conf.getConf(HIVE_METASTORE_BARRIER_PREFIXES).filterNot(_ == "")
}
/**
* Change time configurations needed to create a [[HiveClient]] into unified [[Long]] format.
*/
private[hive] def formatTimeVarsForHiveClient(hadoopConf: Configuration): Map[String, String] = {
// Hive 0.14.0 introduces timeout operations in HiveConf, and changes default values of a bunch
// of time `ConfVar`s by adding time suffixes (`s`, `ms`, and `d` etc.). This breaks backwards-
// compatibility when users are trying to connecting to a Hive metastore of lower version,
// because these options are expected to be integral values in lower versions of Hive.
//
// Here we enumerate all time `ConfVar`s and convert their values to numeric strings according
// to their output time units.
Seq(
ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY -> TimeUnit.SECONDS,
ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT -> TimeUnit.SECONDS,
ConfVars.METASTORE_CLIENT_SOCKET_LIFETIME -> TimeUnit.SECONDS,
ConfVars.HMSHANDLERINTERVAL -> TimeUnit.MILLISECONDS,
ConfVars.METASTORE_EVENT_DB_LISTENER_TTL -> TimeUnit.SECONDS,
ConfVars.METASTORE_EVENT_CLEAN_FREQ -> TimeUnit.SECONDS,
ConfVars.METASTORE_EVENT_EXPIRY_DURATION -> TimeUnit.SECONDS,
ConfVars.METASTORE_AGGREGATE_STATS_CACHE_TTL -> TimeUnit.SECONDS,
ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT -> TimeUnit.MILLISECONDS,
ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_READER_WAIT -> TimeUnit.MILLISECONDS,
ConfVars.HIVES_AUTO_PROGRESS_TIMEOUT -> TimeUnit.SECONDS,
ConfVars.HIVE_LOG_INCREMENTAL_PLAN_PROGRESS_INTERVAL -> TimeUnit.MILLISECONDS,
ConfVars.HIVE_STATS_JDBC_TIMEOUT -> TimeUnit.SECONDS,
ConfVars.HIVE_STATS_RETRIES_WAIT -> TimeUnit.MILLISECONDS,
ConfVars.HIVE_LOCK_SLEEP_BETWEEN_RETRIES -> TimeUnit.SECONDS,
ConfVars.HIVE_ZOOKEEPER_SESSION_TIMEOUT -> TimeUnit.MILLISECONDS,
ConfVars.HIVE_ZOOKEEPER_CONNECTION_BASESLEEPTIME -> TimeUnit.MILLISECONDS,
ConfVars.HIVE_TXN_TIMEOUT -> TimeUnit.SECONDS,
ConfVars.HIVE_COMPACTOR_WORKER_TIMEOUT -> TimeUnit.SECONDS,
ConfVars.HIVE_COMPACTOR_CHECK_INTERVAL -> TimeUnit.SECONDS,
ConfVars.HIVE_COMPACTOR_CLEANER_RUN_INTERVAL -> TimeUnit.MILLISECONDS,
ConfVars.HIVE_SERVER2_THRIFT_HTTP_MAX_IDLE_TIME -> TimeUnit.MILLISECONDS,
ConfVars.HIVE_SERVER2_THRIFT_HTTP_WORKER_KEEPALIVE_TIME -> TimeUnit.SECONDS,
ConfVars.HIVE_SERVER2_THRIFT_HTTP_COOKIE_MAX_AGE -> TimeUnit.SECONDS,
ConfVars.HIVE_SERVER2_THRIFT_LOGIN_BEBACKOFF_SLOT_LENGTH -> TimeUnit.MILLISECONDS,
ConfVars.HIVE_SERVER2_THRIFT_LOGIN_TIMEOUT -> TimeUnit.SECONDS,
ConfVars.HIVE_SERVER2_THRIFT_WORKER_KEEPALIVE_TIME -> TimeUnit.SECONDS,
ConfVars.HIVE_SERVER2_ASYNC_EXEC_SHUTDOWN_TIMEOUT -> TimeUnit.SECONDS,
ConfVars.HIVE_SERVER2_ASYNC_EXEC_KEEPALIVE_TIME -> TimeUnit.SECONDS,
ConfVars.HIVE_SERVER2_LONG_POLLING_TIMEOUT -> TimeUnit.MILLISECONDS,
ConfVars.HIVE_SERVER2_SESSION_CHECK_INTERVAL -> TimeUnit.MILLISECONDS,
ConfVars.HIVE_SERVER2_IDLE_SESSION_TIMEOUT -> TimeUnit.MILLISECONDS,
ConfVars.HIVE_SERVER2_IDLE_OPERATION_TIMEOUT -> TimeUnit.MILLISECONDS,
ConfVars.SERVER_READ_SOCKET_TIMEOUT -> TimeUnit.SECONDS,
ConfVars.HIVE_LOCALIZE_RESOURCE_WAIT_INTERVAL -> TimeUnit.MILLISECONDS,
ConfVars.SPARK_CLIENT_FUTURE_TIMEOUT -> TimeUnit.SECONDS,
ConfVars.SPARK_JOB_MONITOR_TIMEOUT -> TimeUnit.SECONDS,
ConfVars.SPARK_RPC_CLIENT_CONNECT_TIMEOUT -> TimeUnit.MILLISECONDS,
ConfVars.SPARK_RPC_CLIENT_HANDSHAKE_TIMEOUT -> TimeUnit.MILLISECONDS
).map { case (confVar, unit) =>
confVar.varname -> HiveConf.getTimeVar(hadoopConf, confVar, unit).toString
}.toMap
}
/**
* Check current Thread's SessionState type
* @return true when SessionState.get returns an instance of CliSessionState,
* false when it gets non-CliSessionState instance or null
*/
def isCliSessionState(): Boolean = {
val state = SessionState.get
var temp: Class[_] = if (state != null) state.getClass else null
var found = false
while (temp != null && !found) {
found = temp.getName == "org.apache.hadoop.hive.cli.CliSessionState"
temp = temp.getSuperclass
}
found
}
/**
* Create a [[HiveClient]] used for execution.
*
* Currently this must always be Hive 1.2.1 as this is the version of Hive that is packaged
* with Spark SQL. This copy of the client is used for execution related tasks like
* registering temporary functions or ensuring that the ThreadLocal SessionState is
* correctly populated. This copy of Hive is *not* used for storing persistent metadata,
* and only points to a dummy metastore in a temporary directory.
*/
protected[hive] def newClientForExecution(
conf: SparkConf,
hadoopConf: Configuration): HiveClientImpl = {
logInfo(s"Initializing execution hive, version $builtinHiveVersion")
val loader = new IsolatedClientLoader(
version = IsolatedClientLoader.hiveVersion(builtinHiveVersion),
sparkConf = conf,
execJars = Seq.empty,
hadoopConf = hadoopConf,
config = newTemporaryConfiguration(useInMemoryDerby = true),
isolationOn = false,
baseClassLoader = Utils.getContextOrSparkClassLoader)
loader.createClient().asInstanceOf[HiveClientImpl]
}
/**
* Create a [[HiveClient]] used to retrieve metadata from the Hive MetaStore.
*
* The version of the Hive client that is used here must match the metastore that is configured
* in the hive-site.xml file.
*/
protected[hive] def newClientForMetadata(
conf: SparkConf,
hadoopConf: Configuration): HiveClient = {
val configurations = formatTimeVarsForHiveClient(hadoopConf)
newClientForMetadata(conf, hadoopConf, configurations)
}
protected[hive] def newClientForMetadata(
conf: SparkConf,
hadoopConf: Configuration,
configurations: Map[String, String]): HiveClient = {
val sqlConf = new SQLConf
sqlConf.setConf(SQLContext.getSQLProperties(conf))
val hiveMetastoreVersion = HiveUtils.hiveMetastoreVersion(sqlConf)
val hiveMetastoreJars = HiveUtils.hiveMetastoreJars(sqlConf)
val hiveMetastoreSharedPrefixes = HiveUtils.hiveMetastoreSharedPrefixes(sqlConf)
val hiveMetastoreBarrierPrefixes = HiveUtils.hiveMetastoreBarrierPrefixes(sqlConf)
val metaVersion = IsolatedClientLoader.hiveVersion(hiveMetastoreVersion)
val isolatedLoader = if (hiveMetastoreJars == "builtin") {
if (builtinHiveVersion != hiveMetastoreVersion) {
throw new IllegalArgumentException(
"Builtin jars can only be used when hive execution version == hive metastore version. " +
s"Execution: $builtinHiveVersion != Metastore: $hiveMetastoreVersion. " +
s"Specify a valid path to the correct hive jars using ${HIVE_METASTORE_JARS.key} " +
s"or change ${HIVE_METASTORE_VERSION.key} to $builtinHiveVersion.")
}
// We recursively find all jars in the class loader chain,
// starting from the given classLoader.
def allJars(classLoader: ClassLoader): Array[URL] = classLoader match {
case null => Array.empty[URL]
case childFirst: ChildFirstURLClassLoader =>
childFirst.getURLs() ++ allJars(Utils.getSparkClassLoader)
case urlClassLoader: URLClassLoader =>
urlClassLoader.getURLs ++ allJars(urlClassLoader.getParent)
case other => allJars(other.getParent)
}
val classLoader = Utils.getContextOrSparkClassLoader
val jars = allJars(classLoader)
if (jars.length == 0) {
throw new IllegalArgumentException(
"Unable to locate hive jars to connect to metastore. " +
s"Please set ${HIVE_METASTORE_JARS.key}.")
}
logInfo(
s"Initializing HiveMetastoreConnection version $hiveMetastoreVersion using Spark classes.")
new IsolatedClientLoader(
version = metaVersion,
sparkConf = conf,
hadoopConf = hadoopConf,
execJars = jars.toSeq,
config = configurations,
isolationOn = !isCliSessionState(),
barrierPrefixes = hiveMetastoreBarrierPrefixes,
sharedPrefixes = hiveMetastoreSharedPrefixes)
} else if (hiveMetastoreJars == "maven") {
// TODO: Support for loading the jars from an already downloaded location.
logInfo(
s"Initializing HiveMetastoreConnection version $hiveMetastoreVersion using maven.")
IsolatedClientLoader.forVersion(
hiveMetastoreVersion = hiveMetastoreVersion,
hadoopVersion = VersionInfo.getVersion,
sparkConf = conf,
hadoopConf = hadoopConf,
config = configurations,
barrierPrefixes = hiveMetastoreBarrierPrefixes,
sharedPrefixes = hiveMetastoreSharedPrefixes)
} else {
// Convert to files and expand any directories.
val jars =
hiveMetastoreJars
.split(File.pathSeparator)
.flatMap {
case path if new File(path).getName == "*" =>
val files = new File(path).getParentFile.listFiles()
if (files == null) {
logWarning(s"Hive jar path '$path' does not exist.")
Nil
} else {
files.filter(_.getName.toLowerCase(Locale.ROOT).endsWith(".jar"))
}
case path =>
new File(path) :: Nil
}
.map(_.toURI.toURL)
logInfo(
s"Initializing HiveMetastoreConnection version $hiveMetastoreVersion " +
s"using ${jars.mkString(":")}")
new IsolatedClientLoader(
version = metaVersion,
sparkConf = conf,
hadoopConf = hadoopConf,
execJars = jars.toSeq,
config = configurations,
isolationOn = true,
barrierPrefixes = hiveMetastoreBarrierPrefixes,
sharedPrefixes = hiveMetastoreSharedPrefixes)
}
isolatedLoader.createClient()
}
/** Constructs a configuration for hive, where the metastore is located in a temp directory. */
def newTemporaryConfiguration(useInMemoryDerby: Boolean): Map[String, String] = {
val withInMemoryMode = if (useInMemoryDerby) "memory:" else ""
val tempDir = Utils.createTempDir()
val localMetastore = new File(tempDir, "metastore")
val propMap: HashMap[String, String] = HashMap()
// We have to mask all properties in hive-site.xml that relates to metastore data source
// as we used a local metastore here.
HiveConf.ConfVars.values().foreach { confvar =>
if (confvar.varname.contains("datanucleus") || confvar.varname.contains("jdo")
|| confvar.varname.contains("hive.metastore.rawstore.impl")) {
propMap.put(confvar.varname, confvar.getDefaultExpr())
}
}
propMap.put(WAREHOUSE_PATH.key, localMetastore.toURI.toString)
propMap.put(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname,
s"jdbc:derby:${withInMemoryMode};databaseName=${localMetastore.getAbsolutePath};create=true")
propMap.put("datanucleus.rdbms.datastoreAdapterClassName",
"org.datanucleus.store.rdbms.adapter.DerbyAdapter")
// SPARK-11783: When "hive.metastore.uris" is set, the metastore connection mode will be
// remote (https://cwiki.apache.org/confluence/display/Hive/AdminManual+MetastoreAdmin
// mentions that "If hive.metastore.uris is empty local mode is assumed, remote otherwise").
// Remote means that the metastore server is running in its own process.
// When the mode is remote, configurations like "javax.jdo.option.ConnectionURL" will not be
// used (because they are used by remote metastore server that talks to the database).
// Because execution Hive should always connect to an embedded derby metastore.
// We have to remove the value of hive.metastore.uris. So, the execution Hive client connects
// to the actual embedded derby metastore instead of the remote metastore.
// You can search HiveConf.ConfVars.METASTOREURIS in the code of HiveConf (in Hive's repo).
// Then, you will find that the local metastore mode is only set to true when
// hive.metastore.uris is not set.
propMap.put(ConfVars.METASTOREURIS.varname, "")
    // The execution client will generate garbage events, so the listeners that are generated
    // for the execution clients are useless. In order not to output garbage, we don't generate
    // these listeners.
propMap.put(ConfVars.METASTORE_PRE_EVENT_LISTENERS.varname, "")
propMap.put(ConfVars.METASTORE_EVENT_LISTENERS.varname, "")
propMap.put(ConfVars.METASTORE_END_FUNCTION_LISTENERS.varname, "")
    // SPARK-21451: Spark will gather all `spark.hadoop.*` properties from a `SparkConf` into a
    // Hadoop Configuration internally, but only after the SparkContext has been initialized.
    // Some instances, such as the `CliSessionState` used in `SparkSQLCliDriver`, also rely on this
    // Configuration but are created before the SparkContext is initialized, so we need to take
    // these properties from system properties in the form of regular Hadoop configurations.
SparkHadoopUtil.get.appendSparkHadoopConfigs(sys.props.toMap, propMap)
propMap.toMap
}
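  // A minimal usage sketch (not part of the original source), assuming this method is exposed on the
  // HiveUtils object as the file path suggests; the assertions merely restate what the code above puts
  // into the returned map.
  //
  //   val tempHiveConf = newTemporaryConfiguration(useInMemoryDerby = true)
  //   assert(tempHiveConf(ConfVars.METASTOREURIS.varname).isEmpty)               // embedded metastore
  //   assert(tempHiveConf(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname)
  //     .startsWith("jdbc:derby:memory:"))                                       // in-memory Derby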
/**
* Infers the schema for Hive serde tables and returns the CatalogTable with the inferred schema.
   * When the table is a data source table or the schema already exists, returns the original
   * CatalogTable.
*/
def inferSchema(table: CatalogTable): CatalogTable = {
if (DDLUtils.isDatasourceTable(table) || table.dataSchema.nonEmpty) {
table
} else {
val hiveTable = HiveClientImpl.toHiveTable(table)
// Note: Hive separates partition columns and the schema, but for us the
// partition columns are part of the schema
val partCols = hiveTable.getPartCols.asScala.map(HiveClientImpl.fromHiveColumn)
val dataCols = hiveTable.getCols.asScala.map(HiveClientImpl.fromHiveColumn)
table.copy(schema = StructType(dataCols ++ partCols))
}
}
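  // An illustrative note (not part of the original source) on the ordering produced above: for a
  // hypothetical Hive serde table with data columns (id INT, name STRING) partitioned by (dt STRING),
  // the inferred schema lists the data columns first and the partition columns last:
  //
  //   StructType(Seq(
  //     StructField("id", IntegerType),
  //     StructField("name", StringType),
  //     StructField("dt", StringType)))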
}
|
hhbyyh/spark
|
sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveUtils.scala
|
Scala
|
apache-2.0
| 22,707 |
package colossus.metrics
import akka.actor._
import akka.testkit._
import scala.language.higherKinds
import org.scalatest._
class NewSpec extends WordSpec with MustMatchers with BeforeAndAfterAll{
implicit val sys = ActorSystem("test")
def localCProbe: (LocalCollection, TestProbe) = {
val p = TestProbe()
implicit val a = p.ref
(new LocalCollection, p)
}
override def afterAll() {
sys.shutdown()
}
"LocalCollection" must {
"create a local collection" in {
val (c, probe) = localCProbe
1 must equal(1)
}
"create a rate" in {
val (c, probe) = localCProbe
val r: Rate = c.getOrAdd(Rate("/foo"))
1 must equal(1)
}
"create a counter" in {
val (c, probe) = localCProbe
val co: Counter = c.getOrAdd(Counter("/foo"))
1 must equal(1)
}
"return existing collector of same name and type" in {
val (c, probe) = localCProbe
val r: Rate = c.getOrAdd(Rate("/foo"))
val r2: Rate = c.getOrAdd(Rate("/foo"))
r must equal(r2)
}
"throw exception on creating wrong type on address match" in {
val (c, probe) = localCProbe
val r: Rate = c.getOrAdd(Rate("/foo"))
a[DuplicateMetricException] must be thrownBy {
val co: Counter = c.getOrAdd(Counter("/foo"))
}
}
"create a subcollection" in {
val (c, probe) = localCProbe
val sub = c.subCollection("/bar")
val r: Rate = sub.getOrAdd(Rate("/baz"))
r.address must equal(MetricAddress("/bar/baz"))
}
"uniqueness of addresses in sub collections" in {
val (c, probe) = localCProbe
val sub = c.subCollection("/bar")
val o: Counter = c.getOrAdd(Counter("/bar/baz"))
a[DuplicateMetricException] must be thrownBy {
val r: Rate = sub.getOrAdd(Rate("/baz"))
}
}
}
}
|
zgagnon/colossus
|
colossus-metrics/src/test/scala/colossus/metrics/CollectionSpec.scala
|
Scala
|
apache-2.0
| 1,862 |
package skinny.task
import org.scalatest._
class AssetsPrecompileTaskSpec extends FlatSpec with Matchers {
it should "be available" in {
AssetsPrecompileTask.main(Array("tmp"))
}
}
|
skinny-framework/skinny-framework
|
assets/src/test/scala/skinny/task/AssetsPrecompileTaskSpec.scala
|
Scala
|
mit
| 193 |
package org.thp.cortex.controllers
import scala.concurrent.{ ExecutionContext, Future }
import play.api.libs.json.JsObject
import play.api.mvc.{ AbstractController, Action, AnyContent, ControllerComponents }
import javax.inject.{ Inject, Singleton }
import org.thp.cortex.models.{ BaseConfig, Roles }
import org.thp.cortex.services.{ ResponderConfigSrv, UserSrv }
import org.elastic4play.BadRequestError
import org.elastic4play.controllers.{ Authenticated, Fields, FieldsBodyParser, Renderer }
@Singleton
class ResponderConfigCtrl @Inject() (
responderConfigSrv: ResponderConfigSrv,
userSrv: UserSrv,
authenticated: Authenticated,
fieldsBodyParser: FieldsBodyParser,
renderer: Renderer,
components: ControllerComponents,
implicit val ec: ExecutionContext) extends AbstractController(components) {
def get(analyzerConfigName: String): Action[AnyContent] = authenticated(Roles.orgAdmin).async { request ⇒
responderConfigSrv.getForUser(request.userId, analyzerConfigName)
.map(renderer.toOutput(OK, _))
}
def list(): Action[AnyContent] = authenticated(Roles.orgAdmin).async { request ⇒
responderConfigSrv.listConfigForUser(request.userId)
.map { bc ⇒
renderer.toOutput(OK, bc.sortWith {
case (BaseConfig("global", _, _, _), _) ⇒ true
case (_, BaseConfig("global", _, _, _)) ⇒ false
case (BaseConfig(a, _, _, _), BaseConfig(b, _, _, _)) ⇒ a.compareTo(b) < 0
})
}
}
def update(analyzerConfigName: String): Action[Fields] = authenticated(Roles.orgAdmin).async(fieldsBodyParser) { implicit request ⇒
request.body.getValue("config").flatMap(_.asOpt[JsObject]) match {
case Some(config) ⇒ responderConfigSrv.updateOrCreate(request.userId, analyzerConfigName, config)
.map(renderer.toOutput(OK, _))
case None ⇒ Future.failed(BadRequestError("attribute config has invalid format"))
}
}
}
|
CERT-BDF/Cortex
|
app/org/thp/cortex/controllers/ResponderConfigCtrl.scala
|
Scala
|
agpl-3.0
| 1,970 |
/**
 * Write a Scala object that has a volatile Boolean field.
 * Have one thread sleep for some time, then set the field to true, print a message, and exit.
 * Meanwhile, another thread keeps checking whether the field is true.
 * If it is, the thread prints a message and exits.
 * If it is not, the thread sleeps briefly and retries. What happens if the variable is not volatile?
 */
/***
import concurrent.ops.spawn
object Test06{
@volatile var value = false
}
spawn {
Thread.sleep(100);
Test06.value = true
println("Thread1: setting value to TRUE!")
}
spawn {
while(!Test06.value) Thread.sleep(20);
println("Thread2: value is TRUE!")
}
**/
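/**
 * A minimal sketch of the same exercise using plain java.lang.Thread, assuming a Scala version where
 * scala.concurrent.ops is no longer available; object and variable names here are illustrative only.
 *
 * object Test06Threads {
 *   @volatile var value = false
 *   def main(args: Array[String]): Unit = {
 *     val setter = new Thread(() => {
 *       Thread.sleep(100)
 *       value = true
 *       println("Thread1: setting value to TRUE!")
 *     })
 *     val watcher = new Thread(() => {
 *       while (!value) Thread.sleep(20)
 *       println("Thread2: value is TRUE!")
 *     })
 *     setter.start(); watcher.start()
 *     setter.join(); watcher.join()
 *   }
 * }
 */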
|
vernonzheng/scala-for-the-Impatient
|
src/Chapter15/exercise06.scala
|
Scala
|
mit
| 692 |
package net.sansa_stack.inference.spark.forwardchaining.triples
import net.sansa_stack.inference.data.RDF
import net.sansa_stack.inference.rules.{HighLevelRuleDependencyGraphGenerator, RuleDependencyGraph, RuleDependencyGraphGenerator}
import net.sansa_stack.inference.spark.data.model.AbstractRDFGraphSpark
import net.sansa_stack.inference.spark.rules.RuleExecutor
import org.apache.jena.reasoner.rulesys.Rule
import org.apache.spark.sql.SparkSession
import org.slf4j.LoggerFactory
import scala.language.{existentials, implicitConversions}
/**
* An optimized implementation of the forward chaining based reasoner.
*
* @author Lorenz Buehmann
*/
abstract class ForwardRuleReasonerOptimized[Rdf <: RDF, D, N <: Rdf#Node, T <: Rdf#Triple, G <: AbstractRDFGraphSpark[Rdf, D, G]]
(sparkSession: SparkSession, rules: Set[Rule], ruleExecutor: RuleExecutor[Rdf, D, N, T, G])
extends AbstractForwardRuleReasoner[Rdf, D, G] {
private val logger = com.typesafe.scalalogging.Logger(LoggerFactory.getLogger(this.getClass.getName))
var ruleExecutionCnt = 0
var countCnt = 0
var unionCnt = 0
var distinctCnt = 0
def reset(): Unit = {
ruleExecutionCnt = 0
countCnt = 0
unionCnt = 0
distinctCnt = 0
}
def showExecutionStats(): Unit = {
info("#Executed Rules:" + ruleExecutionCnt)
info("#Count Request:" + countCnt)
info("#Union Request:" + unionCnt)
info("#Distinct Request:" + distinctCnt)
}
/**
* Applies forward chaining to the given RDF graph and returns a new RDF graph that contains all additional
* triples based on the underlying set of rules.
*
* @param graph the RDF graph
* @return the materialized RDF graph
*/
def apply(graph: G): G = {
reset()
var newGraph = graph.cache()
// generate the rule dependency graph
val dependencyGraph = RuleDependencyGraphGenerator.generate(rules, pruned = true)
// generate the high-level dependency graph
val highLevelDependencyGraph = HighLevelRuleDependencyGraphGenerator.generate(dependencyGraph)
// apply topological sort and get the layers
val layers = highLevelDependencyGraph.layers().foldLeft(List[(Int, scala.Iterable[RuleDependencyGraph])]())((b, a) => a :: b)
// each layer contains a set of rule dependency graphs
// for each layer we process those
layers foreach { layer =>
newGraph = newGraph
.union(processLayer(layer, newGraph))
.distinct()
.cache()
unionCnt += 1
distinctCnt += 1
}
// de-duplicate
// newGraph = newGraph.distinct()
// return new graph
newGraph
}
private def processLayer(layer: (Int, Iterable[RuleDependencyGraph]), graph: G): G = {
logger.info("Processing layer " + layer._1 + "---" * 10)
logger.info(layer._2.map(rdg => rdg.printNodes()).mkString("--"))
var newGraph = graph
val processedRDGs = layer._2.map{rdg =>
logger.info("Processing dependency graph " + rdg.printNodes())
applyRules(rdg, newGraph)
}
newGraph = newGraph.unionAll(processedRDGs.toSeq).distinct().cache()
unionCnt += 1
distinctCnt += 1
newGraph
}
  /**
    * Apply the rules of a rule dependency graph to the given graph. If the dependency graph
    * contains a cycle, the rules are applied repeatedly until a fix-point is reached.
    *
    * @param rdg the rule dependency graph
    * @param graph the RDF graph
    */
def applyRules(rdg: RuleDependencyGraph, graph: G): G = {
var newGraph = graph.cache()
val rules = rdg.rules().toSeq
if(rdg.hasCycle()) {
var iteration = 1
var oldCount = 0L
var nextCount = newGraph.size()
logger.info(s"initial size:$nextCount")
do {
logger.info("Iteration " + iteration)
iteration += 1
oldCount = nextCount
newGraph = newGraph.union(applyRulesOnce(rules, newGraph)).distinct().cache()
unionCnt += 1
distinctCnt += 1
nextCount = newGraph.size()
logger.info(s"new size:$nextCount")
countCnt += 1
} while (nextCount != oldCount)
} else {
newGraph = newGraph.union(applyRulesOnce(rules, newGraph))
}
newGraph
}
/**
* Apply the set of rules on the given graph by doing fix-point iteration.
*
* @param rules the rules
* @param graph the graph
*/
def applyRules(rules: Seq[Rule], graph: G): G = {
var newGraph = graph.cache()
var iteration = 1
var oldCount = 0L
var nextCount = newGraph.size()
logger.info(s"initial size:$nextCount")
do {
logger.info("Iteration " + iteration)
iteration += 1
oldCount = nextCount
newGraph = newGraph.union(applyRulesOnce(rules, newGraph)).distinct().cache()
unionCnt += 1
distinctCnt += 1
nextCount = newGraph.size()
logger.info(s"new size:$nextCount")
countCnt += 1
} while (nextCount != oldCount)
newGraph
}
/**
* Apply the set of rules on the given graph once.
*
* @param rules the rules
* @param graph the graph
*/
def applyRulesOnce(rules: Seq[Rule], graph: G): G = {
val graphs = rules.map {rule =>
applyRule(rule, graph)
}
val newGraph = graph.unionAll(graphs.toList)
unionCnt += 1
// println(newGraph.toRDD().toDebugString)
newGraph
}
/**
* Apply a single rule on the given graph.
*
* @param rule the rule
* @param graph the graph
*/
def applyRule(rule: Rule, graph: G): G = {
logger.debug("Applying rule:" + rule)
ruleExecutionCnt += 1
ruleExecutor.execute(rule, graph)
}
}
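// A self-contained sketch (not part of the original source) of the fix-point pattern that applyRules
// implements above, written over plain Sets so it can be read in isolation; all names are illustrative.
//
//   @scala.annotation.tailrec
//   def fixPoint[A](current: Set[A])(step: Set[A] => Set[A]): Set[A] = {
//     val next = current union step(current)
//     if (next.size == current.size) current else fixPoint(next)(step)
//   }
//
//   // e.g. a bounded transitive closure of a successor relation:
//   fixPoint(Set(1))(s => s.map(_ + 1).filter(_ <= 5))   // Set(1, 2, 3, 4, 5)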
|
SANSA-Stack/SANSA-RDF
|
sansa-inference/sansa-inference-spark/src/main/scala/net/sansa_stack/inference/spark/forwardchaining/triples/ForwardRuleReasonerOptimized.scala
|
Scala
|
apache-2.0
| 5,547 |
import com.amazonaws.auth.InstanceProfileCredentialsProvider
import com.typesafe.sbt.SbtScalariform._
import ohnosequences.sbt.SbtS3Resolver
import ohnosequences.sbt.SbtS3Resolver.{ S3Resolver, s3, s3resolver }
import org.scalastyle.sbt.ScalastylePlugin.{ Settings => styleSettings }
import sbt.Keys._
import sbt._
import sbtrelease.ReleasePlugin._
import scalariform.formatter.preferences._
object ChaosBuild extends Build {
  lazy val testScalaStyle = taskKey[Unit]("testScalaStyle")
  // Settings only take effect when attached to a project, so the scalastyle wiring is collected
  // here and added to the root project's settings below.
  lazy val scalaStyleSettings = Seq(
    testScalaStyle := {
      org.scalastyle.sbt.PluginKeys.scalastyle.toTask("").value
    },
    (test in Test) <<= (test in Test) dependsOn testScalaStyle
  )
  lazy val root = Project(
    id = "chaos",
    base = file("."),
    settings = baseSettings ++
      releaseSettings ++
      publishSettings ++
      formatSettings ++
      styleSettings ++
      scalaStyleSettings ++
      Seq(
        libraryDependencies ++= Dependencies.root,
        parallelExecution in Test := false,
        fork in Test := true
      )
  )
lazy val baseSettings = Defaults.defaultSettings ++ Seq (
organization := "mesosphere",
scalaVersion := "2.11.7",
crossScalaVersions := Seq("2.11.7"),
scalacOptions in Compile ++= Seq("-encoding", "UTF-8", "-target:jvm-1.8", "-deprecation", "-feature", "-unchecked", "-Xlog-reflective-calls", "-Xlint"),
javacOptions in Compile ++= Seq("-encoding", "UTF-8", "-source", "1.8", "-target", "1.8", "-Xlint:unchecked", "-Xlint:deprecation"),
resolvers ++= Seq(
"Mesosphere Public Repo" at "http://downloads.mesosphere.io/maven",
"Twitter Maven2 Repository" at "http://maven.twttr.com/",
"Sonatype OSS Snapshots" at "https://oss.sonatype.org/content/repositories/snapshots"
)
)
lazy val publishSettings = S3Resolver.defaults ++ Seq(
publishTo := Some(s3resolver.value(
"Mesosphere Public Repo (S3)",
s3("downloads.mesosphere.io/maven")
)),
SbtS3Resolver.s3credentials := new InstanceProfileCredentialsProvider()
)
lazy val formatSettings = scalariformSettings ++ Seq(
ScalariformKeys.preferences := FormattingPreferences()
.setPreference(IndentWithTabs, false)
.setPreference(IndentSpaces, 2)
.setPreference(AlignParameters, true)
.setPreference(DoubleIndentClassDeclaration, true)
.setPreference(MultilineScaladocCommentsStartOnFirstLine, false)
.setPreference(PlaceScaladocAsterisksBeneathSecondAsterisk, true)
.setPreference(PreserveDanglingCloseParenthesis, true)
.setPreference(CompactControlReadability, true) //MV: should be false!
.setPreference(AlignSingleLineCaseStatements, true)
.setPreference(PreserveSpaceBeforeArguments, true)
.setPreference(SpaceBeforeColon, false)
.setPreference(SpaceInsideBrackets, false)
.setPreference(SpaceInsideParentheses, false)
.setPreference(SpacesWithinPatternBinders, true)
.setPreference(FormatXml, true)
)
}
object Dependencies {
import Dependency._
val root = Seq(
// runtime
guava % "compile",
guice % "compile",
guiceServlet % "compile",
jettyServer % "compile",
jettyServlet % "compile",
jettySecurity % "compile",
jerseyCore % "compile",
jerseyServer % "compile",
jerseyServlet % "compile",
jerseyGuice % "compile",
jacksonScala % "compile",
jacksonJaxrs % "compile",
hibernate % "compile",
glassfish % "compile",
metricsJersey % "compile",
metricsJvm % "compile",
metricsJetty % "compile",
metricsServlets % "compile",
scallop % "compile",
mustache % "compile",
slf4jLog4j % "compile",
slf4jJul % "compile",
log4j % "compile",
liftMD % "compile",
// test
Test.junit % "test",
Test.mockito % "test"
)
}
object Dependency {
object V {
// runtime deps versions
val Guava = "17.0"
val Guice = "3.0"
val Scallop = "0.9.5"
val Jersey = "1.18.1"
val Metrics = "3.1.2"
val Jetty = "9.3.2.v20150730"
val Jackson = "2.4.5"
val Hibernate = "5.2.1.Final"
val Mustache = "0.9.0"
val Slf4j = "1.7.12"
val LiftMarkdown = "2.6.2"
val Log4J = "1.2.17"
val Glassfish = "2.2.6"
// test deps versions
val JUnit = "4.12"
val Mockito = "1.10.19"
}
val guava = "com.google.guava" % "guava" % V.Guava
val guice = "com.google.inject" % "guice" % V.Guice
val guiceServlet = "com.google.inject.extensions" % "guice-servlet" % V.Guice
val jettyServer = "org.eclipse.jetty" % "jetty-server" % V.Jetty
val jettyServlet = "org.eclipse.jetty" % "jetty-servlet" % V.Jetty
val jettySecurity = "org.eclipse.jetty" % "jetty-security" % V.Jetty
val jerseyCore = "com.sun.jersey" % "jersey-core" % V.Jersey
val jerseyServer = "com.sun.jersey" % "jersey-server" % V.Jersey
val jerseyServlet = "com.sun.jersey" % "jersey-servlet" % V.Jersey
val jerseyGuice = "com.sun.jersey.contribs" % "jersey-guice" % V.Jersey
val jacksonScala = "com.fasterxml.jackson.module" %% "jackson-module-scala" % V.Jackson
val jacksonJaxrs = "com.fasterxml.jackson.jaxrs" % "jackson-jaxrs-json-provider" % V.Jackson
val hibernate = "org.hibernate" % "hibernate-validator" % V.Hibernate
val glassfish = "org.glassfish.web" % "javax.el" % V.Glassfish
val metricsJersey = "io.dropwizard.metrics" % "metrics-jersey" % V.Metrics
val metricsJvm = "io.dropwizard.metrics" % "metrics-jvm" % V.Metrics
val metricsJetty = "io.dropwizard.metrics" % "metrics-jetty9" % V.Metrics
val metricsServlets = "io.dropwizard.metrics" % "metrics-servlets" % V.Metrics
val scallop = "org.rogach" %% "scallop" % V.Scallop
val mustache = "com.github.spullara.mustache.java" % "compiler" % V.Mustache
val slf4jLog4j = "org.slf4j" % "slf4j-log4j12" % V.Slf4j
val slf4jJul = "org.slf4j" % "jul-to-slf4j" % V.Slf4j
val log4j = "log4j" % "log4j" % V.Log4J
val liftMD = "net.liftweb" %% "lift-markdown" % V.LiftMarkdown
object Test {
val junit = "junit" % "junit" % V.JUnit
val mockito = "org.mockito" % "mockito-all" % V.Mockito
}
}
// vim: set ts=4 sw=4 et:
|
abel-von/chaos
|
project/build.scala
|
Scala
|
apache-2.0
| 6,043 |
/*
* Copyright 2018 Analytics Zoo Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.zoo.pipeline.api.keras.layers
import com.intel.analytics.bigdl.nn.{CMul => BCMul}
import com.intel.analytics.zoo.pipeline.api.keras.layers.{CMul => ZCMul}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.Shape
import com.intel.analytics.zoo.pipeline.api.keras.ZooSpecHelper
import com.intel.analytics.zoo.pipeline.api.keras.serializer.ModuleSerializationTest
class CMulSpec extends ZooSpecHelper {
"CMul (2, 1) Zoo" should "be the same as BigDL" in {
val blayer = BCMul[Float](Array(2, 1))
val zlayer = ZCMul[Float](Array(2, 1), inputShape = Shape(3))
zlayer.build(Shape(-1, 3))
zlayer.getOutputShape().toSingle().toArray should be (Array(-1, 3))
val input = Tensor[Float](Array(2, 3)).rand()
compareOutputAndGradInputSetWeights(blayer, zlayer, input)
}
"CMul (1, 1, 1) Zoo" should "be the same as BigDL" in {
val blayer = BCMul[Float](Array(1, 1, 1))
val zlayer = ZCMul[Float](Array(1, 1, 1), inputShape = Shape(3, 4))
zlayer.build(Shape(-1, 3, 4))
zlayer.getOutputShape().toSingle().toArray should be (Array(-1, 3, 4))
val input = Tensor[Float](Array(2, 3, 4)).rand()
compareOutputAndGradInputSetWeights(blayer, zlayer, input)
}
}
class CMulSerialTest extends ModuleSerializationTest {
override def test(): Unit = {
val layer = ZCMul[Float](Array(1, 1, 1), inputShape = Shape(3, 4))
layer.build(Shape(2, 3, 4))
val input = Tensor[Float](2, 3, 4).rand()
runSerializationTest(layer, input)
}
}
|
intel-analytics/analytics-zoo
|
zoo/src/test/scala/com/intel/analytics/zoo/pipeline/api/keras/layers/CMulSpec.scala
|
Scala
|
apache-2.0
| 2,149 |
package io.iohk.ethereum.sync.util
import akka.util.ByteString
import cats.effect.Resource
import io.iohk.ethereum.Mocks.MockValidatorsAlwaysSucceed
import io.iohk.ethereum.blockchain.sync.SyncProtocol
import io.iohk.ethereum.blockchain.sync.fast.FastSync
import io.iohk.ethereum.blockchain.sync.fast.FastSync.SyncState
import io.iohk.ethereum.crypto.kec256
import io.iohk.ethereum.domain.Address
import io.iohk.ethereum.mpt.{HashNode, MptNode, MptTraversals}
import io.iohk.ethereum.sync.util.SyncCommonItSpecUtils.FakePeerCustomConfig.defaultConfig
import io.iohk.ethereum.sync.util.SyncCommonItSpecUtils._
import io.iohk.ethereum.utils.ByteUtils
import monix.eval.Task
import scala.annotation.tailrec
import scala.concurrent.duration._
import scala.util.Try
object FastSyncItSpecUtils {
class FakePeer(peerName: String, fakePeerCustomConfig: FakePeerCustomConfig)
extends CommonFakePeer(peerName, fakePeerCustomConfig) {
lazy val validators = new MockValidatorsAlwaysSucceed
lazy val fastSync = system.actorOf(
FastSync.props(
storagesInstance.storages.fastSyncStateStorage,
storagesInstance.storages.appStateStorage,
bl,
validators,
peerEventBus,
etcPeerManager,
testSyncConfig,
system.scheduler
)
)
def startFastSync(): Task[Unit] = Task {
fastSync ! SyncProtocol.Start
}
def waitForFastSyncFinish(): Task[Boolean] = {
retryUntilWithDelay(Task(storagesInstance.storages.appStateStorage.isFastSyncDone()), 1.second, 90) { isDone =>
isDone
}
}
    // Reads the whole trie into memory; if the trie lacks nodes in storage, the result will be None.
def getBestBlockTrie(): Option[MptNode] = {
Try {
val bestBlock = bl.getBestBlock()
val bestStateRoot = bestBlock.header.stateRoot
MptTraversals.parseTrieIntoMemory(
HashNode(bestStateRoot.toArray),
storagesInstance.storages.stateStorage.getBackingStorage(bestBlock.number)
)
}.toOption
}
def containsExpectedDataUpToAccountAtBlock(n: BigInt, blockNumber: BigInt): Boolean = {
@tailrec
def go(i: BigInt): Boolean = {
if (i >= n) {
true
} else {
val expectedBalance = i
val accountAddress = Address(i)
val accountExpectedCode = ByteString(i.toByteArray)
val codeHash = kec256(accountExpectedCode)
val accountExpectedStorageAddresses = (i until i + 20).toList
val account = bl.getAccount(accountAddress, blockNumber).get
val code = bl.getEvmCodeByHash(codeHash).get
val storedData = accountExpectedStorageAddresses.map { addr =>
ByteUtils.toBigInt(bl.getAccountStorageAt(account.storageRoot, addr, ethCompatibleStorage = true))
}
val haveAllStoredData = accountExpectedStorageAddresses.zip(storedData).forall { case (address, value) =>
address == value
}
val dataIsCorrect =
account.balance.toBigInt == expectedBalance && code == accountExpectedCode && haveAllStoredData
if (dataIsCorrect) {
go(i + 1)
} else {
false
}
}
}
go(0)
}
def startWithState(): Task[Unit] = {
Task {
val currentBest = bl.getBestBlock().header
val safeTarget = currentBest.number + syncConfig.fastSyncBlockValidationX
val nextToValidate = currentBest.number + 1
val syncState =
SyncState(
pivotBlock = currentBest,
lastFullBlockNumber = currentBest.number,
safeDownloadTarget = safeTarget,
blockBodiesQueue = Seq(),
receiptsQueue = Seq(),
downloadedNodesCount = 0,
totalNodesCount = 0,
bestBlockHeaderNumber = currentBest.number,
nextBlockToFullyValidate = nextToValidate
)
storagesInstance.storages.fastSyncStateStorage.putSyncState(syncState)
}.map(_ => ())
}
}
object FakePeer {
def startFakePeer(peerName: String, fakePeerCustomConfig: FakePeerCustomConfig): Task[FakePeer] = {
for {
peer <- Task(new FakePeer(peerName, fakePeerCustomConfig))
_ <- peer.startPeer()
} yield peer
}
def start1FakePeerRes(
fakePeerCustomConfig: FakePeerCustomConfig = defaultConfig,
name: String
): Resource[Task, FakePeer] = {
Resource.make {
startFakePeer(name, fakePeerCustomConfig)
} { peer =>
peer.shutdown()
}
}
def start2FakePeersRes(
fakePeerCustomConfig1: FakePeerCustomConfig = defaultConfig,
fakePeerCustomConfig2: FakePeerCustomConfig = defaultConfig
): Resource[Task, (FakePeer, FakePeer)] = {
for {
peer1 <- start1FakePeerRes(fakePeerCustomConfig1, "Peer1")
peer2 <- start1FakePeerRes(fakePeerCustomConfig2, "Peer2")
} yield (peer1, peer2)
}
def start3FakePeersRes(
fakePeerCustomConfig1: FakePeerCustomConfig = defaultConfig,
fakePeerCustomConfig2: FakePeerCustomConfig = defaultConfig,
fakePeerCustomConfig3: FakePeerCustomConfig = defaultConfig
): Resource[Task, (FakePeer, FakePeer, FakePeer)] = {
for {
peer1 <- start1FakePeerRes(fakePeerCustomConfig1, "Peer1")
peer2 <- start1FakePeerRes(fakePeerCustomConfig2, "Peer2")
peer3 <- start1FakePeerRes(fakePeerCustomConfig3, "Peer3")
} yield (peer1, peer2, peer3)
}
def start4FakePeersRes(
fakePeerCustomConfig1: FakePeerCustomConfig = defaultConfig,
fakePeerCustomConfig2: FakePeerCustomConfig = defaultConfig,
fakePeerCustomConfig3: FakePeerCustomConfig = defaultConfig,
fakePeerCustomConfig4: FakePeerCustomConfig = defaultConfig
): Resource[Task, (FakePeer, FakePeer, FakePeer, FakePeer)] = {
for {
peer1 <- start1FakePeerRes(fakePeerCustomConfig1, "Peer1")
peer2 <- start1FakePeerRes(fakePeerCustomConfig2, "Peer2")
peer3 <- start1FakePeerRes(fakePeerCustomConfig3, "Peer3")
        peer4 <- start1FakePeerRes(fakePeerCustomConfig4, "Peer4")
} yield (peer1, peer2, peer3, peer4)
}
}
}
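// A minimal usage sketch (not part of the original source) of the Resource helpers above; the test
// body, the timeout and the implicit monix Scheduler are assumptions, not part of the real test suite.
//
//   import monix.execution.Scheduler.Implicits.global
//   FakePeer.start2FakePeersRes().use { case (peer1, peer2) =>
//     for {
//       _    <- peer1.startFastSync()
//       done <- peer1.waitForFastSyncFinish()
//     } yield assert(done)
//   }.runSyncUnsafe(5.minutes)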
|
input-output-hk/etc-client
|
src/it/scala/io/iohk/ethereum/sync/util/FastSyncItSpecUtils.scala
|
Scala
|
mit
| 6,251 |
package rest
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import entities.JsonProtocol
import persistence.entities.{SimpleSupplier, Supplier}
import utils.{PersistenceModule, Configuration}
import JsonProtocol._
import SprayJsonSupport._
import scala.util.{Failure, Success}
class SupplierRoutes(modules: Configuration with PersistenceModule) {
private val supplierGetRoute = path("supplier" / IntNumber) { (supId) =>
get {
onComplete((modules.suppliersDal.findById(supId)).mapTo[Option[Supplier]]) {
case Success(supplierOpt) => supplierOpt match {
case Some(sup) => complete(sup)
case None => complete(NotFound, s"The supplier doesn't exist")
}
case Failure(ex) => complete(InternalServerError, s"An error occurred: ${ex.getMessage}")
}
}
}
private val supplierPostRoute = path("supplier") {
post {
entity(as[SimpleSupplier]) { supplierToInsert => onComplete((modules.suppliersDal.insert(Supplier(0, supplierToInsert.name, supplierToInsert.desc)))) {
// ignoring the number of insertedEntities because in this case it should always be one, you might check this in other cases
case Success(insertedEntities) => complete(Created)
case Failure(ex) => complete(InternalServerError, s"An error occurred: ${ex.getMessage}")
}
}
}
}
val routes: Route = supplierPostRoute ~ supplierGetRoute
}
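// A minimal binding sketch (not part of the original source), assuming an implicit ActorSystem and
// Materializer plus a concrete `modules` value are in scope; the host and port are arbitrary.
//
//   val supplierRoutes = new SupplierRoutes(modules).routes
//   Http().bindAndHandle(supplierRoutes, "0.0.0.0", 8080)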
|
Chehao/Akkala
|
game-account/src/main/scala/rest/SupplierRoutes.scala
|
Scala
|
apache-2.0
| 1,569 |
/*
* -╥⌐⌐⌐⌐ -⌐⌐⌐⌐-
* ≡╢░░░░⌐\\░░░φ ╓╝░░░░⌐░░░░╪╕
* ╣╬░░` `░░░╢┘ φ▒╣╬╝╜ ░░╢╣Q
* ║╣╬░⌐ ` ╤▒▒▒Å` ║╢╬╣
* ╚╣╬░⌐ ╔▒▒▒▒`«╕ ╢╢╣▒
* ╫╬░░╖ .░ ╙╨╨ ╣╣╬░φ ╓φ░╢╢Å
* ╙╢░░░░⌐"░░░╜ ╙Å░░░░⌐░░░░╝`
* ``˚¬ ⌐ ˚˚⌐´
*
* Copyright © 2016 Flipkart.com
*/
package com.flipkart.connekt.firefly.flows.dispatchers
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.settings.ConnectionPoolSettings
import akka.stream.ActorMaterializer
import com.flipkart.connekt.busybees.models.WAContactTracker
import com.flipkart.connekt.commons.services.{ConnektConfig, KeyChainManager}
import com.flipkart.connekt.firefly.sinks.http.HttpRequestTracker
import com.typesafe.config.Config
import com.typesafe.sslconfig.akka.AkkaSSLConfig
import com.typesafe.sslconfig.ssl.{TrustManagerConfig, TrustStoreConfig}
import scala.concurrent.ExecutionContextExecutor
class HttpDispatcher(actorSystemConf: Config) {
implicit val httpSystem: ActorSystem = ActorSystem("firefly-http-out", actorSystemConf)
implicit val httpMat: ActorMaterializer = ActorMaterializer()
implicit val ec: ExecutionContextExecutor = httpSystem.dispatcher
private val insecureHttpFlow = {
// TODO :: Appname
val certificate = KeyChainManager.getWhatsAppCredentials("flipkart").get.getCertificateStr
val trustStoreConfig = TrustStoreConfig(Some(certificate), None).withStoreType("PEM")
val pipelineLimit = ConnektConfig.getInt("wa.contact.check.pipeline.limit").get
val maxConnections = ConnektConfig.getInt("wa.contact.check.max.parallel.connections").get
val trustManagerConfig = TrustManagerConfig().withTrustStoreConfigs(List(trustStoreConfig))
val badSslConfig = AkkaSSLConfig().mapSettings(s => s.withLoose(s.loose
.withAcceptAnyCertificate(true)
.withDisableHostnameVerification(true)
).withTrustManagerConfig(trustManagerConfig))
val badCtx = Http().createClientHttpsContext(badSslConfig)
Http().superPool[WAContactTracker](badCtx, ConnectionPoolSettings(httpSystem).withPipeliningLimit(pipelineLimit).withMaxConnections(maxConnections))(httpMat)
}
val httpPoolFlow = Http().superPool[HttpRequestTracker]()(httpMat)
}
object HttpDispatcher {
var dispatcher: Option[HttpDispatcher] = None
def apply(config: Config) = {
if (dispatcher.isEmpty) {
dispatcher = Some(new HttpDispatcher(config))
}
}
def insecureHttpFlow = dispatcher.map(_.insecureHttpFlow).get
def httpFlow = dispatcher.map(_.httpPoolFlow).get
}
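// A usage sketch (not part of the original source): initialise the dispatcher once with the actor
// system Config and then reuse the pooled flows; `actorSystemConfig` is a hypothetical value.
//
//   HttpDispatcher(actorSystemConfig)            // only the first call creates the dispatcher
//   val contactCheckFlow = HttpDispatcher.insecureHttpFlow
//   val callbackFlow     = HttpDispatcher.httpFlow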
|
Flipkart/connekt
|
firefly/src/main/scala/com/flipkart/connekt/firefly/flows/dispatchers/HttpDispatcher.scala
|
Scala
|
mit
| 2,854 |
package org.karps.row
import org.apache.spark.sql.Row
import org.apache.spark.sql.types._
import org.karps.structures.{AugmentedDataType, IsNullable}
import spray.json.{DefaultJsonProtocol, JsArray, JsBoolean, JsNull, JsNumber, JsObject, JsString, JsValue, RootJsonFormat}
import scala.util.{Failure, Success, Try}
/**
* A representation of a row that is easy to manipulate with
* algebraic datatypes.
*/
case class AlgebraicRow(cells: Seq[Cell])
object AlgebraicRow extends DefaultJsonProtocol {
import org.karps.structures.JsonSparkConversions.{sequence, get}
import Cell.CellOrdering
def fromRow(r: Row, st: StructType): Try[AlgebraicRow] = {
Cell.from(r.toSeq, st) match {
case Success(RowCell(c)) => Success(c)
case Success(x) => Failure(new Exception(s"Got $x from $st -> $r"))
case Failure(e) => Failure(e)
}
}
def toRow(ar: AlgebraicRow): Row = Row(ar.cells.map(Cell.toAny):_*)
/**
* Attempts to denormalize the row.
*/
def denormalize(ar: AlgebraicRow): Try[Cell] = {
ar.cells match {
case Seq(c) => Success(c)
case x => Failure(new Exception(s"Expected single cell, got $x"))
}
}
// Defines a canonical ordering across any row and any cell.
// The content need not match
object RowOrdering extends Ordering[AlgebraicRow] {
override def compare(x: AlgebraicRow, y: AlgebraicRow): Int = {
Cell.CellOrdering.compare(RowArray(x.cells), RowArray(y.cells))
}
}
}
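// A round-trip sketch (not part of the original source), assuming a one-column string schema; the
// schema and row values below are hypothetical.
//
//   val schema = StructType(Seq(StructField("name", StringType, nullable = false)))
//   val row    = Row("karps")
//   AlgebraicRow.fromRow(row, schema).map(AlgebraicRow.toRow)   // Success(Row("karps")) if the cell types line up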
|
krapsh/kraps-server
|
src/main/scala/org/karps/row/AlgebraicRow.scala
|
Scala
|
apache-2.0
| 1,475 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.adaptive
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{Attribute, Expression}
import org.apache.spark.sql.catalyst.plans.physical.{Partitioning, UnknownPartitioning}
import org.apache.spark.sql.execution.exchange.{ReusedExchangeExec, ShuffleExchangeExec}
import org.apache.spark.sql.execution._
import org.apache.spark.sql.vectorized.ColumnarBatch
/**
* A wrapper of shuffle query stage, which follows the given partition arrangement.
*
* @param child It is usually `ShuffleQueryStageExec`, but can be the shuffle exchange
* node during canonicalization.
* @param partitionSpecs The partition specs that defines the arrangement.
* @param description The string description of this shuffle reader.
*/
case class ColumnarCustomShuffleReaderExec(
child: SparkPlan,
partitionSpecs: Seq[ShufflePartitionSpec],
description: String)
extends UnaryExecNode {
  // We don't extend CustomShuffleReaderExec since it has a private constructor.
override def output: Seq[Attribute] = child.output
override lazy val outputPartitioning: Partitioning = {
// If it is a local shuffle reader with one mapper per task, then the output partitioning is
// the same as the plan before shuffle.
// TODO this check is based on assumptions of callers' behavior but is sufficient for now.
if (partitionSpecs.forall(_.isInstanceOf[PartialMapperPartitionSpec]) &&
partitionSpecs.map(_.asInstanceOf[PartialMapperPartitionSpec].mapIndex).toSet.size ==
partitionSpecs.length) {
child match {
case ShuffleQueryStageExec(_, s: ShuffleExchangeExec) =>
s.child.outputPartitioning
case ShuffleQueryStageExec(_, r @ ReusedExchangeExec(_, s: ShuffleExchangeExec)) =>
s.child.outputPartitioning match {
case e: Expression => r.updateAttr(e).asInstanceOf[Partitioning]
case other => other
}
case _ =>
throw new IllegalStateException("operating on canonicalization plan")
}
} else {
UnknownPartitioning(partitionSpecs.length)
}
}
override def stringArgs: Iterator[Any] = Iterator(description)
private var cachedShuffleRDD: RDD[ColumnarBatch] = null
override def supportsColumnar: Boolean = true
override protected def doExecuteColumnar(): RDD[ColumnarBatch] = {
if (cachedShuffleRDD == null) {
cachedShuffleRDD = child match {
case stage: ShuffleQueryStageExec =>
new ShuffledColumnarBatchRDD(
stage.shuffle.asInstanceOf[ColumnarShuffleExchangeExec].columnarShuffleDependency,
stage.shuffle.asInstanceOf[ColumnarShuffleExchangeExec].readMetrics,
partitionSpecs.toArray)
case _ =>
throw new IllegalStateException("operating on canonicalization plan")
}
}
cachedShuffleRDD
}
override protected def doExecute(): RDD[InternalRow] =
throw new UnsupportedOperationException()
}
|
Intel-bigdata/OAP
|
oap-native-sql/core/src/main/scala/org/apache/spark/sql/execution/adaptive/ColumnarCustomShuffleReaderExec.scala
|
Scala
|
apache-2.0
| 3,888 |
package com.fang.page
import com.thoughtworks.binding.{Binding, dom}
import org.scalajs.dom.raw.{Event, Node}
import com.fang.ImplicitConvert._
import com.fang.UserStatus
import com.fang.UserStatus.UpdateUS
import com.fang.data.GlobalValue
import com.fang.game.rules.Rules
import com.fang.segment.UserStatusNavBar
import com.thoughtworks.binding.Binding.Vars
import org.scalajs.dom.window
class ChooseGamePage(val userId: String) extends Page{
val allGames: Vars[String] = Vars()
override def title(): String = "choose the game"
@dom override def onLoad(): Binding[Node] = {
DomUtil.assignVars(allGames, Rules.allRules)
<div>
{UserStatusNavBar().bind}
<div class="container with-padding">
<h1><b>Find Game</b></h1>
<div class="padding10"></div>
<h2>Which game you want to play</h2>
{
for(name <- allGames) yield
<button class="btn btn-info btn-block"
onclick={_:Event => onSelectGame(name)}>{name}</button>
}
<div class="padding10"></div>
<button class="btn btn-danger btn-block" onclick={_:Event => window.history.back()}>Back</button>
</div>
</div>
}
def onSelectGame(name: String):Unit = {
GlobalValue.userStatusSession match {
case None =>
window.alert("please login first")
window.location.hash = "login"
case Some(x) =>
x.sendMessage(UpdateUS(UserStatus.waiting(userId, name)))
}
}
}
|
TianhaoFang/online-go
|
js/src/main/scala/com/fang/page/ChooseGamePage.scala
|
Scala
|
mit
| 1,474 |
package ir.fulltext.indri
import java.nio.file.{Path, Paths}
import util.Config
/**
* <pre>
* Created on 2017/01/13.
* </pre>
*
* @author K.Sakamoto
*/
object IndriWordLevelIndexer {
private val indices: Array[String] = Config.wordLevelIndriIndices.toArray
private val segmentations: Array[String] = Config.wordLevelIndriSegmentations.toArray
private val resources: Array[String] = Config.trecTextFormatData.toArray
private val reviser: TrecTextFileFormatReviser = new TrecTextFileFormatReviser(1, false)
def run(): Unit = {
if (segmentations.length == indices.length && resources.length == indices.length) {
for (i <- indices.indices) {
val resource: Path = Paths.get(resources(i))
val segmentation: Path = Paths.get(segmentations(i))
val indexPath: Path = Paths.get(indices(i))
reviser.reviseInDirectory(resource, segmentation)
new IndriIndex(segmentation, indexPath).index()
println()
}
}
}
def main(args: Array[String]): Unit = {
run()
}
}
|
ktr-skmt/FelisCatusZero
|
src/main/scala/ir/fulltext/indri/IndriWordLevelIndexer.scala
|
Scala
|
apache-2.0
| 1,050 |
package dotty.tools.dotc
package printing
import core._
import Texts._, Types._, Flags._, Names._, Symbols._, NameOps._, Constants._, Denotations._
import StdNames._
import Contexts._
import Scopes.Scope, Denotations.Denotation, Annotations.Annotation
import StdNames.nme
import ast.Trees._
import typer.Implicits._
import typer.ImportInfo
import Variances.varianceSign
import util.SourcePosition
import scala.util.control.NonFatal
import scala.annotation.switch
class PlainPrinter(_ctx: Context) extends Printer {
/** The context of all public methods in Printer and subclasses.
* Overridden in RefinedPrinter.
*/
protected def curCtx: Context = _ctx.addMode(Mode.Printing)
protected given [DummyToEnforceDef]: Context = curCtx
protected def printDebug = ctx.settings.YprintDebug.value
private var openRecs: List[RecType] = Nil
protected def maxToTextRecursions: Int = 100
protected def showUniqueIds = ctx.settings.uniqid.value || Printer.debugPrintUnique
protected def showNestingLevel = ctx.settings.YprintLevel.value
protected final def limiter: MessageLimiter = ctx.property(MessageLimiter).get
protected def controlled(op: => Text): Text = limiter.controlled(op)
def Str(str: String, lineRange: LineRange = EmptyLineRange): Str =
limiter.register(str)
Texts.Str(str, lineRange)
given stringToText: Conversion[String, Text] = Str(_)
/** If true, tweak output so it is the same before and after pickling */
protected def homogenizedView: Boolean = ctx.settings.YtestPickler.value
protected def debugPos: Boolean = ctx.settings.YdebugPos.value
def homogenize(tp: Type): Type =
if (homogenizedView)
tp match {
case tp: ThisType if tp.cls.is(Package) && !tp.cls.isEffectiveRoot =>
requiredPackage(tp.cls.fullName).termRef
case tp: TypeVar if tp.isInstantiated =>
homogenize(tp.instanceOpt)
case AndType(tp1, tp2) =>
homogenize(tp1) & homogenize(tp2)
case OrType(tp1, tp2) =>
homogenize(tp1) | homogenize(tp2)
case AnnotatedType(parent, annot)
if !ctx.mode.is(Mode.Type) && annot.symbol == defn.UncheckedVarianceAnnot =>
homogenize(parent)
case tp: SkolemType =>
homogenize(tp.info)
case tp: LazyRef =>
homogenize(tp.ref)
case tp @ AppliedType(tycon, args) =>
if (defn.isCompiletimeAppliedType(tycon.typeSymbol)) tp.tryCompiletimeConstantFold
else tycon.dealias.appliedTo(args)
case tp: NamedType =>
tp.reduceProjection
case _ =>
tp
}
else tp
private def sameBound(lo: Type, hi: Type): Boolean =
try lo frozen_=:= hi catch { case NonFatal(ex) => false }
private def homogenizeArg(tp: Type) = tp match {
case TypeBounds(lo, hi) if homogenizedView && sameBound(lo, hi) => homogenize(hi)
case _ => tp
}
private def selfRecName(n: Int) = s"z$n"
/** If the name of the symbol's owner should be used when you care about
* seeing an interesting name: in such cases this symbol is e.g. a method
* parameter with a synthetic name, a constructor named "this", an object
* "package", etc. The kind string, if non-empty, will be phrased relative
* to the name of the owner.
*/
protected def hasMeaninglessName(sym: Symbol): Boolean = (
sym.is(Param) && sym.owner.isSetter // x$1
|| sym.isClassConstructor // this
|| (sym.name == nme.PACKAGE) // package
)
def nameString(name: Name): String =
if (name eq tpnme.FromJavaObject) && !printDebug
then nameString(tpnme.Object)
else name.toString
def toText(name: Name): Text = Str(nameString(name))
/** String representation of a name used in a refinement
* In refined printing this undoes type parameter expansion
*/
protected def refinementNameString(tp: RefinedType): String = nameString(tp.refinedName)
/** String representation of a refinement */
protected def toTextRefinement(rt: RefinedType): Closed =
(refinementNameString(rt) ~ toTextRHS(rt.refinedInfo)).close
protected def argText(arg: Type): Text = homogenizeArg(arg) match {
case arg: TypeBounds => "?" ~ toText(arg)
case arg => toText(arg)
}
/** Pretty-print comma-separated type arguments for a constructor to be inserted among parentheses or brackets
* (hence with `GlobalPrec` precedence).
*/
protected def argsText(args: List[Type]): Text =
atPrec(GlobalPrec) { Text(args.map(arg => argText(arg) ), ", ") }
/** The longest sequence of refinement types, starting at given type
* and following parents.
*/
private def refinementChain(tp: Type): List[Type] =
tp :: (tp match {
case tp: RefinedType => refinementChain(tp.parent.stripTypeVar)
case _ => Nil
})
/** Direct references to these symbols are printed without their prefix for convenience.
* They are either aliased in scala.Predef or in the scala package object, as well as `Object`
*/
private lazy val printWithoutPrefix: Set[Symbol] =
(defn.ScalaPredefModule.termRef.typeAliasMembers
++ defn.ScalaPackageObject.termRef.typeAliasMembers).map(_.info.classSymbol).toSet
+ defn.ObjectClass
+ defn.FromJavaObjectSymbol
def toText(tp: Type): Text = controlled {
homogenize(tp) match {
case tp: TypeType =>
toTextRHS(tp)
case tp: TermRef
if !tp.denotationIsCurrent && !homogenizedView || // always print underlying when testing picklers
tp.symbol.is(Module) || tp.symbol.name == nme.IMPORT =>
toTextRef(tp) ~ ".type"
case tp: TermRef if tp.denot.isOverloaded =>
"<overloaded " ~ toTextRef(tp) ~ ">"
case tp: TypeRef =>
if (printWithoutPrefix.contains(tp.symbol))
toText(tp.name)
else
toTextPrefix(tp.prefix) ~ selectionString(tp)
case tp: TermParamRef =>
ParamRefNameString(tp) ~ lambdaHash(tp.binder) ~ ".type"
case tp: TypeParamRef =>
val suffix =
if showNestingLevel then
val tvar = ctx.typerState.constraint.typeVarOfParam(tp)
if tvar.exists then s"#${tvar.asInstanceOf[TypeVar].nestingLevel.toString}" else ""
else ""
ParamRefNameString(tp) ~ lambdaHash(tp.binder) ~ suffix
case tp: SingletonType =>
toTextSingleton(tp)
case AppliedType(tycon, args) =>
(toTextLocal(tycon) ~ "[" ~ argsText(args) ~ "]").close
case tp: RefinedType =>
val parent :: (refined: List[RefinedType @unchecked]) =
refinementChain(tp).reverse
toTextLocal(parent) ~ "{" ~ Text(refined map toTextRefinement, "; ").close ~ "}"
case tp: RecType =>
try {
openRecs = tp :: openRecs
"{" ~ selfRecName(openRecs.length) ~ " => " ~ toTextGlobal(tp.parent) ~ "}"
}
finally openRecs = openRecs.tail
case AndType(tp1, tp2) =>
changePrec(AndTypePrec) { toText(tp1) ~ " & " ~ atPrec(AndTypePrec + 1) { toText(tp2) } }
case OrType(tp1, tp2) =>
changePrec(OrTypePrec) { toText(tp1) ~ " | " ~ atPrec(OrTypePrec + 1) { toText(tp2) } }
case MatchType(bound, scrutinee, cases) =>
changePrec(GlobalPrec) {
def caseText(tp: Type): Text = tp match {
case tp: HKTypeLambda => caseText(tp.resultType)
case defn.MatchCase(pat, body) => "case " ~ toText(pat) ~ " => " ~ toText(body)
case _ => "case " ~ toText(tp)
}
          def casesText = Text(cases.map(caseText), "\n")
atPrec(InfixPrec) { toText(scrutinee) } ~
keywordStr(" match ") ~ "{" ~ casesText ~ "}" ~
(" <: " ~ toText(bound) provided !bound.isAny)
}.close
      case tp: PreviousErrorType if ctx.settings.XprintTypes.value =>
        "<error>" // do not print the previously reported error message because it may try to print this error type again recursively
case tp: ErrorType =>
s"<error ${tp.msg.rawMessage}>"
case tp: WildcardType =>
if (tp.optBounds.exists) "<?" ~ toTextRHS(tp.bounds) ~ ">" else "<?>"
case NoType =>
"<notype>"
case NoPrefix =>
"<noprefix>"
case tp: MethodType =>
changePrec(GlobalPrec) {
"("
~ keywordText("using ").provided(tp.isContextualMethod)
~ keywordText("erased ").provided(tp.isErasedMethod)
~ keywordText("implicit ").provided(tp.isImplicitMethod && !tp.isContextualMethod)
~ paramsText(tp)
~ (if tp.resultType.isInstanceOf[MethodType] then ")" else "): ")
~ toText(tp.resultType)
}
case tp: ExprType =>
changePrec(GlobalPrec) { "=> " ~ toText(tp.resultType) }
case tp: HKTypeLambda =>
changePrec(GlobalPrec) {
"[" ~ paramsText(tp) ~ "]" ~ lambdaHash(tp) ~ Str(" =>> ") ~ toTextGlobal(tp.resultType)
}
case tp: PolyType =>
changePrec(GlobalPrec) {
"[" ~ paramsText(tp) ~ "]" ~ lambdaHash(tp) ~
(Str(" => ") provided !tp.resultType.isInstanceOf[MethodType]) ~
toTextGlobal(tp.resultType)
}
case AnnotatedType(tpe, annot) =>
if annot.symbol == defn.InlineParamAnnot || annot.symbol == defn.ErasedParamAnnot then toText(tpe)
else toTextLocal(tpe) ~ " " ~ toText(annot)
case tp: TypeVar =>
if (tp.isInstantiated)
toTextLocal(tp.instanceOpt) ~ (Str("^") provided printDebug)
else {
val constr = ctx.typerState.constraint
val bounds =
if constr.contains(tp) then
withMode(Mode.Printing)(TypeComparer.fullBounds(tp.origin))
else
TypeBounds.empty
if (bounds.isTypeAlias) toText(bounds.lo) ~ (Str("^") provided printDebug)
else if (ctx.settings.YshowVarBounds.value) "(" ~ toText(tp.origin) ~ "?" ~ toText(bounds) ~ ")"
else toText(tp.origin)
}
case tp: LazyRef =>
def refTxt =
try toTextGlobal(tp.ref)
catch {
case ex: Throwable => Str("...")
}
"LazyRef(" ~ refTxt ~ ")"
case _ =>
tp.fallbackToText(this)
}
}.close
def toTextSingleton(tp: SingletonType): Text =
"(" ~ toTextRef(tp) ~ " : " ~ toTextGlobal(tp.underlying) ~ ")"
protected def paramsText(lam: LambdaType): Text = {
def paramText(name: Name, tp: Type) =
toText(name) ~ lambdaHash(lam) ~ toTextRHS(tp, isParameter = true)
Text(lam.paramNames.lazyZip(lam.paramInfos).map(paramText), ", ")
}
protected def ParamRefNameString(name: Name): String = nameString(name)
protected def ParamRefNameString(param: ParamRef): String =
ParamRefNameString(param.binder.paramNames(param.paramNum))
/** The name of the symbol without a unique id. */
protected def simpleNameString(sym: Symbol): String = nameString(sym.name)
/** If -uniqid is set, the hashcode of the lambda type, after a # */
protected def lambdaHash(pt: LambdaType): Text =
if (showUniqueIds)
try "#" + pt.hashCode
catch { case ex: NullPointerException => "" }
else ""
/** A string to append to a symbol composed of:
* - if -uniqid is set, its unique id after a #.
* - if -Yprint-level, its nesting level after a %.
*/
protected def idString(sym: Symbol): String =
(if (showUniqueIds || Printer.debugPrintUnique) "#" + sym.id else "") +
(if (showNestingLevel) "%" + sym.nestingLevel else "")
def nameString(sym: Symbol): String =
simpleNameString(sym) + idString(sym) // + "<" + (if (sym.exists) sym.owner else "") + ">"
def fullNameString(sym: Symbol): String =
if (sym eq defn.FromJavaObjectSymbol) && !printDebug then
fullNameString(defn.ObjectClass)
else if sym.isRoot || sym == NoSymbol || sym.owner.isEffectiveRoot then
nameString(sym)
else
fullNameString(fullNameOwner(sym)) + "." + nameString(sym)
protected def fullNameOwner(sym: Symbol): Symbol = sym.effectiveOwner.enclosingClass
protected def objectPrefix: String = "object "
protected def packagePrefix: String = "package "
protected def trimPrefix(text: Text): Text =
text.stripPrefix(objectPrefix).stripPrefix(packagePrefix)
protected def selectionString(tp: NamedType): String = {
val sym = if (homogenizedView) tp.symbol else tp.currentSymbol
if (sym.exists) nameString(sym) else nameString(tp.name)
}
/** The string representation of this type used as a prefix */
def toTextRef(tp: SingletonType): Text = controlled {
tp match {
case tp: TermRef =>
toTextPrefix(tp.prefix) ~ selectionString(tp)
case tp: ThisType =>
nameString(tp.cls) + ".this"
case SuperType(thistpe: SingletonType, _) =>
        toTextRef(thistpe).map(_.replaceAll("""\bthis$""", "super").nn)
case SuperType(thistpe, _) =>
"Super(" ~ toTextGlobal(thistpe) ~ ")"
case tp @ ConstantType(value) =>
toText(value)
case pref: TermParamRef =>
nameString(pref.binder.paramNames(pref.paramNum))
case tp: RecThis =>
val idx = openRecs.reverse.indexOf(tp.binder)
if (idx >= 0) selfRecName(idx + 1)
else "{...}.this" // TODO move underlying type to an addendum, e.g. ... z3 ... where z3: ...
case tp: SkolemType =>
if (homogenizedView) toText(tp.info)
else if (ctx.settings.XprintTypes.value) "<" ~ toText(tp.repr) ~ ":" ~ toText(tp.info) ~ ">"
else toText(tp.repr)
}
}
/** The string representation of this type used as a prefix, including separator */
def toTextPrefix(tp: Type): Text = controlled {
homogenize(tp) match {
case NoPrefix => ""
case tp: SingletonType => toTextRef(tp) ~ "."
case tp => trimPrefix(toTextLocal(tp)) ~ "#"
}
}
protected def isOmittablePrefix(sym: Symbol): Boolean =
defn.unqualifiedOwnerTypes.exists(_.symbol == sym) || isEmptyPrefix(sym)
protected def isEmptyPrefix(sym: Symbol): Boolean =
sym.isEffectiveRoot || sym.isAnonymousClass || sym.name.isReplWrapperName
/** String representation of a definition's type following its name,
* if symbol is completed, "?" otherwise.
*/
protected def toTextRHS(optType: Option[Type]): Text = optType match {
case Some(tp) => toTextRHS(tp)
case None => "?"
}
protected def decomposeLambdas(bounds: TypeBounds): (Text, TypeBounds) =
def decompose(tp: Type): (Text, Type) = tp.stripTypeVar match
case lam: HKTypeLambda =>
val names =
if lam.isDeclaredVarianceLambda then
lam.paramNames.lazyZip(lam.declaredVariances).map((name, v) =>
varianceSign(v) + name)
else lam.paramNames.map(_.toString)
val infos = lam.paramInfos.map(toText)
val tparams = names.zip(infos).map(_ ~ _)
("[" ~ Text(tparams, ",") ~ "]", lam.resType)
case _ =>
("", tp)
bounds match
case bounds: AliasingBounds =>
val (tparamStr, aliasRhs) = decompose(bounds.alias)
(tparamStr, bounds.derivedAlias(aliasRhs))
case TypeBounds(lo, hi) =>
val (_, loRhs) = decompose(lo)
val (tparamStr, hiRhs) = decompose(hi)
(tparamStr, bounds.derivedTypeBounds(loRhs, hiRhs))
end decomposeLambdas
/** String representation of a definition's type following its name */
protected def toTextRHS(tp: Type, isParameter: Boolean = false): Text = controlled {
homogenize(tp) match {
case tp: TypeBounds =>
val (tparamStr, rhs) = decomposeLambdas(tp)
val binder = rhs match
case tp: AliasingBounds =>
" = " ~ toText(tp.alias)
case TypeBounds(lo, hi) =>
(if (lo isRef defn.NothingClass) Text() else " >: " ~ toText(lo))
~ (if hi.isAny || (!printDebug && hi.isFromJavaObject) then Text() else " <: " ~ toText(hi))
tparamStr ~ binder
case tp @ ClassInfo(pre, cls, cparents, decls, selfInfo) =>
val preText = toTextLocal(pre)
val (tparams, otherDecls) = decls.toList partition treatAsTypeParam
val tparamsText =
if (tparams.isEmpty) Text() else ("[" ~ dclsText(tparams) ~ "]").close
val selfText: Text = selfInfo match {
case NoType => Text()
case sym: Symbol if !sym.isCompleted => "this: ? =>"
case _ => "this: " ~ atPrec(InfixPrec) { toText(tp.selfType) } ~ " =>"
}
val trueDecls = otherDecls.filterNot(treatAsTypeArg)
val declsText =
if (trueDecls.isEmpty || !ctx.settings.Ydebug.value) Text()
else dclsText(trueDecls)
tparamsText ~ " extends " ~ toTextParents(tp.parents) ~~ "{" ~ selfText ~ declsText ~
"} at " ~ preText
case mt: MethodType =>
toTextGlobal(mt)
case tp: ExprType =>
// parameterless methods require special treatment, see #11201
(if (isParameter) ": => " else ": ") ~ toTextGlobal(tp.widenExpr)
case tp: PolyType =>
"[" ~ paramsText(tp) ~ "]"
~ (Str(": ") provided !tp.resultType.isInstanceOf[MethodType])
~ toTextGlobal(tp.resultType)
case tp =>
": " ~ toTextGlobal(tp)
}
}
protected def toTextParents(parents: List[Type]): Text = Text(parents.map(toTextLocal), " with ")
protected def treatAsTypeParam(sym: Symbol): Boolean = false
protected def treatAsTypeArg(sym: Symbol): Boolean = false
/** String representation of symbol's kind. */
def kindString(sym: Symbol): String = {
val flags = sym.flagsUNSAFE
if (flags.is(PackageClass)) "package class"
else if (flags.is(PackageVal)) "package"
else if (sym.isPackageObject)
if (sym.isClass) "package object class"
else "package object"
else if (sym.isAnonymousClass) "anonymous class"
else if (flags.is(ModuleClass)) "object class"
else if (flags.is(ModuleVal)) "object"
else if (flags.is(Trait)) "trait"
else if (sym.isClass) "class"
else if (sym.isType) "type"
else if (sym.isGetter) "getter"
else if (sym.isSetter) "setter"
else if sym.is(Param) then "parameter"
else if sym.is(Given) then "given instance"
else if (flags.is(Lazy)) "lazy value"
else if (flags.is(Mutable)) "variable"
else if (sym.isClassConstructor && sym.isPrimaryConstructor) "primary constructor"
else if (sym.isClassConstructor) "constructor"
else if (sym.is(Method)) "method"
else if (sym.isTerm) "value"
else ""
}
/** String representation of symbol's definition keyword */
protected def keyString(sym: Symbol): String = {
val flags = sym.flagsUNSAFE
if (flags.isAllOf(JavaInterface)) "interface"
else if (flags.is(Trait)) "trait"
else if (flags.is(Module)) "object"
else if (sym.isClass) "class"
else if (sym.isType) "type"
else if (flags.is(Mutable)) "var"
else if (flags.is(Package)) "package"
else if (sym.is(Method)) "def"
else if (sym.isTerm && !flags.is(Param)) "val"
else ""
}
protected def privateWithinString(sym: Symbol): String =
if (sym.exists && sym.privateWithin.exists)
nameString(sym.privateWithin.name.stripModuleClassSuffix)
else ""
/** String representation of symbol's flags */
protected def toTextFlags(sym: Symbol): Text = toTextFlags(sym, sym.flagsUNSAFE)
protected def toTextFlags(sym: Symbol, flags: FlagSet): Text =
Text(flags.flagStrings(privateWithinString(sym)).map(flag => stringToText(keywordStr(flag))), " ")
def annotsText(sym: Symbol): Text = Text(sym.annotations.map(toText))
def dclText(sym: Symbol): Text = dclTextWithInfo(sym, sym.unforcedInfo)
def dclText(d: SingleDenotation): Text = dclTextWithInfo(d.symbol, Some(d.info))
private def dclTextWithInfo(sym: Symbol, info: Option[Type]): Text =
(toTextFlags(sym) ~~ keyString(sym) ~~
(varianceSign(sym.variance) ~ nameString(sym)) ~ toTextRHS(info)).close
def toText(sym: Symbol): Text =
(kindString(sym) ~~ {
if (sym.isAnonymousClass) toTextParents(sym.info.parents) ~~ "{...}"
else if (hasMeaninglessName(sym) && !printDebug) simpleNameString(sym.owner) + idString(sym)
else if sym.is(Package) then fullNameString(sym)
else nameString(sym)
}).close
def locationText(sym: Symbol): Text =
if (!sym.exists) ""
else {
val ownr = sym.effectiveOwner
if (ownr.isClass && !isEmptyPrefix(ownr)) " in " ~ toText(ownr) else Text()
}
def locatedText(sym: Symbol): Text =
(toText(sym) ~ locationText(sym)).close
def extendedLocationText(sym: Symbol): Text =
if (!sym.exists) ""
else {
def recur(ownr: Symbol, innerLocation: String): Text = {
def nextOuter(innerKind: String): Text =
recur(ownr.effectiveOwner,
if (!innerLocation.isEmpty) innerLocation
else s" in an anonymous $innerKind")
def showLocation(ownr: Symbol, where: String): Text =
innerLocation ~ " " ~ where ~ " " ~ toText(ownr)
if (ownr.isAnonymousClass) nextOuter("class")
else if (ownr.isAnonymousFunction) nextOuter("function")
else if (isEmptyPrefix(ownr)) ""
else if (ownr.isLocalDummy) showLocation(ownr.owner, "locally defined in")
else if (ownr.isTerm && !ownr.isOneOf(Module | Method)) showLocation(ownr, "in the initializer of")
else showLocation(ownr, "in")
}
recur(sym.owner, "")
}
def toText(denot: Denotation): Text = toText(denot.symbol) ~ "/D"
  private def escapedChar(ch: Char): String = (ch: @switch) match {
    case '\b' => "\\b"
    case '\t' => "\\t"
    case '\n' => "\\n"
    case '\f' => "\\f"
    case '\r' => "\\r"
    case '"' => "\\\""
    case '\'' => "\\\'"
    case '\\' => "\\\\"
    case _ => if ch.isControl then f"${"\\"}u${ch.toInt}%04x" else String.valueOf(ch).nn
  }
def toText(const: Constant): Text = const.tag match {
    case StringTag => stringText("\"" + escapedString(const.value.toString) + "\"")
case ClazzTag => "classOf[" ~ toText(const.typeValue) ~ "]"
case CharTag => literalText(s"'${escapedChar(const.charValue)}'")
case LongTag => literalText(const.longValue.toString + "L")
case DoubleTag => literalText(const.doubleValue.toString + "d")
case FloatTag => literalText(const.floatValue.toString + "f")
case _ => literalText(String.valueOf(const.value).nn)
}
/** Usual target for `Annotation#toText`, overridden in RefinedPrinter */
def annotText(annot: Annotation): Text = s"@${annot.symbol.name}"
def toText(annot: Annotation): Text = annot.toText(this)
def toText(param: LambdaParam): Text =
varianceSign(param.paramVariance)
~ toText(param.paramName)
~ (if param.isTypeParam then "" else ": ")
~ toText(param.paramInfo)
protected def escapedString(str: String): String = str flatMap escapedChar
def dclsText(syms: List[Symbol], sep: String): Text = Text(syms map dclText, sep)
def toText(sc: Scope): Text =
("Scope{" ~ dclsText(sc.toList) ~ "}").close
def toText[T >: Untyped](tree: Tree[T]): Text = {
def toTextElem(elem: Any): Text = elem match {
case elem: Showable => elem.toText(this)
case elem: List[?] => "List(" ~ Text(elem map toTextElem, ",") ~ ")"
case elem => elem.toString
}
val nodeName = tree.productPrefix
val elems =
Text(tree.productIterator.map(toTextElem).toList, ", ")
val tpSuffix =
if (ctx.settings.XprintTypes.value && tree.hasType)
" | " ~ toText(tree.typeOpt)
else
Text()
nodeName ~ "(" ~ elems ~ tpSuffix ~ ")" ~ (Str(tree.sourcePos.toString) provided printDebug)
}.close
def toText(pos: SourcePosition): Text =
if (!pos.exists) "<no position>"
else if (pos.source.exists) s"${pos.source.file.name}:${pos.line + 1}"
else s"(no source file, offset = ${pos.span.point})"
def toText(result: SearchResult): Text = result match {
case result: SearchSuccess =>
"SearchSuccess: " ~ toText(result.ref) ~ " via " ~ toText(result.tree)
case result: SearchFailure =>
result.reason match {
case _: NoMatchingImplicits => "No Matching Implicit"
case _: DivergingImplicit => "Diverging Implicit"
case result: AmbiguousImplicits =>
"Ambiguous Implicit: " ~ toText(result.alt1.ref) ~ " and " ~ toText(result.alt2.ref)
case _ =>
"Search Failure: " ~ toText(result.tree)
}
}
def toText(importInfo: ImportInfo): Text =
val siteStr = importInfo.site.show
val exprStr = if siteStr.endsWith(".type") then siteStr.dropRight(5) else siteStr
val selectorStr = importInfo.selectors match
case sel :: Nil if sel.renamed.isEmpty && sel.bound.isEmpty =>
if sel.isGiven then "given" else sel.name.show
case _ => "{...}"
s"import $exprStr.$selectorStr"
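  // Illustrative note (not in the original source): a single plain selector is shown by
  // name (or as `given`), while renamed, bounded or multiple selectors collapse to "{...}",
  // e.g. an import with two selectors renders as  import scala.collection.mutable.{...}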
def toText(c: OrderingConstraint): Text =
val savedConstraint = ctx.typerState.constraint
try
// The current TyperState constraint determines how type variables are printed
ctx.typerState.constraint = c
def entryText(tp: Type) = tp match {
case tp: TypeBounds =>
toText(tp)
case _ =>
" := " ~ toText(tp)
}
val indent = 3
val uninstVarsText = " uninstantiated variables: " ~
Text(c.uninstVars.map(toText), ", ")
val constrainedText =
" constrained types: " ~ Text(c.domainLambdas.map(toText), ", ")
val boundsText =
" bounds: " ~ {
val assocs =
for (param <- c.domainParams)
yield (" " * indent) ~ toText(param) ~ entryText(c.entry(param))
          Text(assocs, "\n")
}
val orderingText =
" ordering: " ~ {
val deps =
for {
param <- c.domainParams
ups = c.minUpper(param)
if ups.nonEmpty
}
yield
(" " * indent) ~ toText(param) ~ " <: " ~
Text(ups.map(toText), ", ")
          Text(deps, "\n")
}
//Printer.debugPrintUnique = false
Text.lines(List(uninstVarsText, constrainedText, boundsText, orderingText))
finally
ctx.typerState.constraint = savedConstraint
def plain: PlainPrinter = this
protected def keywordStr(text: String): String = coloredStr(text, SyntaxHighlighting.KeywordColor)
protected def keywordText(text: String): Text = coloredStr(text, SyntaxHighlighting.KeywordColor)
protected def valDefText(text: Text): Text = coloredText(text, SyntaxHighlighting.ValDefColor)
protected def typeText(text: Text): Text = coloredText(text, SyntaxHighlighting.TypeColor)
protected def literalText(text: Text): Text = coloredText(text, SyntaxHighlighting.LiteralColor)
protected def stringText(text: Text): Text = coloredText(text, SyntaxHighlighting.StringColor)
protected def coloredStr(text: String, color: String): String =
if (ctx.useColors) color + text + SyntaxHighlighting.NoColor else text
protected def coloredText(text: Text, color: String): Text =
if (ctx.useColors) color ~ text ~ SyntaxHighlighting.NoColor else text
}
|
dotty-staging/dotty
|
compiler/src/dotty/tools/dotc/printing/PlainPrinter.scala
|
Scala
|
apache-2.0
| 27,130 |
package com.krux.hyperion.aws
/**
* A condition that must be met before the object can run.
* The activity cannot run until all its conditions are met.
*/
trait AdpPrecondition extends AdpDataPipelineObject {
/**
* The IAM role to use for this precondition.
*/
def role: String
/**
* The precondition will be retried until the retryTimeout with a gap of retryDelay between attempts.
* Time period; for example, "1 hour".
*/
def preconditionTimeout: Option[String]
/**
   * Maximum number of retry attempts on failure
*/
def maximumRetries: Option[String]
/**
* Actions to run when current object fails.
*/
def onFail: Option[Seq[AdpRef[AdpAction]]]
/**
   * Actions that should be triggered if an object has not yet been scheduled or has
   * still not completed.
*/
def onLateAction: Option[Seq[AdpRef[AdpAction]]]
/**
* Actions to run when current object succeeds.
*/
def onSuccess: Option[Seq[AdpRef[AdpAction]]]
}
/**
* A precondition to check that data exists in a DynamoDB table.
*
* @param tableName The DynamoDB table to check.
*/
case class AdpDynamoDBDataExistsPrecondition (
id: String,
name: Option[String],
tableName: String,
role: String,
preconditionTimeout: Option[String],
maximumRetries: Option[String],
onFail: Option[Seq[AdpRef[AdpAction]]],
onLateAction: Option[Seq[AdpRef[AdpAction]]],
onSuccess: Option[Seq[AdpRef[AdpAction]]]
) extends AdpPrecondition {
val `type` = "DynamoDBDataExists"
}
/**
* A precondition to check that the DynamoDB table exists.
*
* @param tableName The DynamoDB table to check.
*/
case class AdpDynamoDBTableExistsPrecondition(
id: String,
name: Option[String],
tableName: String,
role: String,
preconditionTimeout: Option[String],
maximumRetries: Option[String],
onFail: Option[Seq[AdpRef[AdpAction]]],
onLateAction: Option[Seq[AdpRef[AdpAction]]],
onSuccess: Option[Seq[AdpRef[AdpAction]]]
) extends AdpPrecondition {
val `type` = "DynamoDBTableExists"
}
/**
* Checks whether a data node object exists.
*/
case class AdpExistsPrecondition(
id: String,
name: Option[String],
role: String,
preconditionTimeout: Option[String],
maximumRetries: Option[String],
onFail: Option[Seq[AdpRef[AdpAction]]],
onLateAction: Option[Seq[AdpRef[AdpAction]]],
onSuccess: Option[Seq[AdpRef[AdpAction]]]
) extends AdpPrecondition {
val `type` = "Exists"
}
/**
* Checks whether a key exists in an Amazon S3 data node.
*
* @param s3Key Amazon S3 key to check for existence.
*/
case class AdpS3KeyExistsPrecondition(
id: String,
name: Option[String],
s3Key: String,
role: String,
preconditionTimeout: Option[String],
maximumRetries: Option[String],
onFail: Option[Seq[AdpRef[AdpAction]]],
onLateAction: Option[Seq[AdpRef[AdpAction]]],
onSuccess: Option[Seq[AdpRef[AdpAction]]]
) extends AdpPrecondition {
val `type` = "S3KeyExists"
}
/**
* A precondition to check that the Amazon S3 objects with the given prefix (represented as a URI) are present.
*
* @param s3Prefix The Amazon S3 prefix to check for existence of objects.
*/
case class AdpS3PrefixNotEmptyPrecondition(
id: String,
name: Option[String],
s3Prefix: String,
role: String,
preconditionTimeout: Option[String],
maximumRetries: Option[String],
onFail: Option[Seq[AdpRef[AdpAction]]],
onLateAction: Option[Seq[AdpRef[AdpAction]]],
onSuccess: Option[Seq[AdpRef[AdpAction]]]
) extends AdpPrecondition {
val `type` = "S3PrefixNotEmpty"
}
/**
* A Unix/Linux shell command that can be run as a precondition.
*
* @param command The command to run. This value and any associated parameters must function in the environment from which you are running the Task Runner.
* @param scriptArgument A list of arguments to pass to the shell script.
* @param scriptUri An Amazon S3 URI path for a file to download and run as a shell command. Only one scriptUri or command field should be present. scriptUri cannot use parameters, use command instead.
* @param stdout The Amazon S3 path that receives redirected output from the command. If you use the runsOn field, this must be an Amazon S3 path because of the transitory nature of the resource running your activity. However if you specify the workerGroup field, a local file path is permitted.
* @param stderr The Amazon S3 path that receives redirected system error messages from the command. If you use the runsOn field, this must be an Amazon S3 path because of the transitory nature of the resource running your activity. However if you specify the workerGroup field, a local file path is permitted.
*
*/
case class AdpShellCommandPrecondition(
id: String,
name: Option[String],
command: Option[String],
scriptUri: Option[String],
scriptArgument: Option[Seq[String]],
stdout: Option[String],
stderr: Option[String],
role: String,
preconditionTimeout: Option[String],
maximumRetries: Option[String],
onFail: Option[Seq[AdpRef[AdpAction]]],
onLateAction: Option[Seq[AdpRef[AdpAction]]],
onSuccess: Option[Seq[AdpRef[AdpAction]]]
) extends AdpPrecondition {
val `type` = "ShellCommandPrecondition"
}
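/**
 * Hedged usage sketch, not part of the original file: shows how a shell-command
 * precondition value might be constructed. The id, command, role and timeout values
 * below are made-up placeholders, not defaults defined by this library.
 */
private[aws] object AdpShellCommandPreconditionExample {
  val waitForInputFlag = AdpShellCommandPrecondition(
    id = "ExampleShellCommandPrecondition",
    name = Some("wait-for-input-flag"),
    command = Some("test -e /tmp/input-ready"),
    scriptUri = None,
    scriptArgument = None,
    stdout = None,
    stderr = None,
    role = "DataPipelineDefaultRole",
    preconditionTimeout = Some("1 hour"),
    maximumRetries = Some("3"),
    onFail = None,
    onLateAction = None,
    onSuccess = None
  )
}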
|
sethyates/hyperion
|
core/src/main/scala/com/krux/hyperion/aws/AdpPreconditions.scala
|
Scala
|
apache-2.0
| 5,159 |
/*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package viper.silicon.tests
import org.scalatest.FunSuite
import org.scalatest.Matchers
import viper.silicon.state.Identifier
import DSL._
import viper.silicon.state.terms._
class SimpleArithmeticTermSolverTests extends FunSuite with Matchers {
import SimpleArithmeticSolver.{solve, SolverResult, SolvingSuccess, SolvingFailure}
test("Pre-solving errors") {
assert(solve(b, y, y).isInstanceOf[SolverResult])
assert(solve(x, b, y).isInstanceOf[SolverResult])
assert(solve(y, y, y).isInstanceOf[SolverResult])
assert(solve(x, x, y).isInstanceOf[SolverResult])
assert(solve(x, y + y, y).isInstanceOf[SolverResult])
}
test("Simple successes") {
solve(x, y, y) should be (SolvingSuccess(y, x))
solve(x, y + `1`, y) should be (SolvingSuccess(y, x - `1`))
solve(x, n + y, y) should be (SolvingSuccess(y, x - n))
solve(x, `0` - y, y) should be (SolvingSuccess(y, `0` - x))
solve(x, y - x, y) should be (SolvingSuccess(y, x + x))
}
test("Simple failures") {
solve(x, y + `1`, y) should not be SolvingSuccess(y, x + `1`)
solve(x, `0` - y, y) should not be SolvingSuccess(y, x - `0`)
}
test("Successes") {
solve(x, (`1` + y) - (n + x), y) should be (SolvingSuccess(y, x + (n + x) - `1`))
solve(x, (y - f(x)) - f(m), y) should be (SolvingSuccess(y, x + f(m) + f(x)))
}
test("Failures") {
solve(x, (n + f(y)) - m, y) should be (SolvingFailure(x + m - n, f(y), y))
}
}
/* TODO: Add more operators/handy functions; make generally available */
private[tests] object DSL {
implicit class ArithmeticOperators(t1: Term) {
def +(t2: Term) = Plus(t1, t2)
def -(t2: Term) = Minus(t1, t2)
def *(t2: Term) = Times(t1, t2)
def /(t2: Term) = Div(t1, t2)
def >(t2: Term) = Greater(t1, t2)
}
implicit class BooleanOperators(t1: Term) {
def &&(t2: Term) = And(t1, t2)
def ==>(t2: Term) = Implies(t1, t2)
}
val x = Var(Identifier("x"), sorts.Int)
val y = Var(Identifier("y"), sorts.Int)
val z = Var(Identifier("z"), sorts.Int)
val n = Var(Identifier("n"), sorts.Int)
val m = Var(Identifier("m"), sorts.Int)
val b = Var(Identifier("b"), sorts.Int)
val `0` = IntLiteral(0)
val `1` = IntLiteral(1)
val `2` = IntLiteral(2)
private val f1 = Fun(Identifier("f"), sorts.Int, sorts.Int)
private val g1 = Fun(Identifier("g"), sorts.Int, sorts.Int)
private val f2 = Fun(Identifier("f"), Seq(sorts.Int, sorts.Int), sorts.Int)
private val g2 = Fun(Identifier("g"), Seq(sorts.Int, sorts.Int), sorts.Int)
private val f3 = Fun(Identifier("f"), Seq(sorts.Int, sorts.Int, sorts.Int), sorts.Int)
private val g3 = Fun(Identifier("g"), Seq(sorts.Int, sorts.Int, sorts.Int), sorts.Int)
def f(t: Term) = App(f1, t)
def g(t: Term) = App(g1, t)
def f(t1: Term, t2: Term) = App(f2, Seq(t1, t2))
def g(t1: Term, t2: Term) = App(g2, Seq(t1, t2))
def f(t1: Term, t2: Term, t3: Term) = App(f3, Seq(t1, t2, t3))
def g(t1: Term, t2: Term, t3: Term) = App(g3, Seq(t1, t2, t3))
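  // Illustrative note (not in the original source): with these helpers ordinary operator
  // syntax builds terms, e.g. x + f(y) * `2` constructs
  // Plus(x, Times(App(f1, y), IntLiteral(2))), since * binds tighter than +.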
}
|
sccblom/vercors
|
viper/silicon/src/test/scala/SimpleArithmeticTermSolverTests.scala
|
Scala
|
mpl-2.0
| 3,292 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import org.apache.spark.{SparkConf, SparkEnv}
import org.apache.spark.sql.QueryTest
import org.apache.spark.sql.execution.datasources.v2.BatchScanExec
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
class SparkPlanSuite extends QueryTest with SharedSparkSession {
override def sparkConf: SparkConf =
super.sparkConf
.setAppName("test")
.set("spark.sql.parquet.columnarReaderBatchSize", "4096")
.set("spark.sql.sources.useV1SourceList", "avro")
.set("spark.sql.extensions", "com.intel.oap.ColumnarPlugin")
.set("spark.sql.execution.arrow.maxRecordsPerBatch", "4096")
//.set("spark.shuffle.manager", "org.apache.spark.shuffle.sort.ColumnarShuffleManager")
.set("spark.memory.offHeap.enabled", "true")
.set("spark.memory.offHeap.size", "50m")
.set("spark.sql.join.preferSortMergeJoin", "false")
.set("spark.sql.columnar.codegen.hashAggregate", "false")
.set("spark.oap.sql.columnar.wholestagecodegen", "false")
.set("spark.sql.columnar.window", "false")
.set("spark.unsafe.exceptionOnMemoryLeak", "false")
//.set("spark.sql.columnar.tmp_dir", "/codegen/nativesql/")
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
test("SPARK-21619 execution of a canonicalized plan should fail") {
val plan = spark.range(10).queryExecution.executedPlan.canonicalized
intercept[IllegalStateException] { plan.execute() }
intercept[IllegalStateException] { plan.executeCollect() }
intercept[IllegalStateException] { plan.executeCollectPublic() }
intercept[IllegalStateException] { plan.executeToIterator() }
intercept[IllegalStateException] { plan.executeBroadcast() }
intercept[IllegalStateException] { plan.executeTake(1) }
intercept[IllegalStateException] { plan.executeTail(1) }
}
test("SPARK-23731 plans should be canonicalizable after being (de)serialized") {
withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> "parquet") {
withTempPath { path =>
spark.range(1).write.parquet(path.getAbsolutePath)
val df = spark.read.parquet(path.getAbsolutePath)
val fileSourceScanExec =
df.queryExecution.sparkPlan.collectFirst { case p: FileSourceScanExec => p }.get
val serializer = SparkEnv.get.serializer.newInstance()
val readback =
serializer.deserialize[FileSourceScanExec](serializer.serialize(fileSourceScanExec))
try {
readback.canonicalized
} catch {
case e: Throwable => fail("FileSourceScanExec was not canonicalizable", e)
}
}
}
}
test("SPARK-27418 BatchScanExec should be canonicalizable after being (de)serialized") {
withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> "") {
withTempPath { path =>
spark.range(1).write.parquet(path.getAbsolutePath)
val df = spark.read.parquet(path.getAbsolutePath)
val batchScanExec =
df.queryExecution.sparkPlan.collectFirst { case p: BatchScanExec => p }.get
val serializer = SparkEnv.get.serializer.newInstance()
val readback =
serializer.deserialize[BatchScanExec](serializer.serialize(batchScanExec))
try {
readback.canonicalized
} catch {
case e: Throwable => fail("BatchScanExec was not canonicalizable", e)
}
}
}
}
test("SPARK-25357 SparkPlanInfo of FileScan contains nonEmpty metadata") {
withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> "parquet") {
withTempPath { path =>
spark.range(5).write.parquet(path.getAbsolutePath)
val f = spark.read.parquet(path.getAbsolutePath)
assert(SparkPlanInfo.fromSparkPlan(f.queryExecution.sparkPlan).metadata.nonEmpty)
}
}
}
test("SPARK-30780 empty LocalTableScan should use RDD without partitions") {
assert(LocalTableScanExec(Nil, Nil).execute().getNumPartitions == 0)
}
}
|
Intel-bigdata/OAP
|
oap-native-sql/core/src/test/scala/org/apache/spark/sql/execution/SparkPlanSuite.scala
|
Scala
|
apache-2.0
| 4,837 |
/*
* Copyright 2014 Kevin Herron
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.digitalpetri.ethernetip.cip.epath
import com.digitalpetri.ethernetip.util.Buffers
import io.netty.buffer.ByteBuf
sealed abstract class DataSegment extends EPathSegment
object DataSegment {
val SegmentType = 0x80
}
case class AnsiDataSegment(data: String) extends DataSegment
case class SimpleDataSegment(data: Seq[Short]) extends DataSegment
object AnsiDataSegment {
val TypeByte = 0x91 // SegmentType + Ansi sub-type
def encode(segment: AnsiDataSegment, buffer: ByteBuf = Buffers.unpooled()): ByteBuf = {
val data: String = {
if (segment.data.length <= 255) segment.data
else segment.data.substring(0, 255)
}
buffer.writeByte(TypeByte)
buffer.writeByte(data.length)
buffer.writeBytes(data.getBytes("ASCII"))
if (data.length % 2 != 0) buffer.writeByte(0)
buffer
}
def decode(buffer: ByteBuf): AnsiDataSegment = {
val typeByte = buffer.readUnsignedByte()
assert(typeByte == TypeByte)
val length = buffer.readUnsignedByte()
assert(length >= 0 && length <= 255)
val bytes = buffer.readBytes(length)
if (length % 2 != 0) buffer.skipBytes(1)
AnsiDataSegment(new String(bytes.array(), "ASCII"))
}
}
object SimpleDataSegment {
val TypeByte = 0x80 // SegmentType + Simple sub-type
def encode(segment: SimpleDataSegment, buffer: ByteBuf = Buffers.unpooled()): ByteBuf = {
buffer.writeByte(TypeByte)
buffer.writeByte(segment.data.length)
segment.data.foreach(buffer.writeShort(_))
buffer
}
def decode(buffer: ByteBuf): SimpleDataSegment = {
val typeByte = buffer.readUnsignedByte()
assert(typeByte == TypeByte)
val length = buffer.readUnsignedByte()
assert(length >= 0 && length <= 255)
def readData(words: Seq[Short], remaining: Int): Seq[Short] = {
if (remaining == 0) words
else readData(words :+ buffer.readShort(), remaining - 1)
}
val data = readData(Seq.empty, length)
SimpleDataSegment(data)
}
}
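/**
 * Hedged round-trip sketch, not part of the original file: encodes a segment with the
 * encoders above and decodes it back. The tag string and object name are illustrative only.
 */
object DataSegmentRoundTripExample {
  def ansiRoundTrip(): AnsiDataSegment = {
    // "MyTag" has odd length, so encode appends a pad byte that decode skips again.
    val buffer = AnsiDataSegment.encode(AnsiDataSegment("MyTag"))
    AnsiDataSegment.decode(buffer)
  }
  def simpleRoundTrip(): SimpleDataSegment = {
    val buffer = SimpleDataSegment.encode(SimpleDataSegment(Seq[Short](1, 2, 3)))
    SimpleDataSegment.decode(buffer)
  }
}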
|
digitalpetri/scala-ethernet-ip
|
enip-core/src/main/scala/com/digitalpetri/ethernetip/cip/epath/DataSegment.scala
|
Scala
|
apache-2.0
| 2,576 |
import gruenewa.grid.GridRun
object Hello {
def main(args: Array[String]) {
    val func = (n: Int) => printf("Hello #%d!\n", n)
GridRun { gridify =>
(1 to 100).foreach(gridify(func))
}
}
}
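// Hedged variation (not part of the original sample): any side-effecting Int => Unit
// function can be distributed the same way, e.g.
//   GridRun { gridify => (1 to 10).foreach(gridify((n: Int) => println(n * n))) }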
|
gruenewa/gruenewa-grid
|
samples/Hello.scala
|
Scala
|
gpl-3.0
| 211 |
/**
* Copyright (c) 2013 Saddle Development Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
package org.saddle
import scala.{specialized => spec}
import vec._
import index._
import ops._
import scalar._
import util.Concat.Promoter
import java.io.OutputStream
/**
* `Vec` is an immutable container for 1D homogeneous data (a "vector"). It is
* backed by an array and indexed from 0 to length - 1.
*
* Several element access methods are provided.
*
* The `apply()` method returns a slice of the original vector:
*
* {{{
* val v = Vec(1,2,3,4)
* v(0) == Vec(1)
* v(1, 2) == Vec(2,3)
* }}}
*
* The `at` method returns an instance of a [[org.saddle.scalar.Scalar]], which behaves
* much like an `Option` in that it can be either an instance of [[org.saddle.scalar.NA]]
* or a [[org.saddle.scalar.Value]] case class:
*
* {{{
* Vec[Int](1,2,3,na).at(0) == Scalar(1)
* Vec[Int](1,2,3,na).at(3) == NA
* }}}
*
*
* The method `raw` accesses the underlying value directly.
*
* {{{
* Vec(1d,2,3).raw(0) == 1d
* }}}
*
 * `Vec` may be used in arithmetic expressions which operate on two `Vec`s or on a
* `Vec` and a scalar value. A few examples:
*
* {{{
* Vec(1,2,3,4) + Vec(2,3,4,5) == Vec(3,5,7,9)
* Vec(1,2,3,4) * 2 == Vec(2,4,6,8)
* }}}
*
* Note, Vec is implicitly convertible to an array for convenience; this could be
* abused to mutate the contents of the Vec. Try to avoid this!
*
* @tparam T Type of elements within the Vec
*/
trait Vec[@spec(Boolean, Int, Long, Double) T] extends NumericOps[Vec[T]] with Serializable{
/**
   * The number of elements in the container
*/
def length: Int
/**
* A ScalarTag in the type of the elements of the Vec
*/
def scalarTag: ScalarTag[T]
/**
* Danger - could expose internal NA's
*
* Access an element by location. This is made private because the internal
* representation might contain primitive NA's that need to be boxed so that
* they aren't utilized unknowingly in calculations.
*/
private[saddle] def apply(loc: Int): T
/**
* Set to true when the vec is shifted over the backing array
*/
protected def needsCopy: Boolean = false
// ----------
// get values
/**
* Access a boxed element of a Vec[A] at a single location
* @param loc offset into Vec
*/
def at(loc: Int): Scalar[T] = {
implicit val st = scalarTag
apply(loc)
}
/**
* Access an unboxed element of a Vec[A] at a single location
* @param loc offset into Vec
*/
def raw(loc: Int): T = {
apply(loc)
}
/**
* Slice a Vec at a sequence of locations, e.g.
*
* val v = Vec(1,2,3,4,5)
* v(1,3) == Vec(2,4)
*
* @param locs locations at which to slice
*/
def apply(locs: Int*): Vec[T] = take(locs.toArray)
/**
* Slice a Vec at a sequence of locations, e.g.
*
* val v = Vec(1,2,3,4,5)
* v(Array(1,3)) == Vec(2,4)
*
* @param locs locations at which to slice
*/
def apply(locs: Array[Int]): Vec[T] = take(locs)
/**
   * Slice a Vec over a range of locations, e.g.
*
* val v = Vec(1,2,3,4,5)
* v(1->3) == Vec(2,3,4)
*
* @param rng evaluates to IRange
*/
def apply(rng: Slice[Int]): Vec[T] = {
val idx = new IndexIntRange(length)
val pair = rng(idx)
slice(pair._1, pair._2)
}
/**
* Access the first element of a Vec[A], or NA if length is zero
*/
def first: Scalar[T] = {
implicit val st = scalarTag
if (length > 0) apply(0) else NA
}
/**
* Access the last element of a Vec[A], or NA if length is zero
*/
def last: Scalar[T] = {
implicit val st = scalarTag
if (length > 0) apply(length - 1) else NA
}
// ----------
/**
* Return copy of backing array
*/
def contents: Array[T] = copy.toArray
/**
* Return first n elements
* @param n Number of elements to access
*/
def head(n: Int): Vec[T] = slice(0, n)
/**
* Return last n elements
* @param n Number of elements to access
*/
def tail(n: Int): Vec[T] = slice(length - n, length)
/**
* True if and only if number of elements is zero
*/
def isEmpty: Boolean = length == 0
/**
* Equivalent to slicing operation; e.g.
*
* {{{
* val v = Vec(1,2,3)
* v.take(0,1) == v(0,1)
* }}}
*
* @param locs Location of elements to take
*/
def take(locs: Array[Int]): Vec[T]
/**
* The complement of the take operation; slice out
* elements NOT specified in list.
*
* @param locs Location of elements not to take
*/
def without(locs: Array[Int]): Vec[T]
/**
* Returns Vec whose locations corresponding to true entries in the
* boolean input mask vector are set to NA
*
* @param m Mask vector of Vec[Boolean]
*/
def mask(m: Vec[Boolean]): Vec[T] = VecImpl.mask(this, m, scalarTag.missing)(scalarTag)
/**
* Returns Vec whose locations are NA where the result of the
* provided function evaluates to true
*
* @param f A function taking an element and returning a Boolean
*/
def mask(f: T => Boolean): Vec[T] = VecImpl.mask(this, f, scalarTag.missing)(scalarTag)
/**
* Concatenate two Vec instances together, where there exists some way to
* join the type of elements. For instance, Vec[Double] concat Vec[Int]
* will promote Int to Double as a result of the implicit existence of an
* instance of Promoter[Double, Int, Double]
*
* @param v Vec[B] to concat
* @param wd Implicit evidence of Promoter[A, B, C]
* @param mc Implicit evidence of ST[C]
* @tparam B type of other Vec elements
* @tparam C type of resulting Vec elements
*/
def concat[B, C](v: Vec[B])(implicit wd: Promoter[T, B, C], mc: ST[C]): Vec[C]
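  // Illustrative note (not in the original source): given the promotion described above,
  // Vec(1d, 2d) concat Vec(3) yields a Vec[Double] equal to Vec(1d, 2d, 3d).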
/**
* Additive inverse of Vec with numeric elements
*
*/
def unary_-(): Vec[T]
// Must implement specialized methods independently of specialized class, workaround to
// https://issues.scala-lang.org/browse/SI-5281
/**
* Map a function over the elements of the Vec, as in scala collections library
*/
def map[@spec(Boolean, Int, Long, Double) B: ST](f: T => B): Vec[B]
/**
* Maps a function over elements of the Vec and flattens the result.
*/
def flatMap[@spec(Boolean, Int, Long, Double) B : ST](f: T => Vec[B]): Vec[B]
/**
* Left fold over the elements of the Vec, as in scala collections library
*/
def foldLeft[@spec(Boolean, Int, Long, Double) B: ST](init: B)(f: (B, T) => B): B
/**
* Left scan over the elements of the Vec, as in scala collections library
*/
def scanLeft[@spec(Boolean, Int, Long, Double) B: ST](init: B)(f: (B, T) => B): Vec[B]
/**
* Filtered left fold over the elements of the Vec, as in scala collections library
*/
def filterFoldLeft[@spec(Boolean, Int, Long, Double) B: ST](pred: T => Boolean)(init: B)(f: (B, T) => B): B
/**
* Filtered left scan over elements of the Vec, as in scala collections library
*/
def filterScanLeft[@spec(Boolean, Int, Long, Double) B: ST](pred: T => Boolean)(init: B)(f: (B, T) => B): Vec[B]
/**
* Left fold that folds only while the test condition holds true. As soon as the condition function yields
* false, the fold returns.
*
* @param cond Function whose signature is the same as the fold function, except that it evaluates to Boolean
*/
def foldLeftWhile[@spec(Boolean, Int, Long, Double) B: ST](init: B)(f: (B, T) => B)(cond: (B, T) => Boolean): B
/**
* Zips Vec with another Vec and applies a function to the paired elements. If either of the pair is NA, the
* result is forced to NA.
* @param other Vec[B]
* @param f Function (A, B) => C
* @tparam B Parameter of other Vec
* @tparam C Result of function
*/
def zipMap[@spec(Int, Long, Double) B: ST,
@spec(Boolean, Int, Long, Double) C: ST](other: Vec[B])(f: (T, B) => C): Vec[C]
/**
* Drop the elements of the Vec which are NA
*/
def dropNA: Vec[T]
/**
* Return true if there is an NA value in the Vec
*/
def hasNA: Boolean
/**
* Execute a (side-effecting) operation on each (non-NA) element in the vec
* @param op operation to execute
*/
def foreach(op: T => Unit) { VecImpl.foreach(this)(op)(scalarTag) }
/**
* Execute a (side-effecting) operation on each (non-NA) element in vec which satisfies
* some predicate.
* @param pred Function A => Boolean
* @param op Side-effecting function
*/
def forall(pred: T => Boolean)(op: T => Unit) { VecImpl.forall(this)(pred)(op)(scalarTag) }
/**
* Return Vec of integer locations (offsets) which satisfy some predicate
* @param pred Predicate function from A => Boolean
*/
def find(pred: T => Boolean): Vec[Int] = VecImpl.find(this)(pred)(scalarTag)
/**
* Return first integer location which satisfies some predicate, or -1 if there is none
* @param pred Predicate function from A => Boolean
*/
def findOne(pred: T => Boolean): Int = VecImpl.findOne(this)(pred)(scalarTag)
/**
* Return true if there exists some element of the Vec which satisfies the predicate function
* @param pred Predicate function from A => Boolean
*/
def exists(pred: T => Boolean): Boolean = findOne(pred) != -1
/**
* Return Vec whose elements satisfy a predicate function
* @param pred Predicate function from A => Boolean
*/
def filter(pred: T => Boolean): Vec[T] = VecImpl.filter(this)(pred)(scalarTag)
/**
   * Return Vec whose offsets satisfy a predicate function
* @param pred Predicate function from Int => Boolean
*/
def filterAt(pred: Int => Boolean): Vec[T] = VecImpl.filterAt(this)(pred)(scalarTag)
/**
* Return Vec whose elements are selected via a Vec of booleans (where that Vec holds the value true)
* @param pred Predicate vector: Vec[Boolean]
*/
def where(pred: Vec[Boolean]): Vec[T] = VecImpl.where(this)(pred.toArray)(scalarTag)
/**
* Produce a Vec whose entries are the result of executing a function on a sliding window of the
* data.
* @param winSz Window size
* @param f Function Vec[A] => B to operate on sliding window
* @tparam B Result type of function
*/
def rolling[@spec(Boolean, Int, Long, Double) B: ST](winSz: Int, f: Vec[T] => B): Vec[B]
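  // Hedged example (not in the original source): with a window of 2 and a summing function,
  // Vec(1, 2, 3, 4).rolling(2, (w: Vec[Int]) => w.foldLeft(0)(_ + _)) is expected to
  // yield Vec(3, 5, 7), one entry per full window.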
/**
* Yield a Vec whose elements have been sorted (in ascending order)
* @param ev evidence of Ordering[A]
*/
def sorted(implicit ev: ORD[T], st: ST[T]) = take(array.argsort(toArray))
/**
* Yield a Vec whose elements have been reversed from their original order
*/
def reversed: Vec[T] = {
implicit val tag = scalarTag
Vec(array.reverse(toArray))
}
/**
* Creates a view into original vector from an offset up to, but excluding,
* another offset. Data is not copied.
*
* @param from Beginning offset
* @param until One past ending offset
* @param stride Increment within slice
*/
def slice(from: Int, until: Int, stride: Int = 1): Vec[T]
/**
* Creates a view into original vector from an offset up to, and including,
* another offset. Data is not copied.
*
* @param from Beginning offset
* @param to Ending offset
* @param stride Increment within slice
*/
def sliceBy(from: Int, to: Int, stride: Int = 1): Vec[T] =
slice(from, to + stride, stride)
/**
* Split Vec into two Vecs at position i
* @param i Position at which to split Vec
*/
def splitAt(i: Int): (Vec[T], Vec[T]) = (slice(0, i), slice(i, length))
/**
* Creates a view into original Vec, but shifted so that n
* values at the beginning or end of the Vec are NA's. Data
* is not copied.
*
* @param n Number of offsets to shift
*/
def shift(n: Int): Vec[T]
/**
* Replaces all NA values for which there is a non-NA value at a lower offset
* with the corresponding highest-offset, non-NA value. E.g,
*
* {{{
* Vec(1, 2, NA, 3, NA).pad == Vec(1, 2, 2, 3, 3)
* Vec(NA, 1, 2, NA).pad == Vec(NA, 1, 2, 2)
* }}}
*
*/
def pad: Vec[T] = VecImpl.pad(this)(scalarTag)
/**
* Replaces all NA values for which there is a non-NA value at a lower offset
* with the corresponding highest-offset, non-NA value; but looking back only
* at most N positions.
*
* {{{
* Vec(1, 2, NA, 3, NA).padAtMost(1) == Vec(1, 2, 2, 3, 3)
* Vec(NA, 1, 2, NA).padAtMost(1) == Vec(NA, 1, 2, 2)
* Vec(1, NA, NA, 3, NA).padAtMost(1) == Vec(1, 1, NA, 3, 3)
* }}}
*
*/
def padAtMost(n: Int): Vec[T] = VecImpl.pad(this, n)(scalarTag)
/**
* Fills NA values in vector with result of a function which acts on the index of
* the particular NA value found
*
* @param f A function from Int => A; yields value for NA value at ith position
*/
def fillNA(f: Int => T): Vec[T] = VecImpl.vecfillNA(this)(f)(scalarTag)
/**
* Converts Vec to an indexed sequence (default implementation is immutable.Vector)
*
*/
def toSeq: IndexedSeq[T] = toArray.toIndexedSeq
/**
* Returns a Vec whose backing array has been copied
*/
protected def copy: Vec[T]
private[saddle] def toArray: Array[T]
private[saddle] def toDoubleArray(implicit na: NUM[T]): Array[Double] = {
val arr = toArray
val buf = new Array[Double](arr.length)
var i = 0
while(i < arr.length) {
buf(i) = scalarTag.toDouble(arr(i))
i += 1
}
buf
}
/** Default hashcode is simple rolling prime multiplication of sums of hashcodes for all values. */
override def hashCode(): Int = foldLeft(1)(_ * 31 + _.hashCode())
/**
* Default equality does an iterative, element-wise equality check of all values.
*
* NB: to avoid boxing, is overwritten in child classes
*/
override def equals(o: Any): Boolean = o match {
case rv: Vec[_] => (this eq rv) || (this.length == rv.length) && {
var i = 0
var eq = true
while(eq && i < this.length) {
eq &&= (apply(i) == rv(i) || this.scalarTag.isMissing(apply(i)) && rv.scalarTag.isMissing(rv(i)))
i += 1
}
eq
}
case _ => false
}
/**
* Creates a string representation of Vec
* @param len Max number of elements to include
*/
def stringify(len: Int = 10): String = {
val half = len / 2
val buf = new StringBuilder()
implicit val st = scalarTag
val maxf = (a: Int, b: String) => math.max(a, b.length)
if (length == 0)
buf append "Empty Vec"
else {
      buf.append("[%d x 1]\n" format (length))
      val vlen = { head(half) concat tail(half) }.map(scalarTag.show(_)).foldLeft(0)(maxf)
      def createRow(r: Int): String = ("%" + { if (vlen > 0) vlen else 1 } + "s\n").format(scalarTag.show(apply(r)))
      buf append util.buildStr(len, length, createRow, " ... \n" )
}
buf.toString()
}
/**
* Pretty-printer for Vec, which simply outputs the result of stringify.
* @param len Number of elements to display
*/
def print(len: Int = 10, stream: OutputStream = System.out) {
stream.write(stringify(len).getBytes)
}
override def toString = stringify()
}
object Vec extends BinOpVec with VecStatsImplicits with VecBoolEnricher {
// **** constructions
/**
* Factory method to create a Vec from an array of elements
*
* @param arr Array
* @tparam T Type of elements in array
*/
def apply[T](arr: Array[T])(implicit st: ST[T]): Vec[T] = st.makeVec(arr)
/**
* Factory method to create a Vec from a sequence of elements. For example,
*
* {{{
* Vec(1,2,3)
* Vec(Seq(1,2,3) : _*)
* }}}
*
* @param values Sequence
* @tparam T Type of elements in Vec
*/
def apply[T: ST](values: T*): Vec[T] = Vec(values.toArray)
/**
* Creates an empty Vec of type T.
*
* @tparam T Vec type parameter
*/
def empty[T: ST]: Vec[T] = Vec(Array.empty[T])
// **** conversions
// Vec is isomorphic to array
/**
* A Vec may be implicitly converted to an array. Use responsibly;
* please do not circumvent immutability of Vec class!
* @param s Vec
* @tparam T Type parameter of Vec
*/
implicit def vecToArray[T](s: Vec[T]) = s.toArray
/**
* An array may be implicitly converted to a Vec.
* @param arr Array
* @tparam T Type parameter of Array
*/
implicit def arrayToVec[T: ST](arr: Array[T]) = Vec(arr)
/**
   * A Vec may be implicitly ''widened'' to a Series.
*
* @param s Vec to widen to Series
* @tparam A Type of elements in Vec
*/
implicit def vecToSeries[A: ST](s: Vec[A]) = Series(s)
/**
* A Vec may be implicitly converted to a single column Mat
*/
implicit def vecToMat[A: ST](s: Vec[A]): Mat[A] = Mat(s)
}
|
jyt109/saddle
|
saddle-core/src/main/scala/org/saddle/Vec.scala
|
Scala
|
apache-2.0
| 17,157 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.hbase.data
import java.util.Date
import com.typesafe.scalalogging.LazyLogging
import org.geotools.data.{DataStoreFinder, Query, Transaction}
import org.geotools.filter.text.ecql.ECQL
import org.junit.runner.RunWith
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.hbase.data.HBaseDataStoreParams.{ConnectionParam, HBaseCatalogParam}
import org.locationtech.geomesa.index.conf.QueryHints
import org.locationtech.geomesa.index.iterators.StatsScan
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.locationtech.geomesa.utils.geotools.{FeatureUtils, SimpleFeatureTypes}
import org.locationtech.geomesa.utils.io.WithClose
import org.locationtech.geomesa.utils.stats.CountStat
import org.opengis.feature.simple.SimpleFeature
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class HBaseSamplingFilterTest extends Specification with LazyLogging {
import scala.collection.JavaConverters._
sequential
"Hbase" should {
"working with sampling" in {
val typeName = "testSampling"
val params = Map(
ConnectionParam.getName -> MiniCluster.connection,
HBaseCatalogParam.getName -> getClass.getSimpleName
)
val ds = DataStoreFinder.getDataStore(params.asJava).asInstanceOf[HBaseDataStore]
ds must not(beNull)
try {
ds.getSchema(typeName) must beNull
ds.createSchema(SimpleFeatureTypes.createType(typeName,
"name:String,track:String,dtg:Date,*geom:Point:srid=4326;geomesa.indices.enabled=s2:geom"))
val sft = ds.getSchema(typeName)
val features =
(0 until 10).map { i =>
ScalaSimpleFeature.create(sft, s"$i", s"name$i", "track1", s"2010-05-07T0$i:00:00.000Z", s"POINT(40 6$i)")
} ++ (10 until 20).map { i =>
ScalaSimpleFeature.create(sft, s"$i", s"name$i", "track2", s"2010-05-${i}T$i:00:00.000Z", s"POINT(40 6${i - 10})")
} ++ (20 until 30).map { i =>
ScalaSimpleFeature.create(sft, s"$i", s"name$i", "track3", s"2010-05-${i}T${i-10}:00:00.000Z", s"POINT(40 8${i - 20})")
}
WithClose(ds.getFeatureWriterAppend(typeName, Transaction.AUTO_COMMIT)) { writer =>
features.foreach(f => FeatureUtils.write(writer, f, useProvidedFid = true))
}
def runQuery(query: Query): Seq[SimpleFeature] =
SelfClosingIterator(ds.getFeatureReader(query, Transaction.AUTO_COMMIT)).toList
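        // Descriptive note (not in the original test): the blocks below exercise every
        // combination of ECQL filter, attribute transform (property names) and SAMPLE_BY
        // grouping together with the SAMPLING hint, asserting 30 features unsampled,
        // 12 when sampled per track and 4 when sampled without grouping.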
{
// 0
//sampling disabled
          //this filter matches all features; without sampling the result size should be 30
val filter = "bbox(geom, -179, -89, 179, 89)"+
" AND dtg between '2009-05-07T00:00:00.000Z' and '2011-05-08T00:00:00.000Z'"
val query = new Query(sft.getTypeName, ECQL.toFilter(filter), Array("name","track"))
val features = runQuery(query)
features must haveSize(30)
}
{
// 1
//filter enabled
          //transformer disabled
          //sample-by enabled
          //this filter matches all features needed to test sampling; unsampled it would return 30
val filter = "bbox(geom, -179, -89, 179, 89)"+
" AND dtg between '2009-05-07T00:00:00.000Z' and '2011-05-08T00:00:00.000Z'"
val query = new Query(sft.getTypeName, ECQL.toFilter(filter))
query.getHints.put(QueryHints.SAMPLING, 0.1f)
query.getHints.put(QueryHints.SAMPLE_BY, "track")
val features = runQuery(query)
features must haveSize(12)
features(0).getAttribute("dtg") must not beNull
features.filter(p=>p.getAttribute("track").equals("track1")).size must greaterThan(1)
features.filter(p=>p.getAttribute("track").equals("track2")).size must greaterThan(1)
features.filter(p=>p.getAttribute("track").equals("track3")).size must greaterThan(1)
}
{
// 2
//filter enabled
          //transformer enabled
          //sample-by enabled
          //this filter matches all features needed to test sampling
val filter = "bbox(geom, -179, -89, 179, 89)"+
" AND dtg between '2009-05-07T00:00:00.000Z' and '2011-05-08T00:00:00.000Z'"
val query = new Query(sft.getTypeName, ECQL.toFilter(filter), Array("name","track"))
query.getHints.put(QueryHints.SAMPLING, 0.1f)
query.getHints.put(QueryHints.SAMPLE_BY, "track")
val features = runQuery(query)
features must haveSize(12)
features(0).getAttribute("dtg") must beNull
features.filter(p=>p.getAttribute("track").equals("track1")).size must greaterThan(1)
features.filter(p=>p.getAttribute("track").equals("track2")).size must greaterThan(1)
features.filter(p=>p.getAttribute("track").equals("track3")).size must greaterThan(1)
}
{
// 3
//filter disabled
          //transformer enabled
//sample-by enabled
val query = new Query(sft.getTypeName)
query.setPropertyNames(Array("name","track"))
query.getHints.put(QueryHints.SAMPLING, 0.1f)
query.getHints.put(QueryHints.SAMPLE_BY, "track")
val features = runQuery(query)
features must haveSize(12)
features(0).getAttribute("dtg") must beNull
features.filter(p=>p.getAttribute("track").equals("track1")).size must greaterThan(1)
features.filter(p=>p.getAttribute("track").equals("track2")).size must greaterThan(1)
features.filter(p=>p.getAttribute("track").equals("track3")).size must greaterThan(1)
}
{
// 4
//filter disabled
          //transformer disabled
//sample-by enabled
val query = new Query(sft.getTypeName)
query.getHints.put(QueryHints.SAMPLING, 0.1f)
query.getHints.put(QueryHints.SAMPLE_BY, "track")
val features = runQuery(query)
features must haveSize(12)
features(0).getAttribute("dtg") must not beNull
features.filter(p=>p.getAttribute("track").equals("track1")).size must greaterThan(1)
features.filter(p=>p.getAttribute("track").equals("track2")).size must greaterThan(1)
features.filter(p=>p.getAttribute("track").equals("track3")).size must greaterThan(1)
}
{
// 5
//filter enabled
          //transformer disabled
          //sample-by disabled
          //this filter matches all features needed to test sampling
val filter = "bbox(geom, -179, -89, 179, 89)"+
" AND dtg between '2009-05-07T00:00:00.000Z' and '2011-05-08T00:00:00.000Z'"
val query = new Query(sft.getTypeName, ECQL.toFilter(filter))
query.getHints.put(QueryHints.SAMPLING, 0.1f)
val features = runQuery(query)
features must haveSize(4)
features(0).getAttribute("dtg") must not beNull
}
{
// 6
//filter enabled
          //transformer enabled
          //sample-by disabled
          //this filter matches all features needed to test sampling
val filter = "bbox(geom, -179, -89, 179, 89)"+
" AND dtg between '2009-05-07T00:00:00.000Z' and '2011-05-08T00:00:00.000Z'"
val query = new Query(sft.getTypeName, ECQL.toFilter(filter), Array("name","track"))
query.getHints.put(QueryHints.SAMPLING, 0.1f)
val features = runQuery(query)
features must haveSize(4)
features(0).getAttribute("dtg") must beNull
}
{
// 7
//filter disabled
          //transformer enabled
//sample-by disabled
val query = new Query(sft.getTypeName)
query.setPropertyNames(Array("name","track"))
query.getHints.put(QueryHints.SAMPLING, 0.1f)
val features = runQuery(query)
features must haveSize(4)
features(0).getAttribute("dtg") must beNull
}
{
// 8
//filter disabled
          //transformer disabled
//sample-by disabled
val query = new Query(sft.getTypeName)
query.getHints.put(QueryHints.SAMPLING, 0.1f)
val features = runQuery(query)
features must haveSize(4)
features(0).getAttribute("dtg") must not beNull
}
{
//check interaction with aggregations
val query = new Query(sft.getTypeName)
query.getHints.put(QueryHints.STATS_STRING, "Count()")
query.getHints.put(QueryHints.ENCODE_STATS, java.lang.Boolean.TRUE)
query.getHints.put(QueryHints.SAMPLING, 0.1f)
val features = runQuery(query)
val stat:CountStat = StatsScan.decodeStat(sft)(ds.getFeatureReader(query, Transaction.AUTO_COMMIT)
.next.getAttribute(0).asInstanceOf[String]).asInstanceOf[CountStat]
stat.count must beEqualTo(4)
}
} finally {
ds.dispose()
}
}
}
}
|
locationtech/geomesa
|
geomesa-hbase/geomesa-hbase-datastore/src/test/scala/org/locationtech/geomesa/hbase/data/HBaseSamplingFilterTest.scala
|
Scala
|
apache-2.0
| 9,660 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.coordinator.transaction
import java.nio.ByteBuffer
import java.util.concurrent.locks.ReentrantLock
import kafka.log.Log
import kafka.server.{FetchDataInfo, LogOffsetMetadata, ReplicaManager}
import kafka.utils.{MockScheduler, Pool}
import org.scalatest.Assertions.fail
import kafka.zk.KafkaZkClient
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.internals.Topic.TRANSACTION_STATE_TOPIC_NAME
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.record._
import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse
import org.apache.kafka.common.requests.TransactionResult
import org.apache.kafka.common.utils.MockTime
import org.junit.Assert.{assertEquals, assertFalse, assertTrue}
import org.junit.{After, Before, Test}
import org.easymock.{Capture, EasyMock, IAnswer}
import scala.collection.Map
import scala.collection.mutable
import scala.collection.JavaConverters._
class TransactionStateManagerTest {
val partitionId = 0
val numPartitions = 2
val transactionTimeoutMs: Int = 1000
val topicPartition = new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, partitionId)
val coordinatorEpoch = 10
val txnRecords: mutable.ArrayBuffer[SimpleRecord] = mutable.ArrayBuffer[SimpleRecord]()
val time = new MockTime()
val scheduler = new MockScheduler(time)
val zkClient: KafkaZkClient = EasyMock.createNiceMock(classOf[KafkaZkClient])
val replicaManager: ReplicaManager = EasyMock.createNiceMock(classOf[ReplicaManager])
EasyMock.expect(zkClient.getTopicPartitionCount(TRANSACTION_STATE_TOPIC_NAME))
.andReturn(Some(numPartitions))
.anyTimes()
EasyMock.replay(zkClient)
val txnConfig = TransactionConfig()
val transactionManager: TransactionStateManager = new TransactionStateManager(0, zkClient, scheduler, replicaManager, txnConfig, time)
val transactionalId1: String = "one"
val transactionalId2: String = "two"
val txnMessageKeyBytes1: Array[Byte] = TransactionLog.keyToBytes(transactionalId1)
val txnMessageKeyBytes2: Array[Byte] = TransactionLog.keyToBytes(transactionalId2)
val producerIds: Map[String, Long] = Map[String, Long](transactionalId1 -> 1L, transactionalId2 -> 2L)
var txnMetadata1: TransactionMetadata = transactionMetadata(transactionalId1, producerIds(transactionalId1))
var txnMetadata2: TransactionMetadata = transactionMetadata(transactionalId2, producerIds(transactionalId2))
var expectedError: Errors = Errors.NONE
@Before
def setUp() {
    // make sure the transactional ids hash to the assigned partition id
assertEquals(partitionId, transactionManager.partitionFor(transactionalId1))
assertEquals(partitionId, transactionManager.partitionFor(transactionalId2))
}
@After
def tearDown() {
EasyMock.reset(zkClient, replicaManager)
transactionManager.shutdown()
}
@Test
def testValidateTransactionTimeout() {
assertTrue(transactionManager.validateTransactionTimeoutMs(1))
assertFalse(transactionManager.validateTransactionTimeoutMs(-1))
assertFalse(transactionManager.validateTransactionTimeoutMs(0))
assertTrue(transactionManager.validateTransactionTimeoutMs(txnConfig.transactionMaxTimeoutMs))
assertFalse(transactionManager.validateTransactionTimeoutMs(txnConfig.transactionMaxTimeoutMs + 1))
}
@Test
def testAddGetPids() {
transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new Pool[String, TransactionMetadata]())
assertEquals(Right(None), transactionManager.getTransactionState(transactionalId1))
assertEquals(Right(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1)),
transactionManager.putTransactionStateIfNotExists(transactionalId1, txnMetadata1))
assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))),
transactionManager.getTransactionState(transactionalId1))
assertEquals(Right(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1)),
transactionManager.putTransactionStateIfNotExists(transactionalId1, txnMetadata2))
}
@Test
def testLoadAndRemoveTransactionsForPartition() {
    // generate transaction log messages for two producer id (pid) traces:
// pid1's transaction started with two partitions
txnMetadata1.state = Ongoing
txnMetadata1.addPartitions(Set[TopicPartition](new TopicPartition("topic1", 0),
new TopicPartition("topic1", 1)))
txnRecords += new SimpleRecord(txnMessageKeyBytes1, TransactionLog.valueToBytes(txnMetadata1.prepareNoTransit()))
// pid1's transaction adds three more partitions
txnMetadata1.addPartitions(Set[TopicPartition](new TopicPartition("topic2", 0),
new TopicPartition("topic2", 1),
new TopicPartition("topic2", 2)))
txnRecords += new SimpleRecord(txnMessageKeyBytes1, TransactionLog.valueToBytes(txnMetadata1.prepareNoTransit()))
// pid1's transaction is preparing to commit
txnMetadata1.state = PrepareCommit
txnRecords += new SimpleRecord(txnMessageKeyBytes1, TransactionLog.valueToBytes(txnMetadata1.prepareNoTransit()))
// pid2's transaction started with three partitions
txnMetadata2.state = Ongoing
txnMetadata2.addPartitions(Set[TopicPartition](new TopicPartition("topic3", 0),
new TopicPartition("topic3", 1),
new TopicPartition("topic3", 2)))
txnRecords += new SimpleRecord(txnMessageKeyBytes2, TransactionLog.valueToBytes(txnMetadata2.prepareNoTransit()))
// pid2's transaction is preparing to abort
txnMetadata2.state = PrepareAbort
txnRecords += new SimpleRecord(txnMessageKeyBytes2, TransactionLog.valueToBytes(txnMetadata2.prepareNoTransit()))
// pid2's transaction has aborted
txnMetadata2.state = CompleteAbort
txnRecords += new SimpleRecord(txnMessageKeyBytes2, TransactionLog.valueToBytes(txnMetadata2.prepareNoTransit()))
// pid2's epoch has advanced, with no ongoing transaction yet
txnMetadata2.state = Empty
txnMetadata2.topicPartitions.clear()
txnRecords += new SimpleRecord(txnMessageKeyBytes2, TransactionLog.valueToBytes(txnMetadata2.prepareNoTransit()))
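    // Descriptive note (not in the original test): the records above replay the full state
    // history of both transactional ids; loading the partition below should leave only the
    // latest metadata for each id in the cache, which the assertions later verify.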
val startOffset = 15L // it should work for any start offset
val records = MemoryRecords.withRecords(startOffset, CompressionType.NONE, txnRecords: _*)
prepareTxnLog(topicPartition, startOffset, records)
// this partition should not be part of the owned partitions
transactionManager.getTransactionState(transactionalId1).fold(
err => assertEquals(Errors.NOT_COORDINATOR, err),
_ => fail(transactionalId1 + "'s transaction state is already in the cache")
)
transactionManager.getTransactionState(transactionalId2).fold(
err => assertEquals(Errors.NOT_COORDINATOR, err),
_ => fail(transactionalId2 + "'s transaction state is already in the cache")
)
transactionManager.loadTransactionsForTxnTopicPartition(partitionId, 0, (_, _, _, _, _) => ())
// let the time advance to trigger the background thread loading
scheduler.tick()
transactionManager.getTransactionState(transactionalId1).fold(
err => fail(transactionalId1 + "'s transaction state access returns error " + err),
entry => entry.getOrElse(fail(transactionalId1 + "'s transaction state was not loaded into the cache"))
)
val cachedPidMetadata1 = transactionManager.getTransactionState(transactionalId1).fold(
err => fail(transactionalId1 + "'s transaction state access returns error " + err),
entry => entry.getOrElse(fail(transactionalId1 + "'s transaction state was not loaded into the cache"))
)
val cachedPidMetadata2 = transactionManager.getTransactionState(transactionalId2).fold(
err => fail(transactionalId2 + "'s transaction state access returns error " + err),
entry => entry.getOrElse(fail(transactionalId2 + "'s transaction state was not loaded into the cache"))
)
// they should be equal to the latest status of the transaction
assertEquals(txnMetadata1, cachedPidMetadata1.transactionMetadata)
assertEquals(txnMetadata2, cachedPidMetadata2.transactionMetadata)
transactionManager.removeTransactionsForTxnTopicPartition(partitionId, coordinatorEpoch)
// let the time advance to trigger the background thread removing
scheduler.tick()
transactionManager.getTransactionState(transactionalId1).fold(
err => assertEquals(Errors.NOT_COORDINATOR, err),
_ => fail(transactionalId1 + "'s transaction state is still in the cache")
)
transactionManager.getTransactionState(transactionalId2).fold(
err => assertEquals(Errors.NOT_COORDINATOR, err),
_ => fail(transactionalId2 + "'s transaction state is still in the cache")
)
}
@Test
def testCompleteTransitionWhenAppendSucceeded(): Unit = {
transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new Pool[String, TransactionMetadata]())
// first insert the initial transaction metadata
transactionManager.putTransactionStateIfNotExists(transactionalId1, txnMetadata1)
prepareForTxnMessageAppend(Errors.NONE)
expectedError = Errors.NONE
// update the metadata to ongoing with two partitions
val newMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic1", 0),
new TopicPartition("topic1", 1)), time.milliseconds())
// append the new metadata into log
transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch, newMetadata, assertCallback)
assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1))
assertTrue(txnMetadata1.pendingState.isEmpty)
}
@Test
def testAppendFailToCoordinatorNotAvailableError(): Unit = {
transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new Pool[String, TransactionMetadata]())
transactionManager.putTransactionStateIfNotExists(transactionalId1, txnMetadata1)
expectedError = Errors.COORDINATOR_NOT_AVAILABLE
var failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds())
prepareForTxnMessageAppend(Errors.UNKNOWN_TOPIC_OR_PARTITION)
transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback)
assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1))
assertTrue(txnMetadata1.pendingState.isEmpty)
failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds())
prepareForTxnMessageAppend(Errors.NOT_ENOUGH_REPLICAS)
transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback)
assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1))
assertTrue(txnMetadata1.pendingState.isEmpty)
failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds())
prepareForTxnMessageAppend(Errors.NOT_ENOUGH_REPLICAS_AFTER_APPEND)
transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback)
assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1))
assertTrue(txnMetadata1.pendingState.isEmpty)
failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds())
prepareForTxnMessageAppend(Errors.REQUEST_TIMED_OUT)
transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback)
assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1))
assertTrue(txnMetadata1.pendingState.isEmpty)
}
@Test
def testAppendFailToNotCoordinatorError(): Unit = {
transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new Pool[String, TransactionMetadata]())
transactionManager.putTransactionStateIfNotExists(transactionalId1, txnMetadata1)
expectedError = Errors.NOT_COORDINATOR
var failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds())
prepareForTxnMessageAppend(Errors.NOT_LEADER_FOR_PARTITION)
transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback)
assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1))
assertTrue(txnMetadata1.pendingState.isEmpty)
failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds())
prepareForTxnMessageAppend(Errors.NONE)
transactionManager.removeTransactionsForTxnTopicPartition(partitionId, coordinatorEpoch)
transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback)
prepareForTxnMessageAppend(Errors.NONE)
transactionManager.removeTransactionsForTxnTopicPartition(partitionId, coordinatorEpoch)
transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch + 1, new Pool[String, TransactionMetadata]())
transactionManager.putTransactionStateIfNotExists(transactionalId1, txnMetadata1)
transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback)
prepareForTxnMessageAppend(Errors.NONE)
transactionManager.removeTransactionsForTxnTopicPartition(partitionId, coordinatorEpoch)
transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new Pool[String, TransactionMetadata]())
transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback)
}
@Test
def testAppendFailToCoordinatorLoadingError(): Unit = {
transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new Pool[String, TransactionMetadata]())
transactionManager.putTransactionStateIfNotExists(transactionalId1, txnMetadata1)
expectedError = Errors.COORDINATOR_LOAD_IN_PROGRESS
val failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds())
prepareForTxnMessageAppend(Errors.NONE)
transactionManager.removeTransactionsForTxnTopicPartition(partitionId, coordinatorEpoch)
transactionManager.addLoadingPartition(partitionId, coordinatorEpoch + 1)
transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback)
}
@Test
  def testAppendFailToUnknownError(): Unit = {
transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new Pool[String, TransactionMetadata]())
transactionManager.putTransactionStateIfNotExists(transactionalId1, txnMetadata1)
expectedError = Errors.UNKNOWN_SERVER_ERROR
var failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds())
prepareForTxnMessageAppend(Errors.MESSAGE_TOO_LARGE)
transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback)
assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1))
assertTrue(txnMetadata1.pendingState.isEmpty)
failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds())
prepareForTxnMessageAppend(Errors.RECORD_LIST_TOO_LARGE)
transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback)
assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1))
assertTrue(txnMetadata1.pendingState.isEmpty)
}
@Test
  def testPendingStateNotResetOnRetryAppend(): Unit = {
transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new Pool[String, TransactionMetadata]())
transactionManager.putTransactionStateIfNotExists(transactionalId1, txnMetadata1)
expectedError = Errors.COORDINATOR_NOT_AVAILABLE
val failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds())
prepareForTxnMessageAppend(Errors.UNKNOWN_TOPIC_OR_PARTITION)
transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback, _ => true)
assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1))
assertEquals(Some(Ongoing), txnMetadata1.pendingState)
}
@Test
  def testAppendTransactionToLogWhileProducerFenced(): Unit = {
transactionManager.addLoadedTransactionsToCache(partitionId, 0, new Pool[String, TransactionMetadata]())
// first insert the initial transaction metadata
transactionManager.putTransactionStateIfNotExists(transactionalId1, txnMetadata1)
prepareForTxnMessageAppend(Errors.NONE)
expectedError = Errors.NOT_COORDINATOR
val newMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic1", 0),
new TopicPartition("topic1", 1)), time.milliseconds())
// modify the cache while trying to append the new metadata
txnMetadata1.producerEpoch = (txnMetadata1.producerEpoch + 1).toShort
// append the new metadata into log
transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, newMetadata, assertCallback)
}
@Test(expected = classOf[IllegalStateException])
  def testAppendTransactionToLogWhilePendingStateChanged(): Unit = {
// first insert the initial transaction metadata
transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new Pool[String, TransactionMetadata]())
transactionManager.putTransactionStateIfNotExists(transactionalId1, txnMetadata1)
prepareForTxnMessageAppend(Errors.NONE)
expectedError = Errors.INVALID_PRODUCER_EPOCH
val newMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic1", 0),
new TopicPartition("topic1", 1)), time.milliseconds())
// modify the cache while trying to append the new metadata
txnMetadata1.pendingState = None
// append the new metadata into log
transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, newMetadata, assertCallback)
}
@Test
  def shouldReturnNotCoordinatorErrorIfTransactionIdPartitionNotOwned(): Unit = {
transactionManager.getTransactionState(transactionalId1).fold(
err => assertEquals(Errors.NOT_COORDINATOR, err),
_ => fail(transactionalId1 + "'s transaction state is already in the cache")
)
}
@Test
def shouldOnlyConsiderTransactionsInTheOngoingStateToAbort(): Unit = {
for (partitionId <- 0 until numPartitions) {
transactionManager.addLoadedTransactionsToCache(partitionId, 0, new Pool[String, TransactionMetadata]())
}
transactionManager.putTransactionStateIfNotExists("ongoing", transactionMetadata("ongoing", producerId = 0, state = Ongoing))
transactionManager.putTransactionStateIfNotExists("not-expiring", transactionMetadata("not-expiring", producerId = 1, state = Ongoing, txnTimeout = 10000))
transactionManager.putTransactionStateIfNotExists("prepare-commit", transactionMetadata("prepare-commit", producerId = 2, state = PrepareCommit))
transactionManager.putTransactionStateIfNotExists("prepare-abort", transactionMetadata("prepare-abort", producerId = 3, state = PrepareAbort))
transactionManager.putTransactionStateIfNotExists("complete-commit", transactionMetadata("complete-commit", producerId = 4, state = CompleteCommit))
transactionManager.putTransactionStateIfNotExists("complete-abort", transactionMetadata("complete-abort", producerId = 5, state = CompleteAbort))
time.sleep(2000)
val expiring = transactionManager.timedOutTransactions()
assertEquals(List(TransactionalIdAndProducerIdEpoch("ongoing", 0, 0)), expiring)
}
@Test
def shouldWriteTxnMarkersForTransactionInPreparedCommitState(): Unit = {
verifyWritesTxnMarkersInPrepareState(PrepareCommit)
}
@Test
def shouldWriteTxnMarkersForTransactionInPreparedAbortState(): Unit = {
verifyWritesTxnMarkersInPrepareState(PrepareAbort)
}
@Test
  def shouldRemoveCompleteCommitExpiredTransactionalIds(): Unit = {
setupAndRunTransactionalIdExpiration(Errors.NONE, CompleteCommit)
verifyMetadataDoesntExist(transactionalId1)
verifyMetadataDoesExistAndIsUsable(transactionalId2)
}
@Test
def shouldRemoveCompleteAbortExpiredTransactionalIds(): Unit = {
setupAndRunTransactionalIdExpiration(Errors.NONE, CompleteAbort)
verifyMetadataDoesntExist(transactionalId1)
verifyMetadataDoesExistAndIsUsable(transactionalId2)
}
@Test
def shouldRemoveEmptyExpiredTransactionalIds(): Unit = {
setupAndRunTransactionalIdExpiration(Errors.NONE, Empty)
verifyMetadataDoesntExist(transactionalId1)
verifyMetadataDoesExistAndIsUsable(transactionalId2)
}
@Test
def shouldNotRemoveExpiredTransactionalIdsIfLogAppendFails(): Unit = {
setupAndRunTransactionalIdExpiration(Errors.NOT_ENOUGH_REPLICAS, CompleteAbort)
verifyMetadataDoesExistAndIsUsable(transactionalId1)
verifyMetadataDoesExistAndIsUsable(transactionalId2)
}
@Test
def shouldNotRemoveOngoingTransactionalIds(): Unit = {
setupAndRunTransactionalIdExpiration(Errors.NONE, Ongoing)
verifyMetadataDoesExistAndIsUsable(transactionalId1)
verifyMetadataDoesExistAndIsUsable(transactionalId2)
}
@Test
def shouldNotRemovePrepareAbortTransactionalIds(): Unit = {
setupAndRunTransactionalIdExpiration(Errors.NONE, PrepareAbort)
verifyMetadataDoesExistAndIsUsable(transactionalId1)
verifyMetadataDoesExistAndIsUsable(transactionalId2)
}
@Test
def shouldNotRemovePrepareCommitTransactionalIds(): Unit = {
setupAndRunTransactionalIdExpiration(Errors.NONE, PrepareCommit)
verifyMetadataDoesExistAndIsUsable(transactionalId1)
verifyMetadataDoesExistAndIsUsable(transactionalId2)
}
private def verifyMetadataDoesExistAndIsUsable(transactionalId: String) = {
transactionManager.getTransactionState(transactionalId) match {
case Left(errors) => fail("shouldn't have been any errors")
case Right(None) => fail("metadata should have been removed")
case Right(Some(metadata)) =>
assertTrue("metadata shouldn't be in a pending state", metadata.transactionMetadata.pendingState.isEmpty)
}
}
private def verifyMetadataDoesntExist(transactionalId: String) = {
transactionManager.getTransactionState(transactionalId) match {
case Left(errors) => fail("shouldn't have been any errors")
      case Right(Some(_)) => fail("metadata should have been removed")
case Right(None) => // ok
}
}
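  // Marks txnMetadata1 as expired in the given state while keeping txnMetadata2 fresh,
  // runs the transactional-id expiration task, and expects a tombstone append to the
  // transaction log only for the Empty, CompleteCommit and CompleteAbort states.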
private def setupAndRunTransactionalIdExpiration(error: Errors, txnState: TransactionState) = {
for (partitionId <- 0 until numPartitions) {
transactionManager.addLoadedTransactionsToCache(partitionId, 0, new Pool[String, TransactionMetadata]())
}
val capturedArgument: Capture[Map[TopicPartition, PartitionResponse] => Unit] = EasyMock.newCapture()
val partition = new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, transactionManager.partitionFor(transactionalId1))
val recordsByPartition = Map(partition -> MemoryRecords.withRecords(TransactionLog.EnforcedCompressionType,
new SimpleRecord(time.milliseconds() + txnConfig.removeExpiredTransactionalIdsIntervalMs, TransactionLog.keyToBytes(transactionalId1), null)))
txnState match {
case Empty | CompleteCommit | CompleteAbort =>
EasyMock.expect(replicaManager.appendRecords(EasyMock.anyLong(),
EasyMock.eq((-1).toShort),
EasyMock.eq(true),
EasyMock.eq(false),
EasyMock.eq(recordsByPartition),
EasyMock.capture(capturedArgument),
EasyMock.anyObject().asInstanceOf[Option[ReentrantLock]],
EasyMock.anyObject()
)).andAnswer(new IAnswer[Unit] {
override def answer(): Unit = {
capturedArgument.getValue.apply(
Map(partition ->
new PartitionResponse(error, 0L, RecordBatch.NO_TIMESTAMP, 0L)
)
)
}
})
case _ => // shouldn't append
}
EasyMock.replay(replicaManager)
txnMetadata1.txnLastUpdateTimestamp = time.milliseconds() - txnConfig.transactionalIdExpirationMs
txnMetadata1.state = txnState
transactionManager.putTransactionStateIfNotExists(transactionalId1, txnMetadata1)
txnMetadata2.txnLastUpdateTimestamp = time.milliseconds()
transactionManager.putTransactionStateIfNotExists(transactionalId2, txnMetadata2)
transactionManager.enableTransactionalIdExpiration()
time.sleep(txnConfig.removeExpiredTransactionalIdsIntervalMs)
scheduler.tick()
EasyMock.verify(replicaManager)
}
private def verifyWritesTxnMarkersInPrepareState(state: TransactionState): Unit = {
txnMetadata1.state = state
txnMetadata1.addPartitions(Set[TopicPartition](new TopicPartition("topic1", 0),
new TopicPartition("topic1", 1)))
txnRecords += new SimpleRecord(txnMessageKeyBytes1, TransactionLog.valueToBytes(txnMetadata1.prepareNoTransit()))
val startOffset = 0L
val records = MemoryRecords.withRecords(startOffset, CompressionType.NONE, txnRecords: _*)
prepareTxnLog(topicPartition, 0, records)
var txnId: String = null
def rememberTxnMarkers(transactionalId: String,
coordinatorEpoch: Int,
command: TransactionResult,
metadata: TransactionMetadata,
newMetadata: TxnTransitMetadata): Unit = {
txnId = transactionalId
}
transactionManager.loadTransactionsForTxnTopicPartition(partitionId, 0, rememberTxnMarkers)
scheduler.tick()
assertEquals(transactionalId1, txnId)
}
private def assertCallback(error: Errors): Unit = {
assertEquals(expectedError, error)
}
private def transactionMetadata(transactionalId: String,
producerId: Long,
state: TransactionState = Empty,
txnTimeout: Int = transactionTimeoutMs): TransactionMetadata = {
TransactionMetadata(transactionalId, producerId, 0.toShort, txnTimeout, state, time.milliseconds())
}
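  // Stubs the replica manager and log reads so that loading the transaction topic
  // partition returns exactly `records` starting at `startOffset`.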
private def prepareTxnLog(topicPartition: TopicPartition,
startOffset: Long,
records: MemoryRecords): Unit = {
EasyMock.reset(replicaManager)
val logMock: Log = EasyMock.mock(classOf[Log])
val fileRecordsMock: FileRecords = EasyMock.mock(classOf[FileRecords])
val endOffset = startOffset + records.records.asScala.size
EasyMock.expect(replicaManager.getLog(topicPartition)).andStubReturn(Some(logMock))
EasyMock.expect(replicaManager.getLogEndOffset(topicPartition)).andStubReturn(Some(endOffset))
EasyMock.expect(logMock.logStartOffset).andStubReturn(startOffset)
EasyMock.expect(logMock.read(EasyMock.eq(startOffset),
maxLength = EasyMock.anyInt(),
maxOffset = EasyMock.eq(None),
minOneMessage = EasyMock.eq(true),
includeAbortedTxns = EasyMock.eq(false)))
.andReturn(FetchDataInfo(LogOffsetMetadata(startOffset), fileRecordsMock))
EasyMock.expect(fileRecordsMock.sizeInBytes()).andStubReturn(records.sizeInBytes)
val bufferCapture = EasyMock.newCapture[ByteBuffer]
fileRecordsMock.readInto(EasyMock.capture(bufferCapture), EasyMock.anyInt())
EasyMock.expectLastCall().andAnswer(new IAnswer[Unit] {
override def answer: Unit = {
val buffer = bufferCapture.getValue
buffer.put(records.buffer.duplicate)
buffer.flip()
}
})
EasyMock.replay(logMock, fileRecordsMock, replicaManager)
}
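  // Stubs replicaManager.appendRecords so that the append callback is invoked
  // immediately with the given error for the transaction state topic partition.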
private def prepareForTxnMessageAppend(error: Errors): Unit = {
EasyMock.reset(replicaManager)
val capturedArgument: Capture[Map[TopicPartition, PartitionResponse] => Unit] = EasyMock.newCapture()
EasyMock.expect(replicaManager.appendRecords(EasyMock.anyLong(),
EasyMock.anyShort(),
internalTopicsAllowed = EasyMock.eq(true),
isFromClient = EasyMock.eq(false),
EasyMock.anyObject().asInstanceOf[Map[TopicPartition, MemoryRecords]],
EasyMock.capture(capturedArgument),
EasyMock.anyObject().asInstanceOf[Option[ReentrantLock]],
EasyMock.anyObject())
).andAnswer(new IAnswer[Unit] {
override def answer(): Unit = capturedArgument.getValue.apply(
Map(new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, partitionId) ->
new PartitionResponse(error, 0L, RecordBatch.NO_TIMESTAMP, 0L)
)
)
}
)
EasyMock.expect(replicaManager.getMagic(EasyMock.anyObject()))
.andStubReturn(Some(RecordBatch.MAGIC_VALUE_V1))
EasyMock.replay(replicaManager)
}
}
|
KevinLiLu/kafka
|
core/src/test/scala/unit/kafka/coordinator/transaction/TransactionStateManagerTest.scala
|
Scala
|
apache-2.0
| 30,433 |
package controllers
import play.api.i18n.Lang
import play.api.mvc._
import scala.concurrent.duration._
import scala.util.chaining._
import lila.app._
import lila.game.Pov
import lila.user.{ User => UserModel }
// both bot & board APIs
final class PlayApi(
env: Env,
apiC: => Api
)(implicit
mat: akka.stream.Materializer
) extends LilaController(env) {
implicit private def autoReqLang(implicit req: RequestHeader) = reqLang(req)
// bot endpoints
def botGameStream(id: String) =
Scoped(_.Bot.Play) { implicit req => me =>
WithPovAsBot(id, me) { impl.gameStream(me, _) }
}
def botMove(id: String, uci: String, offeringDraw: Option[Boolean]) =
Scoped(_.Bot.Play) { _ => me =>
WithPovAsBot(id, me) { impl.move(me, _, uci, offeringDraw) }
}
def botCommand(cmd: String) =
ScopedBody(_.Bot.Play) { implicit req => me =>
cmd.split('/') match {
case Array("account", "upgrade") =>
env.user.repo.isManaged(me.id) flatMap {
case true => notFoundJson()
case _ =>
env.tournament.api.withdrawAll(me) >>
env.team.cached.teamIdsList(me.id).flatMap { env.swiss.api.withdrawAll(me, _) } >>
env.user.repo.setBot(me) >>
env.pref.api.setBot(me) >>
env.streamer.api.delete(me) >>-
env.user.lightUserApi.invalidate(me.id) pipe
toResult recover { case lila.base.LilaInvalid(msg) =>
BadRequest(jsonError(msg))
}
}
case _ => impl.command(me, cmd)(WithPovAsBot)
}
}
// board endpoints
def boardGameStream(id: String) =
Scoped(_.Board.Play) { implicit req => me =>
WithPovAsBoard(id, me) { impl.gameStream(me, _) }
}
def boardMove(id: String, uci: String, offeringDraw: Option[Boolean]) =
Scoped(_.Board.Play) { _ => me =>
WithPovAsBoard(id, me) {
impl.move(me, _, uci, offeringDraw)
}
}
def boardCommandPost(cmd: String) =
ScopedBody(_.Board.Play) { implicit req => me =>
impl.command(me, cmd)(WithPovAsBoard)
}
// common code for bot & board APIs
private object impl {
def gameStream(me: UserModel, pov: Pov)(implicit lang: Lang) =
env.game.gameRepo.withInitialFen(pov.game) map { wf =>
apiC.sourceToNdJsonOption(env.bot.gameStateStream(wf, pov.color, me))
}
def move(me: UserModel, pov: Pov, uci: String, offeringDraw: Option[Boolean]) =
env.bot.player(pov, me, uci, offeringDraw) pipe toResult
def command(me: UserModel, cmd: String)(
as: (String, UserModel) => (Pov => Fu[Result]) => Fu[Result]
)(implicit req: Request[_]): Fu[Result] =
cmd.split('/') match {
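        // Commands are slash-separated paths, e.g. "game/{gameId}/resign" or
        // "game/{gameId}/draw/yes".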
case Array("game", id, "chat") =>
as(id, me) { pov =>
env.bot.form.chat
.bindFromRequest()
.fold(
jsonFormErrorDefaultLang,
res => env.bot.player.chat(pov.gameId, me, res) inject jsonOkResult
) pipe catchClientError
}
case Array("game", id, "abort") =>
as(id, me) { pov =>
env.bot.player.abort(pov) pipe toResult
}
case Array("game", id, "resign") =>
as(id, me) { pov =>
env.bot.player.resign(pov) pipe toResult
}
case Array("game", id, "draw", bool) =>
as(id, me) { pov =>
fuccess(env.bot.player.setDraw(pov, lila.common.Form.trueish(bool))) pipe toResult
}
case Array("game", id, "takeback", bool) =>
as(id, me) { pov =>
fuccess(env.bot.player.setTakeback(pov, lila.common.Form.trueish(bool))) pipe toResult
}
case Array("game", id, "claim-victory") =>
as(id, me) { pov =>
env.bot.player.claimVictory(pov) pipe toResult
}
case _ => notFoundJson("No such command")
}
}
def boardCommandGet(cmd: String) =
ScopedBody(_.Board.Play) { implicit req => me =>
cmd.split('/') match {
case Array("game", id, "chat") =>
WithPovAsBoard(id, me) { pov =>
env.chat.api.userChat.find(lila.chat.Chat.Id(pov.game.id)) map
lila.chat.JsonView.boardApi map JsonOk
}
case _ => notFoundJson("No such command")
}
}
// utils
private def toResult(f: Funit): Fu[Result] = catchClientError(f inject jsonOkResult)
private def catchClientError(f: Fu[Result]): Fu[Result] =
f recover { case e: lila.round.BenignError =>
BadRequest(jsonError(e.getMessage))
}
private def WithPovAsBot(anyId: String, me: lila.user.User)(f: Pov => Fu[Result]) =
WithPov(anyId, me) { pov =>
if (me.noBot)
BadRequest(
jsonError(
"This endpoint can only be used with a Bot account. See https://lichess.org/api#operation/botAccountUpgrade"
)
).fuccess
else if (!lila.game.Game.isBotCompatible(pov.game))
BadRequest(jsonError("This game cannot be played with the Bot API.")).fuccess
else f(pov)
}
private def WithPovAsBoard(anyId: String, me: lila.user.User)(f: Pov => Fu[Result]) =
WithPov(anyId, me) { pov =>
if (me.isBot) notForBotAccounts.fuccess
else if (!lila.game.Game.isBoardCompatible(pov.game))
BadRequest(jsonError("This game cannot be played with the Board API.")).fuccess
else f(pov)
}
private def WithPov(anyId: String, me: lila.user.User)(f: Pov => Fu[Result]) =
env.round.proxyRepo.game(lila.game.Game takeGameId anyId) flatMap {
case None => NotFound(jsonError("No such game")).fuccess
case Some(game) =>
Pov(game, me) match {
case None => NotFound(jsonError("Not your game")).fuccess
case Some(pov) => f(pov)
}
}
def botOnline =
Open { implicit ctx =>
env.user.repo.botsByIds(env.bot.onlineApiUsers.get) map { users =>
Ok(views.html.user.bots(users))
}
}
def botOnlineApi =
Action { implicit req =>
apiC.jsonStream {
env.user.repo
.botsByIdsCursor(env.bot.onlineApiUsers.get)
.documentSource(getInt("nb", req) | Int.MaxValue)
.throttle(50, 1 second)
.map { env.user.jsonView.full(_, withOnline = false, withRating = true) }
}
}
}
|
luanlv/lila
|
app/controllers/PlayApi.scala
|
Scala
|
mit
| 6,370 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.util.Properties
import java.util.concurrent.{TimeoutException, TimeUnit}
import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference}
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet}
import scala.concurrent.Future
import scala.concurrent.duration.{Duration, SECONDS}
import scala.language.existentials
import scala.reflect.ClassTag
import org.scalactic.TripleEquals
import org.scalatest.Assertions.AssertionsHelper
import org.scalatest.concurrent.Eventually._
import org.scalatest.time.SpanSugar._
import org.apache.spark._
import org.apache.spark.TaskState._
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.util.{CallSite, ThreadUtils, Utils}
/**
* Tests for the entire scheduler code -- DAGScheduler, TaskSchedulerImpl, TaskSets,
* TaskSetManagers.
*
* Test cases are configured by providing a set of jobs to submit, and then simulating interaction
 * with Spark's executors via a mocked backend (e.g., task completion, task failure, executors
* disconnecting, etc.).
*/
abstract class SchedulerIntegrationSuite[T <: MockBackend: ClassTag] extends SparkFunSuite
with LocalSparkContext {
var taskScheduler: TestTaskScheduler = null
var scheduler: DAGScheduler = null
var backend: T = _
override def beforeEach(): Unit = {
if (taskScheduler != null) {
taskScheduler.runningTaskSets.clear()
}
results.clear()
failure = null
backendException.set(null)
super.beforeEach()
}
override def afterEach(): Unit = {
super.afterEach()
taskScheduler.stop()
backend.stop()
scheduler.stop()
}
def setupScheduler(conf: SparkConf): Unit = {
conf.setAppName(this.getClass().getSimpleName())
val backendClassName = implicitly[ClassTag[T]].runtimeClass.getName()
conf.setMaster(s"mock[${backendClassName}]")
sc = new SparkContext(conf)
backend = sc.schedulerBackend.asInstanceOf[T]
taskScheduler = sc.taskScheduler.asInstanceOf[TestTaskScheduler]
taskScheduler.initialize(sc.schedulerBackend)
scheduler = new DAGScheduler(sc, taskScheduler)
taskScheduler.setDAGScheduler(scheduler)
}
def testScheduler(name: String)(testBody: => Unit): Unit = {
testScheduler(name, Seq())(testBody)
}
def testScheduler(name: String, extraConfs: Seq[(String, String)])(testBody: => Unit): Unit = {
test(name) {
val conf = new SparkConf()
extraConfs.foreach{ case (k, v) => conf.set(k, v)}
setupScheduler(conf)
testBody
}
}
/**
* A map from partition to results for all tasks of a job when you call this test framework's
* [[submit]] method. Two important considerations:
*
* 1. If there is a job failure, results may or may not be empty. If any tasks succeed before
* the job has failed, they will get included in `results`. Instead, check for job failure by
* checking [[failure]]. (Also see `assertDataStructuresEmpty()`)
*
* 2. This only gets cleared between tests. So you'll need to do special handling if you submit
* more than one job in one test.
*/
val results = new HashMap[Int, Any]()
/**
* If a call to [[submit]] results in a job failure, this will hold the exception, else it will
* be null.
*
* As with [[results]], this only gets cleared between tests, so care must be taken if you are
* submitting more than one job in one test.
*/
var failure: Throwable = _
/**
* When we submit dummy Jobs, this is the compute function we supply.
*/
private val jobComputeFunc: (TaskContext, scala.Iterator[_]) => Any = {
(context: TaskContext, it: Iterator[(_)]) =>
throw new RuntimeException("jobComputeFunc shouldn't get called in this mock")
}
/** Submits a job to the scheduler, and returns a future which does a bit of error handling. */
protected def submit(
rdd: RDD[_],
partitions: Array[Int],
func: (TaskContext, Iterator[_]) => _ = jobComputeFunc): Future[Any] = {
val waiter: JobWaiter[Any] = scheduler.submitJob(rdd, func, partitions.toSeq, CallSite("", ""),
(index, res) => results(index) = res, new Properties())
import scala.concurrent.ExecutionContext.Implicits.global
waiter.completionFuture.recover { case ex =>
failure = ex
}
}
/**
* Helper to run a few common asserts after a job has completed, in particular some internal
   * data structures used for bookkeeping. This only does a very minimal check for whether the job
* failed or succeeded -- often you will want extra asserts on [[results]] or [[failure]].
*/
protected def assertDataStructuresEmpty(noFailure: Boolean = true): Unit = {
if (noFailure) {
if (failure != null) {
// if there is a job failure, it can be a bit hard to tease the job failure msg apart
// from the test failure msg, so we do a little extra formatting
val msg =
raw"""
| There was a failed job.
| ----- Begin Job Failure Msg -----
| ${Utils.exceptionString(failure)}
| ----- End Job Failure Msg ----
""".
stripMargin
fail(msg)
}
// When a job fails, we terminate before waiting for all the task end events to come in,
// so there might still be a running task set. So we only check these conditions
// when the job succeeds.
// When the final task of a taskset completes, we post
// the event to the DAGScheduler event loop before we finish processing in the taskscheduler
// thread. It's possible the DAGScheduler thread processes the event, finishes the job,
// and notifies the job waiter before our original thread in the task scheduler finishes
      // handling the event and marks the taskset as complete. So it's OK if we need to wait a
      // *little* bit longer for the original task scheduler thread to finish up to deal with the race.
eventually(timeout(1 second), interval(10 millis)) {
assert(taskScheduler.runningTaskSets.isEmpty)
}
assert(!backend.hasTasks)
} else {
assert(failure != null)
}
assert(scheduler.activeJobs.isEmpty)
assert(backendException.get() == null)
}
/**
* Looks at all shuffleMapOutputs that are dependencies of the given RDD, and makes sure
* they are all registered
*/
def assertMapOutputAvailable(targetRdd: MockRDD): Unit = {
val shuffleIds = targetRdd.shuffleDeps.map{_.shuffleId}
val nParts = targetRdd.numPartitions
for {
shuffleId <- shuffleIds
reduceIdx <- (0 until nParts)
} {
val statuses = taskScheduler.mapOutputTracker.getMapSizesByExecutorId(shuffleId, reduceIdx)
// really we should have already thrown an exception rather than fail either of these
// asserts, but just to be extra defensive let's double check the statuses are OK
assert(statuses != null)
assert(statuses.nonEmpty)
}
}
/** models a stage boundary with a single dependency, like a shuffle */
def shuffle(nParts: Int, input: MockRDD): MockRDD = {
val partitioner = new HashPartitioner(nParts)
val shuffleDep = new ShuffleDependency[Int, Int, Nothing](input, partitioner)
new MockRDD(sc, nParts, List(shuffleDep))
}
/** models a stage boundary with multiple dependencies, like a join */
def join(nParts: Int, inputs: MockRDD*): MockRDD = {
val partitioner = new HashPartitioner(nParts)
val shuffleDeps = inputs.map { inputRDD =>
new ShuffleDependency[Int, Int, Nothing](inputRDD, partitioner)
}
new MockRDD(sc, nParts, shuffleDeps)
}
val backendException = new AtomicReference[Exception](null)
/**
* Helper which makes it a little easier to setup a test, which starts a mock backend in another
* thread, responding to tasks with your custom function. You also supply the "body" of your
* test, where you submit jobs to your backend, wait for them to complete, then check
   * whatever conditions you want. Note that this is *not* robust to badly-behaved backends --
   * in particular, your `backendFunc` has to return quickly and must not throw errors
   * (instead it should send back the right TaskEndReason).
*/
def withBackend[T](backendFunc: () => Unit)(testBody: => T): T = {
val backendContinue = new AtomicBoolean(true)
val backendThread = new Thread("mock backend thread") {
override def run(): Unit = {
while (backendContinue.get()) {
if (backend.hasTasksWaitingToRun) {
try {
backendFunc()
} catch {
case ex: Exception =>
// Try to do a little error handling around exceptions that might occur here --
// otherwise it can just look like a TimeoutException in the test itself.
logError("Exception in mock backend:", ex)
backendException.set(ex)
backendContinue.set(false)
throw ex
}
} else {
Thread.sleep(10)
}
}
}
}
try {
backendThread.start()
testBody
} finally {
backendContinue.set(false)
backendThread.join()
}
}
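  // A minimal usage sketch (illustrative only; BasicSchedulerIntegrationSuite below
  // exercises exactly this shape for real):
  //   def runBackend(): Unit = {
  //     val (taskDescription, _) = backend.beginTask()
  //     backend.taskSuccess(taskDescription, 42)
  //   }
  //   withBackend(runBackend _) {
  //     val jobFuture = submit(new MockRDD(sc, 10, Nil), (0 until 10).toArray)
  //     awaitJobTermination(jobFuture, Duration(1, SECONDS))
  //   }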
/**
* Helper to do a little extra error checking while waiting for the job to terminate. Primarily
* just does a little extra error handling if there is an exception from the backend.
*/
def awaitJobTermination(jobFuture: Future[_], duration: Duration): Unit = {
try {
ThreadUtils.awaitReady(jobFuture, duration)
} catch {
case te: TimeoutException if backendException.get() != null =>
val msg = raw"""
| ----- Begin Backend Failure Msg -----
| ${Utils.exceptionString(backendException.get())}
| ----- End Backend Failure Msg ----
""".
stripMargin
fail(s"Future timed out after ${duration}, likely because of failure in backend: $msg")
}
}
}
/**
* Helper for running a backend in integration tests, does a bunch of the book-keeping
* so individual tests can focus on just responding to tasks. Individual tests will use
* [[beginTask]], [[taskSuccess]], and [[taskFailed]].
*/
private[spark] abstract class MockBackend(
conf: SparkConf,
val taskScheduler: TaskSchedulerImpl) extends SchedulerBackend with Logging {
// Periodically revive offers to allow delay scheduling to work
private val reviveThread =
ThreadUtils.newDaemonSingleThreadScheduledExecutor("driver-revive-thread")
private val reviveIntervalMs = conf.getTimeAsMs("spark.scheduler.revive.interval", "10ms")
/**
* Test backends should call this to get a task that has been assigned to them by the scheduler.
* Each task should be responded to with either [[taskSuccess]] or [[taskFailed]].
*/
def beginTask(): (TaskDescription, Task[_]) = {
synchronized {
val toRun = assignedTasksWaitingToRun.remove(assignedTasksWaitingToRun.size - 1)
runningTasks += toRun._1.taskId
toRun
}
}
/**
* Tell the scheduler the task completed successfully, with the given result. Also
* updates some internal state for this mock.
*/
def taskSuccess(task: TaskDescription, result: Any): Unit = {
val ser = env.serializer.newInstance()
val resultBytes = ser.serialize(result)
val directResult = new DirectTaskResult(resultBytes, Seq()) // no accumulator updates
taskUpdate(task, TaskState.FINISHED, directResult)
}
/**
* Tell the scheduler the task failed, with the given state and result (probably ExceptionFailure
* or FetchFailed). Also updates some internal state for this mock.
*/
def taskFailed(task: TaskDescription, exc: Exception): Unit = {
taskUpdate(task, TaskState.FAILED, new ExceptionFailure(exc, Seq()))
}
def taskFailed(task: TaskDescription, reason: TaskFailedReason): Unit = {
taskUpdate(task, TaskState.FAILED, reason)
}
def taskUpdate(task: TaskDescription, state: TaskState, result: Any): Unit = {
val ser = env.serializer.newInstance()
val resultBytes = ser.serialize(result)
    // statusUpdate is safe to call from multiple threads; it's protected inside taskScheduler
taskScheduler.statusUpdate(task.taskId, state, resultBytes)
if (TaskState.isFinished(state)) {
synchronized {
runningTasks -= task.taskId
executorIdToExecutor(task.executorId).freeCores += taskScheduler.CPUS_PER_TASK
freeCores += taskScheduler.CPUS_PER_TASK
}
reviveOffers()
}
}
// protected by this
private val assignedTasksWaitingToRun = new ArrayBuffer[(TaskDescription, Task[_])](10000)
// protected by this
private val runningTasks = HashSet[Long]()
def hasTasks: Boolean = synchronized {
assignedTasksWaitingToRun.nonEmpty || runningTasks.nonEmpty
}
def hasTasksWaitingToRun: Boolean = {
assignedTasksWaitingToRun.nonEmpty
}
override def start(): Unit = {
reviveThread.scheduleAtFixedRate(new Runnable {
override def run(): Unit = Utils.tryLogNonFatalError {
reviveOffers()
}
}, 0, reviveIntervalMs, TimeUnit.MILLISECONDS)
}
override def stop(): Unit = {
reviveThread.shutdown()
}
val env = SparkEnv.get
/** Accessed by both scheduling and backend thread, so should be protected by this. */
var freeCores: Int = _
/**
* Accessed by both scheduling and backend thread, so should be protected by this.
   * Most likely the only things that need to be protected are the individual ExecutorTaskStatus values,
* but for simplicity in this mock just lock the whole backend.
*/
def executorIdToExecutor: Map[String, ExecutorTaskStatus]
private def generateOffers(): IndexedSeq[WorkerOffer] = {
executorIdToExecutor.values.filter { exec =>
exec.freeCores > 0
}.map { exec =>
WorkerOffer(executorId = exec.executorId, host = exec.host,
cores = exec.freeCores)
}.toIndexedSeq
}
/**
   * This is called by the scheduler whenever it has tasks it would like to schedule, when a task
* completes (which will be in a result-getter thread), and by the reviveOffers thread for delay
* scheduling.
*/
override def reviveOffers(): Unit = {
// Need a lock on the entire scheduler to protect freeCores -- otherwise, multiple threads
// may make offers at the same time, though they are using the same set of freeCores.
taskScheduler.synchronized {
val newTaskDescriptions = taskScheduler.resourceOffers(generateOffers()).flatten
// get the task now, since that requires a lock on TaskSchedulerImpl, to prevent individual
// tests from introducing a race if they need it.
val newTasks = newTaskDescriptions.map { taskDescription =>
val taskSet = taskScheduler.taskIdToTaskSetManager(taskDescription.taskId).taskSet
val task = taskSet.tasks(taskDescription.index)
(taskDescription, task)
}
newTasks.foreach { case (taskDescription, _) =>
executorIdToExecutor(taskDescription.executorId).freeCores -= taskScheduler.CPUS_PER_TASK
}
freeCores -= newTasks.size * taskScheduler.CPUS_PER_TASK
assignedTasksWaitingToRun ++= newTasks
}
}
override def killTask(
taskId: Long, executorId: String, interruptThread: Boolean, reason: String): Unit = {
// We have to implement this b/c of SPARK-15385.
    // It's OK for this to be a no-op, because even if a backend does implement killTask,
    // it really can only be "best-effort" in any case, and the scheduler should be robust to that.
    // In fact it reasonably simulates a case where a real backend finishes tasks in between
// the time when the scheduler sends the msg to kill tasks, and the backend receives the msg.
}
}
/**
* A very simple mock backend that can just run one task at a time.
*/
private[spark] class SingleCoreMockBackend(
conf: SparkConf,
taskScheduler: TaskSchedulerImpl) extends MockBackend(conf, taskScheduler) {
val cores = 1
override def defaultParallelism(): Int = conf.getInt("spark.default.parallelism", cores)
freeCores = cores
val localExecutorId = SparkContext.DRIVER_IDENTIFIER
val localExecutorHostname = "localhost"
override val executorIdToExecutor: Map[String, ExecutorTaskStatus] = Map(
localExecutorId -> new ExecutorTaskStatus(localExecutorHostname, localExecutorId, freeCores)
)
}
case class ExecutorTaskStatus(host: String, executorId: String, var freeCores: Int)
class MockRDD(
sc: SparkContext,
val numPartitions: Int,
val shuffleDeps: Seq[ShuffleDependency[Int, Int, Nothing]]
) extends RDD[(Int, Int)](sc, shuffleDeps) with Serializable {
MockRDD.validate(numPartitions, shuffleDeps)
override def compute(split: Partition, context: TaskContext): Iterator[(Int, Int)] =
throw new RuntimeException("should not be reached")
override def getPartitions: Array[Partition] = {
(0 until numPartitions).map(i => new Partition {
override def index: Int = i
}).toArray
}
override def getPreferredLocations(split: Partition): Seq[String] = Nil
override def toString: String = "MockRDD " + id
}
object MockRDD extends AssertionsHelper with TripleEquals {
/**
* make sure all the shuffle dependencies have a consistent number of output partitions
* (mostly to make sure the test setup makes sense, not that Spark itself would get this wrong)
*/
def validate(numPartitions: Int, dependencies: Seq[ShuffleDependency[_, _, _]]): Unit = {
dependencies.foreach { dependency =>
val partitioner = dependency.partitioner
assert(partitioner != null)
assert(partitioner.numPartitions === numPartitions)
}
}
}
/** Simple cluster manager that wires up our mock backend. */
private class MockExternalClusterManager extends ExternalClusterManager {
  val MOCK_REGEX = """mock\[(.*)\]""".r
def canCreate(masterURL: String): Boolean = MOCK_REGEX.findFirstIn(masterURL).isDefined
def createTaskScheduler(
sc: SparkContext,
masterURL: String): TaskScheduler = {
new TestTaskScheduler(sc)
}
def createSchedulerBackend(
sc: SparkContext,
masterURL: String,
scheduler: TaskScheduler): SchedulerBackend = {
masterURL match {
case MOCK_REGEX(backendClassName) =>
val backendClass = Utils.classForName(backendClassName)
val ctor = backendClass.getConstructor(classOf[SparkConf], classOf[TaskSchedulerImpl])
ctor.newInstance(sc.getConf, scheduler).asInstanceOf[SchedulerBackend]
}
}
def initialize(scheduler: TaskScheduler, backend: SchedulerBackend): Unit = {
scheduler.asInstanceOf[TaskSchedulerImpl].initialize(backend)
}
}
/** TaskSchedulerImpl that just tracks a tiny bit more state to enable checks in tests. */
class TestTaskScheduler(sc: SparkContext) extends TaskSchedulerImpl(sc) {
/** Set of TaskSets the DAGScheduler has requested executed. */
val runningTaskSets = HashSet[TaskSet]()
override def submitTasks(taskSet: TaskSet): Unit = {
runningTaskSets += taskSet
super.submitTasks(taskSet)
}
override def taskSetFinished(manager: TaskSetManager): Unit = {
runningTaskSets -= manager.taskSet
super.taskSetFinished(manager)
}
}
/**
* Some very basic tests just to demonstrate the use of the test framework (and verify that it
* works).
*/
class BasicSchedulerIntegrationSuite extends SchedulerIntegrationSuite[SingleCoreMockBackend] {
/**
* Very simple one stage job. Backend successfully completes each task, one by one
*/
testScheduler("super simple job") {
def runBackend(): Unit = {
      val (taskDescription, _) = backend.beginTask()
      backend.taskSuccess(taskDescription, 42)
}
withBackend(runBackend _) {
val jobFuture = submit(new MockRDD(sc, 10, Nil), (0 until 10).toArray)
val duration = Duration(1, SECONDS)
awaitJobTermination(jobFuture, duration)
}
assert(results === (0 until 10).map { _ -> 42 }.toMap)
assertDataStructuresEmpty()
}
/**
* 5 stage job, diamond dependencies.
*
* a ----> b ----> d --> result
   *    \--> c --/
*
* Backend successfully completes each task
*/
testScheduler("multi-stage job") {
def shuffleIdToOutputParts(shuffleId: Int): Int = {
shuffleId match {
case 0 => 10
case 1 => 20
case _ => 30
}
}
val a = new MockRDD(sc, 2, Nil)
val b = shuffle(10, a)
val c = shuffle(20, a)
val d = join(30, b, c)
def runBackend(): Unit = {
val (taskDescription, task) = backend.beginTask()
// make sure the required map output is available
task.stageId match {
case 4 => assertMapOutputAvailable(d)
case _ =>
          // we can't check the output of the two intermediate stages, unfortunately,
          // because the stage numbering is non-deterministic, so the stage number alone
          // doesn't tell us what to check
}
(task.stageId, task.stageAttemptId, task.partitionId) match {
case (stage, 0, _) if stage < 4 =>
val shuffleId =
scheduler.stageIdToStage(stage).asInstanceOf[ShuffleMapStage].shuffleDep.shuffleId
backend.taskSuccess(taskDescription,
DAGSchedulerSuite.makeMapStatus("hostA", shuffleIdToOutputParts(shuffleId)))
case (4, 0, partition) =>
backend.taskSuccess(taskDescription, 4321 + partition)
}
}
withBackend(runBackend _) {
val jobFuture = submit(d, (0 until 30).toArray)
val duration = Duration(1, SECONDS)
awaitJobTermination(jobFuture, duration)
}
assert(results === (0 until 30).map { idx => idx -> (4321 + idx) }.toMap)
assertDataStructuresEmpty()
}
/**
* 2 stage job, with a fetch failure. Make sure that:
* (a) map output is available whenever we run stage 1
* (b) we get a second attempt for stage 0 & stage 1
*/
testScheduler("job with fetch failure") {
val input = new MockRDD(sc, 2, Nil)
val shuffledRdd = shuffle(10, input)
val shuffleId = shuffledRdd.shuffleDeps.head.shuffleId
val stageToAttempts = new HashMap[Int, HashSet[Int]]()
def runBackend(): Unit = {
val (taskDescription, task) = backend.beginTask()
stageToAttempts.getOrElseUpdate(task.stageId, new HashSet()) += task.stageAttemptId
// We cannot check if shuffle output is available, because the failed fetch will clear the
// shuffle output. Then we'd have a race, between the already-started task from the first
// attempt, and when the failure clears out the map output status.
(task.stageId, task.stageAttemptId, task.partitionId) match {
case (0, _, _) =>
backend.taskSuccess(taskDescription, DAGSchedulerSuite.makeMapStatus("hostA", 10))
case (1, 0, 0) =>
val fetchFailed = FetchFailed(
DAGSchedulerSuite.makeBlockManagerId("hostA"), shuffleId, 0, 0, "ignored")
backend.taskFailed(taskDescription, fetchFailed)
case (1, _, partition) =>
backend.taskSuccess(taskDescription, 42 + partition)
case unmatched =>
fail(s"Unexpected shuffle output $unmatched")
}
}
withBackend(runBackend _) {
val jobFuture = submit(shuffledRdd, (0 until 10).toArray)
val duration = Duration(1, SECONDS)
awaitJobTermination(jobFuture, duration)
}
assertDataStructuresEmpty()
assert(results === (0 until 10).map { idx => idx -> (42 + idx) }.toMap)
assert(stageToAttempts === Map(0 -> Set(0, 1), 1 -> Set(0, 1)))
}
testScheduler("job failure after 4 attempts") {
def runBackend(): Unit = {
val (taskDescription, _) = backend.beginTask()
backend.taskFailed(taskDescription, new RuntimeException("test task failure"))
}
withBackend(runBackend _) {
val jobFuture = submit(new MockRDD(sc, 10, Nil), (0 until 10).toArray)
val duration = Duration(1, SECONDS)
awaitJobTermination(jobFuture, duration)
assert(failure.getMessage.contains("test task failure"))
}
assertDataStructuresEmpty(noFailure = false)
}
}
|
bravo-zhang/spark
|
core/src/test/scala/org/apache/spark/scheduler/SchedulerIntegrationSuite.scala
|
Scala
|
apache-2.0
| 24,906 |
// Copyright: 2010 - 2016 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.sexp.formats
import collection.{ immutable => im }
import org.ensime.sexp._
// http://docs.scala-lang.org/overviews/collections/overview.html
class CollectionFormatsSpec extends FormatSpec
with ProductFormats with CollectionFormats with BasicFormats {
val foo = SexpString("foo")
val foos: List[String] = List("foo", "foo")
val expect = SexpList(foo, foo)
"CollectionFormats traits" should "support Traversable" in {
assertFormat(collection.Traversable[String](), SexpNil)
assertFormat(collection.Traversable(foos: _*), expect)
}
it should "support Iterable" in {
assertFormat(collection.Iterable[String](), SexpNil)
assertFormat(collection.Iterable(foos: _*), expect)
}
it should "support Seq" in {
assertFormat(collection.Seq[String](), SexpNil)
assertFormat(collection.Seq(foos: _*), expect)
}
it should "support IndexedSeq" in {
assertFormat(collection.IndexedSeq[String](), SexpNil)
assertFormat(collection.IndexedSeq(foos: _*), expect)
}
it should "support LinearSeq" in {
assertFormat(collection.LinearSeq[String](), SexpNil)
assertFormat(collection.LinearSeq(foos: _*), expect)
}
it should "support Set" in {
assertFormat(collection.Set[String](), SexpNil)
assertFormat(collection.Set(foos: _*), SexpList(foo)) // dupes removed
}
it should "support SortedSet" in {
assertFormat(collection.SortedSet[String](), SexpNil)
assertFormat(collection.SortedSet(foos: _*), SexpList(foo)) // dupes removed
}
it should "support BitSet" in {
assertFormat(collection.BitSet(), SexpNil)
assertFormat(collection.BitSet(0, 1), SexpString("16#3"))
assertFormat(collection.BitSet(64), SexpString("16#10000000000000000"))
assertFormat(collection.BitSet(0, 64), SexpString("16#10000000000000001"))
assertFormat(collection.BitSet(1, 64), SexpString("16#10000000000000002"))
}
it should "support Map" in {
assertFormat(collection.Map[String, String](), SexpNil)
assertFormat(collection.Map("foo" -> "foo"), SexpList(SexpList(foo, foo)))
}
it should "support SortedMap" in {
assertFormat(collection.SortedMap[String, String](), SexpNil)
assertFormat(collection.SortedMap("foo" -> "foo"), SexpList(SexpList(foo, foo)))
}
"CollectionFormats immutable variants of the traits" should "support Traversable" in {
assertFormat(im.Traversable[String](), SexpNil)
assertFormat(im.Traversable(foos: _*), expect)
}
it should "support Iterable" in {
assertFormat(im.Iterable[String](), SexpNil)
assertFormat(im.Iterable(foos: _*), expect)
}
it should "support Seq" in {
assertFormat(im.Seq[String](), SexpNil)
assertFormat(im.Seq(foos: _*), expect)
}
it should "support IndexedSeq" in {
assertFormat(im.IndexedSeq[String](), SexpNil)
assertFormat(im.IndexedSeq(foos: _*), expect)
}
it should "support LinearSeq" in {
assertFormat(im.LinearSeq[String](), SexpNil)
assertFormat(im.LinearSeq(foos: _*), expect)
}
it should "support Set" in {
assertFormat(im.Set[String](), SexpNil)
assertFormat(im.Set(foos: _*), SexpList(foo)) // dupes removed
}
it should "support SortedSet" in {
assertFormat(im.SortedSet[String](), SexpNil)
assertFormat(im.SortedSet(foos: _*), SexpList(foo)) // dupes removed
}
it should "support BitSet" in {
assertFormat(im.BitSet(), SexpNil)
assertFormat(im.BitSet(0, 1), SexpString("16#3"))
    assertFormat(im.BitSet(64), SexpString("16#10000000000000000"))
    assertFormat(im.BitSet(0, 64), SexpString("16#10000000000000001"))
    assertFormat(im.BitSet(1, 64), SexpString("16#10000000000000002"))
}
it should "support Map" in {
assertFormat(im.Map[String, String](), SexpNil)
assertFormat(im.Map("foo" -> "foo"), SexpList(SexpList(foo, foo)))
}
it should "support SortedMap" in {
assertFormat(im.SortedMap[String, String](), SexpNil)
assertFormat(im.SortedMap("foo" -> "foo"), SexpList(SexpList(foo, foo)))
}
"CollectionFormats immutable specific implementations" should "support im.List" in {
assertFormat(im.List[String](), SexpNil)
assertFormat(im.List(foos: _*), expect)
}
it should "support im.Vector" in {
assertFormat(im.Vector[String](), SexpNil)
assertFormat(im.Vector(foos: _*), expect)
}
it should "support im.Range" in {
assertFormat(
im.Range(-100, 100),
SexpList(
SexpSymbol(":start"), SexpNumber(-100),
SexpSymbol(":end"), SexpNumber(100),
SexpSymbol(":step"), SexpNumber(1)
)
)
assertFormat(
im.Range(-100, 100, 2),
SexpList(
SexpSymbol(":start"), SexpNumber(-100),
SexpSymbol(":end"), SexpNumber(100),
SexpSymbol(":step"), SexpNumber(2)
)
)
}
it should "support im.NumericRange" in {
implicit val DoubleIntegral: Numeric.DoubleAsIfIntegral.type = Numeric.DoubleAsIfIntegral
assertFormat(
-100.0 to 100.0 by 1.5,
SexpData(
SexpSymbol(":start") -> SexpNumber(-100),
SexpSymbol(":end") -> SexpNumber(100),
SexpSymbol(":step") -> SexpNumber(1.5),
SexpSymbol(":inclusive") -> SexpSymbol("t")
)
)
assertFormat(
-100.0 until 100.0 by 1.5,
SexpData(
SexpSymbol(":start") -> SexpNumber(-100),
SexpSymbol(":end") -> SexpNumber(100),
SexpSymbol(":step") -> SexpNumber(1.5),
SexpSymbol(":inclusive") -> SexpNil
)
)
}
}
|
sugakandrey/ensime-server
|
s-express/src/test/scala/org/ensime/sexp/formats/CollectionFormatsSpec.scala
|
Scala
|
gpl-3.0
| 5,640 |
///////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2010 Travis Brown, The University of Texas at Austin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////////////
package opennlp.textgrounder.topo.gaz
import java.util.Iterator
import scala.collection.JavaConversions._
import opennlp.textgrounder.text.Corpus
import opennlp.textgrounder.text.Token
import opennlp.textgrounder.topo.Location
class CorpusGazetteerReader(private val corpus: Corpus[_ <: Token])
extends GazetteerReader {
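  // Flattens the corpus: every toponym in every sentence of every document yields its
  // candidate Locations, exposed here as a single iterator.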
private val it = corpus.flatMap(_.flatMap {
_.getToponyms.flatMap(_.getCandidates)
}).toIterator
def hasNext: Boolean = it.hasNext
def next: Location = it.next
  def close(): Unit = {
corpus.close()
}
}
|
tectronics/textgrounder
|
src/main/scala/opennlp/textgrounder/topo/gaz/CorpusGazetteerReader.scala
|
Scala
|
apache-2.0
| 1,341 |
/*
* Copyright 2015-2016 IBM Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.core.connector
import scala.concurrent.Future
import org.apache.kafka.clients.producer.RecordMetadata
trait MessageProducer {
/** Count of messages sent. */
def sentCount(): Long
/** Sends msg to topic. This is an asynchronous operation. */
def send(topic: String, msg: Message): Future[RecordMetadata]
/** Closes producer. */
def close(): Unit
}
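/**
 * Illustrative sketch only (not part of the original API): shows how a caller might
 * use MessageProducer. The topic name "completed" and the global execution context
 * are assumptions made for the example.
 */
object MessageProducerExample {
  import scala.concurrent.ExecutionContext.Implicits.global
  /** Sends msg and logs the resulting Kafka offset once the send completes. */
  def publish(producer: MessageProducer, msg: Message): Future[RecordMetadata] = {
    val sent = producer.send("completed", msg)
    sent.foreach { metadata =>
      println(s"sent message #${producer.sentCount()} at offset ${metadata.offset}")
    }
    sent
  }
}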
|
CrowdFlower/incubator-openwhisk
|
common/scala/src/main/scala/whisk/core/connector/MessageProducer.scala
|
Scala
|
apache-2.0
| 991 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.connector
import slamdata.Predef._
import quasar.Data
import quasar.contrib.pathy._
import quasar.effect._
import quasar.fs._
import scalaz._, Scalaz._
trait ManagedQueryFile[C] { self: BackendModule =>
import QueryFile._, FileSystemError._
def MonoSeqM: MonoSeq[M]
def ResultKvsM: Kvs[M, ResultHandle, C]
trait ManagedQueryFileModule {
def executePlan(repr: Repr, out: AFile): Backend[Unit]
def explain(repr: Repr): Backend[String]
def listContents(dir: ADir): Backend[Set[PathSegment]]
def fileExists(file: AFile): Configured[Boolean]
def resultsCursor(repr: Repr): Backend[C]
def nextChunk(c: C): Backend[(C, Vector[Data])]
def closeCursor(c: C): Configured[Unit]
}
def ManagedQueryFileModule: ManagedQueryFileModule
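  // Adapts the cursor-based ManagedQueryFileModule to the handle-based QueryFile
  // interface: evaluatePlan draws a fresh ResultHandle from MonoSeqM and stores the
  // cursor in ResultKvsM, more advances the stored cursor, and close releases it.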
object QueryFileModule extends QueryFileModule {
private final implicit def _MonadM = MonadM
def executePlan(repr: Repr, out: AFile): Backend[Unit] =
ManagedQueryFileModule.executePlan(repr, out)
def explain(repr: Repr): Backend[String] =
ManagedQueryFileModule.explain(repr)
def listContents(dir: ADir): Backend[Set[PathSegment]] =
ManagedQueryFileModule.listContents(dir)
def fileExists(file: AFile): Configured[Boolean] =
ManagedQueryFileModule.fileExists(file)
def evaluatePlan(repr: Repr): Backend[ResultHandle] =
for {
id <- MonoSeqM.next.liftB
h = ResultHandle(id)
c <- ManagedQueryFileModule.resultsCursor(repr)
_ <- ResultKvsM.put(h, c).liftB
} yield h
def more(h: ResultHandle): Backend[Vector[Data]] =
for {
c0 <- ResultKvsM.get(h).liftB
c <- c0 getOrElseF unknownResultHandle(h).raiseError[Backend, C]
r <- ManagedQueryFileModule.nextChunk(c)
(c1, data) = r
_ <- ResultKvsM.put(h, c1).liftB
} yield data
def close(h: ResultHandle): Configured[Unit] =
OptionT(ResultKvsM.get(h).liftM[ConfiguredT])
.flatMapF(c =>
ManagedQueryFileModule.closeCursor(c) *>
ResultKvsM.delete(h).liftM[ConfiguredT])
.orZero
}
}
|
jedesah/Quasar
|
connector/src/main/scala/quasar/connector/ManagedQueryFile.scala
|
Scala
|
apache-2.0
| 2,718 |
/*
* Copyright 2020 Precog Data
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.qsu
import slamdata.Predef._
import quasar.{IdStatus, Qspec}
import quasar.common.data.{Data, DataGenerators}
import quasar.common.{JoinType, SortDir}
import quasar.contrib.cats.stateT._
import quasar.contrib.scalaz.{NonEmptyListE => NELE}
import quasar.frontend.logicalplan.{JoinCondition, LogicalPlan, LogicalPlanHelpers}
import quasar.qscript.{
Center,
Drop,
LeftSide,
LeftSide3,
MapFuncsCore,
PlannerError,
ReduceFuncs,
RightSide,
RightSide3,
Sample,
SrcHole,
Take
}
import quasar.qsu.{QScriptUniform => QSU}
import quasar.std.{
AggLib,
IdentityLib,
MathLib,
RelationsLib,
SetLib,
StringLib,
StructuralLib
}
import quasar.time.TemporalPart
import cats.Eval
import cats.data.StateT
import iotaz.CopK
import matryoshka.data.Fix
import org.specs2.matcher.{Expectable, Matcher, MatchResult}
import pathy.Path, Path.{file, Sandboxed}
import scalaz.{\/, EitherT, NonEmptyList => NEL}
import scalaz.syntax.bifunctor._
import scalaz.syntax.show._
import shims.monadToScalaz
object ReadLPSpec extends Qspec with LogicalPlanHelpers with DataGenerators with QSUTTypes[Fix] {
import QSUGraph.Extractors._
import IdStatus.ExcludeId
type F[A] = EitherT[StateT[Eval, Long, ?], PlannerError, A]
val reader = ReadLP[Fix, F] _
val root = Path.rootDir[Sandboxed]
val IC = CopK.Inject[MapFuncCore, MapFunc]
val ID = CopK.Inject[MapFuncDerived, MapFunc]
"reading lp into qsu" should {
"convert Read nodes" in {
read("foobar") must readQsuAs {
case Read(path, ExcludeId) =>
path mustEqual (root </> file("foobar"))
}
}
// we can't do this as a property test because Data and EJson don't round-trip! :-(
"convert constant nodes" >> {
"Int" >> {
val data = Data.Int(42)
lpf.constant(data) must readQsuAs {
case DataConstant(`data`) => ok
}
}
"String" >> {
val data = Data.Str("foo")
lpf.constant(data) must readQsuAs {
case DataConstant(`data`) => ok
}
}
}
"convert FlattenMap" in {
lpf.invoke1(StructuralLib.FlattenMap, read("foo")) must readQsuAs {
case Transpose(TRead(_), QSU.Retain.Values, QSU.Rotation.FlattenMap) => ok
}
}
"convert FlattenMapKeys" in {
lpf.invoke1(StructuralLib.FlattenMapKeys, read("foo")) must readQsuAs {
case Transpose(TRead(_), QSU.Retain.Identities, QSU.Rotation.FlattenMap) => ok
}
}
"convert FlattenArray" in {
lpf.invoke1(StructuralLib.FlattenArray, read("foo")) must readQsuAs {
case Transpose(TRead(_), QSU.Retain.Values, QSU.Rotation.FlattenArray) => ok
}
}
"convert FlattenArrayIndices" in {
lpf.invoke1(StructuralLib.FlattenArrayIndices, read("foo")) must readQsuAs {
case Transpose(TRead(_), QSU.Retain.Identities, QSU.Rotation.FlattenArray) => ok
}
}
"convert ShiftMap" in {
lpf.invoke1(StructuralLib.ShiftMap, read("foo")) must readQsuAs {
case Transpose(TRead(_), QSU.Retain.Values, QSU.Rotation.ShiftMap) => ok
}
}
"convert ShiftMapKeys" in {
lpf.invoke1(StructuralLib.ShiftMapKeys, read("foo")) must readQsuAs {
case Transpose(TRead(_), QSU.Retain.Identities, QSU.Rotation.ShiftMap) => ok
}
}
"convert ShiftArray" in {
lpf.invoke1(StructuralLib.ShiftArray, read("foo")) must readQsuAs {
case Transpose(TRead(_), QSU.Retain.Values, QSU.Rotation.ShiftArray) => ok
}
}
"convert ShiftArrayIndices" in {
lpf.invoke1(StructuralLib.ShiftArrayIndices, read("foo")) must readQsuAs {
case Transpose(TRead(_), QSU.Retain.Identities, QSU.Rotation.ShiftArray) => ok
}
}
"convert GroupBy" in {
lpf.invoke2(SetLib.GroupBy, read("foo"), read("bar")) must readQsuAs {
case GroupBy(TRead("foo"), TRead("bar")) => ok
}
}
"convert Squash" in {
lpf.invoke1(IdentityLib.Squash, read("foo")) must readQsuAs {
case DimEdit(TRead("foo"), QSU.DTrans.Squash()) => ok
}
}
"convert Filter" in {
lpf.invoke2(SetLib.Filter, read("foo"), read("bar")) must readQsuAs {
case LPFilter(TRead("foo"), TRead("bar")) => ok
}
}
"convert Sample" in {
lpf.invoke2(SetLib.Sample, read("foo"), read("bar")) must readQsuAs {
case Subset(TRead("foo"), Sample, TRead("bar")) => ok
}
}
"convert Take" in {
lpf.invoke2(SetLib.Take, read("foo"), read("bar")) must readQsuAs {
case Subset(TRead("foo"), Take, TRead("bar")) => ok
}
}
"convert Drop" in {
lpf.invoke2(SetLib.Drop, read("foo"), read("bar")) must readQsuAs {
case Subset(TRead("foo"), Drop, TRead("bar")) => ok
}
}
"convert Union" in {
lpf.invoke2(SetLib.Union, read("foo"), read("bar")) must readQsuAs {
case Union(TRead("foo"), TRead("bar")) => ok
}
}
"convert reductions" in {
lpf.invoke1(AggLib.Count, read("foo")) must readQsuAs {
case LPReduce(TRead("foo"), ReduceFuncs.Count(())) => ok
}
}
"convert unary mapping function" in {
lpf.invoke1(MathLib.Negate, read("foo")) must readQsuAs {
case Unary(TRead("foo"), IC(MapFuncsCore.Negate(SrcHole))) => ok
}
}
"convert binary mapping function" in {
lpf.invoke2(MathLib.Add, read("foo"), read("bar")) must readQsuAs {
case AutoJoin2C(
TRead("foo"),
TRead("bar"),
MapFuncsCore.Add(LeftSide, RightSide)) => ok
}
}
"convert ternary mapping function" in {
lpf.invoke3(RelationsLib.Cond, read("foo"), read("bar"), read("baz")) must readQsuAs {
case AutoJoin3C(
TRead("foo"),
TRead("bar"),
TRead("baz"),
MapFuncsCore.Cond(LeftSide3, Center, RightSide3)) => ok
}
}
"convert TemporalTrunc" in {
lpf.temporalTrunc(TemporalPart.Decade, read("foo")) must readQsuAs {
case Unary(
TRead("foo"),
IC(MapFuncsCore.TemporalTrunc(TemporalPart.Decade, SrcHole))) => ok
}
}
"convert join side" in {
lpf.joinSideName('heythere) must readQsuAs {
case JoinSideRef('heythere) => ok
}
}
"convert real join" in {
lpf.join(
read("foo"),
read("bar"),
JoinType.LeftOuter,
JoinCondition(
'left,
'right,
read("baz"))) must readQsuAs {
case LPJoin(
TRead("foo"),
TRead("bar"),
TRead("baz"),
JoinType.LeftOuter,
'left,
'right) => ok
}
}
"import let bindings" in {
lpf.let(
'tmp0,
read("foo"),
lpf.invoke2(
SetLib.Filter,
lpf.free('tmp0),
read("bar"))) must readQsuAs {
case LPFilter(TRead("foo"), TRead("bar")) => ok
}
}
"don't absolutely trust let binding collapse" in {
lpf.invoke2(
SetLib.Take,
lpf.let(
'tmp0,
read("foo"),
lpf.invoke2(
SetLib.Filter,
lpf.free('tmp0),
lpf.free('tmp0))),
lpf.let(
'tmp1,
read("foo"),
lpf.invoke2(
SetLib.Filter,
lpf.free('tmp1),
lpf.free('tmp1)))) must readQsuAs {
case qgraph => qgraph.vertices must haveSize(3)
}
}
"convert a sort" in {
lpf.sort(
read("foo"),
NEL(
read("bar") -> SortDir.Ascending,
read("baz") -> SortDir.Descending)) must readQsuAs {
case LPSort(
TRead("foo"),
NELE(
(TRead("bar"), SortDir.Ascending),
(TRead("baz"), SortDir.Descending))) => ok
}
}
"compress redundant first- and second-order nodes" in {
val qgraphM = reader(lpf.invoke2(SetLib.Filter, read("foo"), read("foo")))
val result = evaluate(qgraphM).toOption
result must beSome
result.get.vertices must haveSize(2)
}
"manage a straightforward query" in {
// select city from zips where city ~ "OULD.{0,2} CIT"
val input =
lpf.let(
'__tmp0,
read("zips"),
lpf.invoke2(
SetLib.Take,
lpf.invoke1(
IdentityLib.Squash,
lpf.invoke2(
StructuralLib.MapProject,
lpf.invoke2(
SetLib.Filter,
lpf.free('__tmp0),
lpf.let(
'__tmp2,
lpf.invoke2(
StructuralLib.MapProject,
lpf.free('__tmp0),
lpf.constant(Data.Str("city"))),
lpf.invoke3(
StringLib.Search,
lpf.free('__tmp2),
lpf.constant(Data.Str("OULD.{0,2} CIT")),
lpf.constant(Data.Bool(false))))),
lpf.constant(Data.Str("city")))),
lpf.constant(Data.Int(11))))
input must readQsuAs {
case Subset(
DimEdit(
AutoJoin2C(
LPFilter( // 8
Read(_, ExcludeId), // '__tmp1
AutoJoin3C( // 7
AutoJoin2C( // '__tmp2
Read(_, ExcludeId), // '__tmp1
DataConstant(Data.Str("city")),
MapFuncsCore.ProjectKey(LeftSide, RightSide)),
DataConstant(Data.Str("OULD.{0,2} CIT")),
DataConstant(Data.Bool(false)),
MapFuncsCore.Search(LeftSide3, Center, RightSide3))),
DataConstant(Data.Str("city")),
MapFuncsCore.ProjectKey(LeftSide, RightSide)),
QSU.DTrans.Squash()),
Take,
DataConstant(Data.Int(subsetTakeI))) =>
subsetTakeI mustEqual 11
}
}
}
def readQsuAs(pf: PartialFunction[QSUGraph, MatchResult[_]]): Matcher[Fix[LogicalPlan]] = {
new Matcher[Fix[LogicalPlan]] {
def apply[S <: Fix[LogicalPlan]](s: Expectable[S]): MatchResult[S] = {
val resulted = evaluate(reader(s.value)) leftMap { err =>
failure(s"reading produced planner error: ${err.shows}", s)
}
val continued = resulted rightMap { qgraph =>
val mapped = pf.lift(qgraph) map { r =>
result(
r.isSuccess,
s.description + " is correct: " + r.message,
s.description + " is incorrect: " + r.message,
s)
}
// TODO Show[QSUGraph[Fix]]
mapped.getOrElse(
failure(s"${qgraph.shows} did not match expected pattern", s))
}
continued.merge
}
}
}
def evaluate[A](fa: F[A]): PlannerError \\/ A = fa.run.runA(0L).value
}
|
quasar-analytics/quasar
|
qsu/src/test/scala/quasar/qsu/ReadLPSpec.scala
|
Scala
|
apache-2.0
| 11,477 |
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Source from now deprecated akka.contrib.pattern
*
* Copyright (C) 2009-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package org.squbs.pattern.orchestration
import akka.actor.Actor
import scala.annotation.tailrec
/**
* The aggregator is to be mixed into an actor for the aggregator behavior.
*/
@deprecated("The Orchestration module is deprecated. Please use Akka streams for safer orchestration instead.",
since = "0.15.0")
trait Aggregator {
this: Actor =>
private var processing = false
private val expectList = WorkList.empty[Actor.Receive]
private val addBuffer = WorkList.empty[Actor.Receive]
/**
* Adds the partial function to the receive set, to be removed on first match.
* @param fn The receive function.
* @return The same receive function.
*/
def expectOnce(fn: Actor.Receive): Actor.Receive = {
if (processing) addBuffer.add(fn, permanent = false)
else expectList.add(fn, permanent = false)
fn
}
/**
   * Adds the partial function to the receive set, keeping it in the receive set until removed.
* @param fn The receive function.
* @return The same receive function.
*/
def expect(fn: Actor.Receive): Actor.Receive = {
if (processing) addBuffer.add(fn, permanent = true)
else expectList.add(fn, permanent = true)
fn
}
/**
* Removes the partial function from the receive set.
* @param fn The receive function.
* @return True if the partial function is removed, false if not found.
*/
def unexpect(fn: Actor.Receive): Boolean = {
if (expectList.remove(fn)) true
else if (processing && (addBuffer.remove(fn))) true
else false
}
/**
* Receive function for handling the aggregations.
*/
def receive: Actor.Receive = {
case msg if handleMessage(msg) => // already dealt with in handleMessage
}
/**
* Handles messages and matches against the expect list.
* @param msg The message to be handled.
* @return true if message is successfully processed, false otherwise.
*/
def handleMessage(msg: Any): Boolean = {
processing = true
try {
expectList.process { fn =>
var processed = true
fn.applyOrElse(msg, (_: Any) => processed = false)
processed
}
} finally {
processing = false
expectList.addAll(addBuffer)
addBuffer.removeAll()
}
}
}
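/**
 * A minimal usage sketch added for illustration only; it is not part of the original
 * squbs source. The `Fetch`/`Result` messages and the worker protocol are hypothetical,
 * and the sketch merely shows how `expectOnce` handlers are chained.
 */
object AggregatorUsageSketch {
  case class Fetch(worker: akka.actor.ActorRef)
  case class Result(value: Int)

  class SingleFetchAggregator extends Actor with Aggregator {
    expectOnce {
      case Fetch(worker) =>
        val client = sender()
        worker ! "work" // ask the (hypothetical) worker to produce a Result
        expectOnce {    // handle exactly one reply, then stop
          case Result(value) =>
            client ! value
            context.stop(self)
        }
    }
  }
}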
/**
* Provides the utility methods and constructors to the WorkList class.
*/
@deprecated("The Orchestration module is deprecated. Please use Akka streams for safer orchestration instead.",
since = "0.15.0")
object WorkList {
def empty[T] = new WorkList[T]
/**
* Singly linked list entry implementation for WorkList.
* @param ref The item reference, None for head entry
* @param permanent If the entry is to be kept after processing
*/
class Entry[T](val ref: Option[T], val permanent: Boolean) {
var next: Entry[T] = null
var isDeleted = false
}
}
/**
* Fast, small, and dirty implementation of a linked list that removes transient work entries once they are processed.
 * The list is not thread safe! However, it is expected to be reentrant. This means a processing function can add/remove
 * entries from the list while processing. Most importantly, a processing function can remove its own entry from the list.
* The first remove must return true and any subsequent removes must return false.
*/
@deprecated("The Orchestration module is deprecated. Please use Akka streams for safer orchestration instead.",
since = "0.15.0")
class WorkList[T] {
import WorkList._
val head = new Entry[T](None, true)
var tail = head
/**
* Appends an entry to the work list.
* @param ref The entry.
* @return The updated work list.
*/
def add(ref: T, permanent: Boolean) = {
if (tail == head) {
tail = new Entry[T](Some(ref), permanent)
head.next = tail
} else {
tail.next = new Entry[T](Some(ref), permanent)
tail = tail.next
}
this
}
/**
* Removes an entry from the work list
* @param ref The entry.
* @return True if the entry is removed, false if the entry is not found.
*/
def remove(ref: T): Boolean = {
@tailrec
def remove(parent: Entry[T], entry: Entry[T]): Boolean = {
if (entry.ref.get == ref) {
parent.next = entry.next // Remove entry
if (tail == entry) tail = parent
entry.isDeleted = true
true
} else if (entry.next != null) remove(entry, entry.next)
else false
}
if (head.next == null) false else remove(head, head.next)
}
/**
   * Tries to process each entry using the processing function. Stops at the first entry for which processing succeeds.
* If the entry is not permanent, the entry is removed.
* @param processFn The processing function, returns true if processing succeeds.
* @return true if an entry has been processed, false if no entries are processed successfully.
*/
def process(processFn: T => Boolean): Boolean = {
@tailrec
def process(parent: Entry[T], entry: Entry[T]): Boolean = {
val processed = processFn(entry.ref.get)
if (processed) {
if (!entry.permanent && !entry.isDeleted) {
parent.next = entry.next // Remove entry
if (tail == entry) tail = parent
entry.isDeleted = true
}
true // Handled
} else if (entry.next != null) process(entry, entry.next)
else false
}
if (head.next == null) false else process(head, head.next)
}
/**
* Appends another WorkList to this WorkList.
* @param other The other WorkList
* @return This WorkList
*/
def addAll(other: WorkList[T]) = {
if (other.head.next != null) {
tail.next = other.head.next
tail = other.tail
}
this
}
/**
* Removes all entries from this WorkList
* @return True if at least one entry is removed. False if none is removed.
*/
def removeAll() = {
if (head.next == null) false
else {
head.next = null
tail = head
true
}
}
}
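/**
 * A small illustrative sketch, not part of the original source, of the WorkList
 * semantics documented above: transient entries disappear after a successful
 * process, permanent entries survive.
 */
object WorkListUsageSketch {
  def demo(): Unit = {
    val wl = WorkList.empty[String]
    wl.add("once", permanent = false)
    wl.add("always", permanent = true)

    assert(wl.process(_ == "once"))    // matched and removed (transient)
    assert(!wl.process(_ == "once"))   // no longer in the list
    assert(wl.process(_ == "always"))  // matched, kept (permanent)
    assert(wl.process(_ == "always"))  // still there
  }
}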
|
paypal/squbs
|
squbs-pattern/src/main/scala/org/squbs/pattern/orchestration/Aggregator.scala
|
Scala
|
apache-2.0
| 6,696 |
package argonaut
import monocle.macros.GenLens
import scalaz._
import scala.annotation._
/**
* Parameters for pretty-printing a JSON value.
*
* @author Tony Morris
*
* @param indent The indentation to use if any format strings contain a new line.
* @param lbraceLeft Spaces to insert to left of a left brace.
* @param lbraceRight Spaces to insert to right of a left brace.
* @param rbraceLeft Spaces to insert to left of a right brace.
* @param rbraceRight Spaces to insert to right of a right brace.
* @param lbracketLeft Spaces to insert to left of a left bracket.
* @param lbracketRight Spaces to insert to right of a left bracket.
* @param rbracketLeft Spaces to insert to left of a right bracket.
* @param rbracketRight Spaces to insert to right of a right bracket.
* @param lrbracketsEmpty Spaces to insert for an empty array.
* @param arrayCommaLeft Spaces to insert to left of a comma in an array.
* @param arrayCommaRight Spaces to insert to right of a comma in an array.
* @param objectCommaLeft Spaces to insert to left of a comma in an object.
* @param objectCommaRight Spaces to insert to right of a comma in an object.
* @param colonLeft Spaces to insert to left of a colon.
* @param colonRight Spaces to insert to right of a colon.
* @param preserveOrder Determines if field ordering should be preserved.
* @param dropNullKeys Determines if object fields with values of null are dropped from the output.
*/
case class PrettyParams(
indent: String
, lbraceLeft: String
, lbraceRight: String
, rbraceLeft: String
, rbraceRight: String
, lbracketLeft: String
, lbracketRight: String
, rbracketLeft: String
, rbracketRight: String
, lrbracketsEmpty: String
, arrayCommaLeft: String
, arrayCommaRight: String
, objectCommaLeft: String
, objectCommaRight: String
, colonLeft: String
, colonRight: String
, preserveOrder: Boolean
, dropNullKeys: Boolean
) {
private[this] final val openBraceText = "{"
private[this] final val closeBraceText = "}"
private[this] final val openArrayText = "["
private[this] final val closeArrayText = "]"
private[this] final val commaText = ","
private[this] final val colonText = ":"
private[this] final val nullText = "null"
private[this] final val trueText = "true"
private[this] final val falseText = "false"
  private[this] final val stringEnclosureText = "\""
private[this] val _lbraceLeft = addIndentation(lbraceLeft)
private[this] val _lbraceRight = addIndentation(lbraceRight)
private[this] val _rbraceLeft = addIndentation(rbraceLeft)
private[this] val _rbraceRight = addIndentation(rbraceRight)
private[this] val _lbracketLeft = addIndentation(lbracketLeft)
private[this] val _lbracketRight = addIndentation(lbracketRight)
private[this] val _rbracketLeft = addIndentation(rbracketLeft)
private[this] val _rbracketRight = addIndentation(rbracketRight)
private[this] val _lrbracketsEmpty = addIndentation(lrbracketsEmpty)
private[this] val _arrayCommaLeft = addIndentation(arrayCommaLeft)
private[this] val _arrayCommaRight = addIndentation(arrayCommaRight)
private[this] val _objectCommaLeft = addIndentation(objectCommaLeft)
private[this] val _objectCommaRight = addIndentation(objectCommaRight)
private[this] val _colonLeft = addIndentation(colonLeft)
private[this] val _colonRight = addIndentation(colonRight)
private[this] def addIndentation(s: String): Int => String = {
    val lastNewLineIndex = s.lastIndexOf("\n")
if (lastNewLineIndex < 0) {
_ => s
} else {
val afterLastNewLineIndex = lastNewLineIndex + 1
val start = s.substring(0, afterLastNewLineIndex)
val end = s.substring(afterLastNewLineIndex)
n => start + indent * n + end
}
}
import Memo._
private[this] def vectorMemo() = {
var vector: Vector[String] = Vector.empty
val memoFunction: (Int => String) => Int => String = f => k => {
val localVector = vector
val adjustedK = if (k < 0) 0 else k
if (localVector.size > adjustedK) {
localVector(adjustedK)
} else {
val newVector = Vector.tabulate(adjustedK + 1)(f)
vector = newVector
newVector.last
}
}
memo(memoFunction)
}
// TODO: Vector based memoisation.
private[this] final val lbraceMemo = vectorMemo(){depth: Int => "%s%s%s".format(_lbraceLeft(depth), openBraceText, _lbraceRight(depth + 1))}
private[this] final val rbraceMemo = vectorMemo(){depth: Int => "%s%s%s".format(_rbraceLeft(depth), closeBraceText, _rbraceRight(depth + 1))}
private[this] final val lbracketMemo = vectorMemo(){depth: Int => "%s%s%s".format(_lbracketLeft(depth), openArrayText, _lbracketRight(depth + 1))}
private[this] final val rbracketMemo = vectorMemo(){depth: Int => "%s%s%s".format(_rbracketLeft(depth), closeArrayText, _rbracketRight(depth))}
private[this] final val lrbracketsEmptyMemo = vectorMemo(){depth: Int => "%s%s%s".format(openArrayText, _lrbracketsEmpty(depth), closeArrayText)}
private[this] final val arrayCommaMemo = vectorMemo(){depth: Int => "%s%s%s".format(_arrayCommaLeft(depth + 1), commaText, _arrayCommaRight(depth + 1))}
private[this] final val objectCommaMemo = vectorMemo(){depth: Int => "%s%s%s".format(_objectCommaLeft(depth + 1), commaText, _objectCommaRight(depth + 1))}
private[this] final val colonMemo = vectorMemo(){depth: Int => "%s%s%s".format(_colonLeft(depth + 1), colonText, _colonRight(depth + 1))}
/**
* Returns a string representation of a pretty-printed JSON value.
*/
final def pretty(j: Json): String = {
import Json._
import StringEscaping._
@tailrec
def appendJsonString(builder: StringBuilder, jsonString: String, normalChars: Boolean = true): StringBuilder = {
if (normalChars) {
jsonString.span(isNormalChar) match {
case (prefix, suffix) => {
val prefixAppended = builder.append(prefix)
if (suffix.isEmpty) prefixAppended else appendJsonString(prefixAppended, suffix, false)
}
}
} else {
jsonString.span(isNotNormalChar) match {
case (prefix, suffix) => {
val prefixAppended = prefix.foldLeft(builder)((working, char) => working.append(escape(char)))
if (suffix.isEmpty) prefixAppended else appendJsonString(prefixAppended, suffix, true)
}
}
}
}
def encloseJsonString(builder: StringBuilder, jsonString: JsonString): StringBuilder = {
appendJsonString(builder.append(stringEnclosureText), jsonString).append(stringEnclosureText)
}
def trav(builder: StringBuilder, depth: Int, k: Json): StringBuilder = {
def lbrace(builder: StringBuilder): StringBuilder = {
builder.append(lbraceMemo(depth))
}
def rbrace(builder: StringBuilder): StringBuilder = {
builder.append(rbraceMemo(depth))
}
def lbracket(builder: StringBuilder): StringBuilder = {
builder.append(lbracketMemo(depth))
}
def rbracket(builder: StringBuilder): StringBuilder = {
builder.append(rbracketMemo(depth))
}
def lrbracketsEmpty(builder: StringBuilder): StringBuilder = {
builder.append(lrbracketsEmptyMemo(depth))
}
def arrayComma(builder: StringBuilder): StringBuilder = {
builder.append(arrayCommaMemo(depth))
}
def objectComma(builder: StringBuilder): StringBuilder = {
builder.append(objectCommaMemo(depth))
}
def colon(builder: StringBuilder): StringBuilder = {
builder.append(colonMemo(depth))
}
k.fold[StringBuilder](
builder.append(nullText)
, bool => builder.append(if (bool) trueText else falseText)
, n => n match {
case JsonLong(x) => builder append x.toString
case JsonDouble(x) => builder append x.toString
case JsonDecimal(x) => builder append x
case JsonBigDecimal(x) => builder append x.toString
}
, s => encloseJsonString(builder, s)
, e => if (e.isEmpty) {
lrbracketsEmpty(builder)
} else {
rbracket(e.foldLeft((true, lbracket(builder))){case ((firstElement, builder), subElement) =>
val withComma = if (firstElement) builder else arrayComma(builder)
val updatedBuilder = trav(withComma, depth + 1, subElement)
(false, updatedBuilder)
}._2)
}
, o => {
rbrace((if (preserveOrder) o.toList else o.toMap).foldLeft((true, lbrace(builder))){case ((firstElement, builder), (key, value)) =>
val ignoreKey = dropNullKeys && value.isNull
if (ignoreKey) {
(firstElement, builder)
} else {
val withComma = if (firstElement) builder else objectComma(builder)
(false, trav(colon(encloseJsonString(withComma, key)), depth + 1, value))
}
}._2)
}
)
}
trav(new StringBuilder(), 0, j).toString()
}
/**
* Returns a `Vector[Char]` representation of a pretty-printed JSON value.
*/
final def lpretty(j: Json): Vector[Char] = Vector.empty[Char] ++ pretty(j)
}
object StringEscaping {
final def escape(c: Char): String = (c: @switch) match {
    case '\\' => "\\\\"
    case '"' => "\\\""
    case '\b' => "\\b"
    case '\f' => "\\f"
    case '\n' => "\\n"
    case '\r' => "\\r"
    case '\t' => "\\t"
    case possibleUnicode => if (Character.isISOControl(possibleUnicode)) "\\u%04x".format(possibleUnicode.toInt) else possibleUnicode.toString
}
final val isNormalChar: Char => Boolean = char => (char: @switch) match {
    case '\\' => false
    case '"' => false
    case '\b' => false
    case '\f' => false
    case '\n' => false
    case '\r' => false
    case '\t' => false
case possibleUnicode => !Character.isISOControl(possibleUnicode)
}
final val isNotNormalChar: Char => Boolean = char => !isNormalChar(char)
}
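/**
 * A brief illustrative sketch, not part of the original source, of the escaping
 * helpers above.
 */
object StringEscapingUsageSketch {
  def demo(): Unit = {
    assert(StringEscaping.escape('\n') == "\\n") // control characters become escape sequences
    assert(StringEscaping.escape('a') == "a")    // normal characters pass through unchanged
    assert(!StringEscaping.isNormalChar('\t'))
    assert(StringEscaping.isNormalChar('a'))
  }
}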
object PrettyParams extends PrettyParamss
trait PrettyParamss {
/**
* A pretty-printer configuration that inserts no spaces.
*/
final val nospace: PrettyParams =
PrettyParams(
indent = ""
, lbraceLeft = ""
, lbraceRight = ""
, rbraceLeft = ""
, rbraceRight = ""
, lbracketLeft = ""
, lbracketRight = ""
, rbracketLeft = ""
, rbracketRight = ""
, lrbracketsEmpty = ""
, arrayCommaLeft = ""
, arrayCommaRight = ""
, objectCommaLeft = ""
, objectCommaRight = ""
, colonLeft = ""
, colonRight = ""
, preserveOrder = false
, dropNullKeys = false
)
/**
* A pretty-printer configuration that indents by the given spaces.
*/
final def pretty(indent: String): PrettyParams =
PrettyParams(
indent = indent
, lbraceLeft = ""
    , lbraceRight = "\n"
    , rbraceLeft = "\n"
    , rbraceRight = ""
    , lbracketLeft = ""
    , lbracketRight = "\n"
    , rbracketLeft = "\n"
    , rbracketRight = ""
    , lrbracketsEmpty = "\n"
    , arrayCommaLeft = ""
    , arrayCommaRight = "\n"
    , objectCommaLeft = ""
    , objectCommaRight = "\n"
, colonLeft = " "
, colonRight = " "
, preserveOrder = false
, dropNullKeys = false
)
/**
* A pretty-printer configuration that indents by two spaces.
*/
final val spaces2: PrettyParams =
pretty(" ")
/**
* A pretty-printer configuration that indents by four spaces.
*/
final val spaces4: PrettyParams =
pretty(" ")
val lenser = GenLens[PrettyParams]
val indentL = lenser(_.indent)
val lbraceLeftL = lenser(_.lbraceLeft)
val lbraceRightL = lenser(_.lbraceRight)
val rbraceLeftL = lenser(_.rbraceLeft)
val rbraceRightL = lenser(_.rbraceRight)
val lbracketLeftL = lenser(_.lbracketLeft)
val lbracketRightL = lenser(_.lbracketRight)
val rbracketLeftL = lenser(_.rbracketLeft)
val rbracketRightL = lenser(_.rbracketRight)
val lrbracketsEmptyL = lenser(_.lrbracketsEmpty)
val arrayCommaLeftL = lenser(_.arrayCommaLeft)
val arrayCommaRightL = lenser(_.arrayCommaRight)
val objectCommaLeftL = lenser(_.objectCommaLeft)
val objectCommaRightL = lenser(_.objectCommaRight)
val colonLeftL = lenser(_.colonLeft)
val colonRightL = lenser(_.colonRight)
val preserveOrderL = lenser(_.preserveOrder)
val dropNullKeysL = lenser(_.dropNullKeys)
}
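/**
 * A minimal usage sketch, not part of the original source. The `json` argument is
 * assumed to be supplied by the caller; the sketch only shows how the predefined
 * configurations and the lenses above are meant to be combined.
 */
object PrettyParamsUsageSketch {
  // Stock two-space layout.
  def twoSpaces(json: Json): String =
    PrettyParams.spaces2.pretty(json)

  // Tweak a configuration through the lenses: keep insertion order, drop null fields.
  def orderedNoNulls(json: Json): String = {
    val params = PrettyParams.dropNullKeysL.set(true)(
      PrettyParams.preserveOrderL.set(true)(PrettyParams.spaces2))
    params.pretty(json)
  }
}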
|
etorreborre/argonaut
|
src/main/scala/argonaut/PrettyParams.scala
|
Scala
|
bsd-3-clause
| 12,321 |
package bootstrap.liftweb
import net.liftweb._
import util._
import Helpers._
import common._
import http._
import sitemap._
import Loc._
import mapper._
import code.model._
import nl.malienkolders.htm.battle.comet._
import nl.malienkolders.htm.battle.model._
/**
* A class that's instantiated early and run. It allows the application
* to modify lift's environment
*/
class Boot {
def boot {
if (!DB.jndiJdbcConnAvailable_?) {
val vendor =
new StandardDBVendor(Props.get("db.driver") openOr "org.h2.Driver",
Props.get("db.url") openOr
"jdbc:h2:mem:htm_battle",
Props.get("db.user"), Props.get("db.password"))
LiftRules.unloadHooks.append(vendor.closeAllConnections_! _)
DB.defineConnectionManager(DefaultConnectionIdentifier, vendor)
}
// Use Lift's Mapper ORM to populate the database
// you don't need to use Mapper to use Lift... use
// any ORM you want
Schemifier.schemify(true, Schemifier.infoF _, User, Viewer, Participant, AdminServer)
    // where to search for snippets
LiftRules.addToPackages("nl.malienkolders.htm.battle")
// Build SiteMap
def sitemap = SiteMap(
Menu.i("Poule Selection") / "index",
Menu.i("Viewers") / "viewers",
Menu.i("Fight") / "fight")
//def sitemapMutators = User.sitemapMutator
// set the sitemap. Note if you don't want access control for
// each page, just comment this line out.
LiftRules.setSiteMapFunc(() => sitemap)
// Use jQuery 1.4
LiftRules.jsArtifacts = net.liftweb.http.js.jquery.JQuery14Artifacts
//Show the spinny image when an Ajax call starts
LiftRules.ajaxStart =
Full(() => LiftRules.jsArtifacts.show("ajax-loader").cmd)
// Make the spinny image go away when it ends
LiftRules.ajaxEnd =
Full(() => LiftRules.jsArtifacts.hide("ajax-loader").cmd)
LiftRules.noticesAutoFadeOut.default.set((noticeType: NoticeType.Value) => Full((1 seconds, 2 seconds)))
// Force the request to be UTF-8
LiftRules.early.append(_.setCharacterEncoding("UTF-8"))
// What is the function to test if a user is logged in?
LiftRules.loggedInTest = Full(() => User.loggedIn_?)
// Use HTML5 for rendering
LiftRules.htmlProperties.default.set((r: Req) =>
new Html5Properties(r.userAgent))
// Make a transaction span the whole HTTP request
S.addAround(DB.buildLoanWrapper)
}
}
|
hema-tournament-manager/htm
|
htm-battle/src/main/scala/bootstrap/liftweb/Boot.scala
|
Scala
|
apache-2.0
| 2,425 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package util.parsing.combinator
// A shallow wrapper over another CharSequence (usually a String)
//
// See SI-7710: in jdk7u6 String.subSequence stopped sharing the char array of the original
// string and began copying it.
// RegexParsers calls subSequence twice per input character: that's a lot of array copying!
private[combinator] class SubSequence(s: CharSequence, start: Int, val length: Int) extends CharSequence {
def this(s: CharSequence, start: Int) = this(s, start, s.length - start)
def charAt(i: Int) =
if (i >= 0 && i < length) s.charAt(start + i) else throw new IndexOutOfBoundsException(s"index: $i, length: $length")
def subSequence(_start: Int, _end: Int) = {
if (_start < 0 || _end < 0 || _end > length || _start > _end)
throw new IndexOutOfBoundsException(s"start: ${_start}, end: ${_end}, length: $length")
new SubSequence(s, start + _start, _end - _start)
}
override def toString = s.subSequence(start, start + length).toString
}
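// A small illustrative sketch, not part of the original source: SubSequence shares
// the underlying characters instead of copying them, so nested subSequence calls
// stay cheap.
private[combinator] object SubSequenceUsageSketch {
  def demo(): Unit = {
    val s = new SubSequence("scala parser combinators", 6) // view over "parser combinators"
    assert(s.charAt(0) == 'p')
    assert(s.subSequence(0, 6).toString == "parser")
  }
}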
|
scala/scala-parser-combinators
|
shared/src/main/scala/scala/util/parsing/combinator/SubSequence.scala
|
Scala
|
apache-2.0
| 1,291 |
package scjson.converter
import scutil.core.implicits.*
import scutil.lang.*
object NumberBigDecimalConverters {
val IntToBigDecimal:JsonConverter[Int,BigDecimal] =
Converter total { it => BigDecimal exact it.toLong }
val BigDecimalToInt:JsonConverter[BigDecimal,Int] =
Converter { it =>
try {
Validated valid it.toIntExact
}
catch { case e:ArithmeticException =>
// fractional or not fitting
JsonInvalid(show"${it} is not an Int")
}
}
//------------------------------------------------------------------------------
val LongToBigDecimal:JsonConverter[Long,BigDecimal] =
Converter total BigDecimal.exact
val BigDecimalToLong:JsonConverter[BigDecimal,Long] =
Converter { it =>
try {
Validated valid it.toLongExact
}
catch { case e:ArithmeticException =>
// fractional or not fitting
JsonInvalid(show"${it} is not a Long")
}
}
//------------------------------------------------------------------------------
val BigIntToBigDecimal:JsonConverter[BigInt,BigDecimal] =
Converter total BigDecimal.exact
val BigDecimalToBigInt:JsonConverter[BigDecimal,BigInt] =
Converter { it =>
// fractional
it.toBigIntExact toValid JsonError(show"${it} is not a BigInt")
}
//------------------------------------------------------------------------------
val FloatToBigDecimal:JsonConverter[Float,BigDecimal] =
Converter { it =>
try {
Validated valid (BigDecimal exact it.toDouble)
}
catch { case e:NumberFormatException =>
// infinite or NaN
JsonInvalid(show"$it is not a BigDecimal")
}
}
  // NOTE might return Float.NegativeInfinity or Float.PositiveInfinity
val BigDecimalToFloat:JsonConverter[BigDecimal,Float] =
Converter total (_.toFloat)
//------------------------------------------------------------------------------
val DoubleToBigDecimal:JsonConverter[Double,BigDecimal] =
Converter { it =>
try {
Validated valid (BigDecimal exact it)
}
catch { case e:NumberFormatException =>
// infinite or NaN
JsonInvalid(show"$it is not a BigDecimal")
}
}
// NOTE might return Double.NEGATIVE_INFINITY or Double.POSITIVE_INFINITY
val BigDecimalToDouble:JsonConverter[BigDecimal,Double] =
Converter total (_.toDouble)
}
|
ritschwumm/scjson
|
modules/converter/src/main/scala/converter/NumberBigDecimalConverters.scala
|
Scala
|
bsd-2-clause
| 2,264 |
/*
* Copyright © 2014 TU Berlin ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.emmalanguage
package api
import alg.Alg
import org.apache.spark.sql.Dataset
import org.apache.spark.sql.Encoder
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
import scala.language.implicitConversions
import scala.util.hashing.MurmurHash3
/** A `DataBag` implementation backed by a Spark `Dataset`. */
class SparkDataset[A: Meta] private[api](@transient private[emmalanguage] val rep: Dataset[A]) extends DataBag[A] {
import Meta.Projections._
import SparkDataset.encoderForType
import SparkDataset.wrap
import api.spark.fromRDD
import rep.sparkSession.sqlContext.implicits._
@transient override val m = implicitly[Meta[A]]
private[emmalanguage] implicit def spark = this.rep.sparkSession
// -----------------------------------------------------
// Structural recursion
// -----------------------------------------------------
override def fold[B: Meta](alg: Alg[A, B]): B =
try {
rep.map(x => alg.init(x)).reduce(alg.plus)
} catch {
case e: UnsupportedOperationException if e.getMessage == "empty collection" => alg.zero
case e: Throwable => throw e
}
// -----------------------------------------------------
// Monad Ops
// -----------------------------------------------------
override def map[B: Meta](f: (A) => B): DataBag[B] =
rep.map(f)
override def flatMap[B: Meta](f: (A) => DataBag[B]): DataBag[B] =
rep.flatMap((x: A) => f(x).collect())
def withFilter(p: (A) => Boolean): DataBag[A] =
rep.filter(p)
// -----------------------------------------------------
// Grouping
// -----------------------------------------------------
override def groupBy[K: Meta](k: (A) => K): DataBag[Group[K, DataBag[A]]] =
DataBag.from(rep.rdd).groupBy(k)
// -----------------------------------------------------
// Set operations
// -----------------------------------------------------
override def union(that: DataBag[A]): DataBag[A] = that match {
case dbag: ScalaSeq[A] => this union SparkDataset(dbag.rep)
case dbag: SparkRDD[A] => this.rep union dbag.rep.toDS()
case dbag: SparkDataset[A] => this.rep union dbag.rep
case _ => throw new IllegalArgumentException(s"Unsupported rhs for `union` of type: ${that.getClass}")
}
override def distinct: DataBag[A] =
rep.distinct
// -----------------------------------------------------
// Partition-based Ops
// -----------------------------------------------------
def sample(k: Int, seed: Long = 5394826801L): Vector[A] = {
    // up to k sampled elements per partition, keyed and sorted by partition ID
val Seq(hd, tl@_*) = rep.rdd.zipWithIndex()
.mapPartitionsWithIndex({ (pid, it) =>
val sample = Array.fill(k)(Option.empty[A])
for ((e, i) <- it) {
if (i >= k) {
val j = util.RanHash(seed).at(i).nextLong(i + 1)
if (j < k) sample(j.toInt) = Some(e)
} else sample(i.toInt) = Some(e)
}
Seq(pid -> sample).toIterator
}).collect().sortBy(_._1).map(_._2).toSeq
// merge the sequence of samples and filter None values
val rs = for {
Some(v) <- tl.foldLeft(hd)((xs, ys) => for ((x, y) <- xs zip ys) yield y orElse x)
} yield v
rs.toVector
}
def zipWithIndex(): DataBag[(A, Long)] =
DataBag.from(rep.rdd.zipWithIndex())
// -----------------------------------------------------
// Sinks
// -----------------------------------------------------
override def writeCSV(path: String, format: CSV)
(implicit converter: CSVConverter[A]): Unit = rep.write
.option("header", format.header)
.option("delimiter", format.delimiter.toString)
.option("charset", format.charset.toString)
.option("quote", format.quote.getOrElse('"').toString)
    .option("escape", format.escape.getOrElse('\\').toString)
.option("nullValue", format.nullValue)
.mode("overwrite").csv(path)
override def writeText(path: String): Unit =
rep.write.text(path)
def writeParquet(path: String, format: Parquet)
(implicit converter: ParquetConverter[A]): Unit = rep.write
.option("binaryAsString", format.binaryAsString)
.option("int96AsTimestamp", format.int96AsTimestamp)
.option("cacheMetadata", format.cacheMetadata)
.option("codec", format.codec.toString)
.mode("overwrite").parquet(path)
def collect(): Seq[A] = collected
private lazy val collected: Seq[A] =
rep.collect()
// -----------------------------------------------------
// Pre-defined folds
// -----------------------------------------------------
override def reduceOption(p: (A, A) => A): Option[A] =
try {
Option(rep.reduce(p))
} catch {
case e: UnsupportedOperationException if e.getMessage == "empty collection" => None
case e: Throwable => throw e
}
override def find(p: A => Boolean): Option[A] =
try {
Option(rep.filter(p).head())
} catch {
case e: NoSuchElementException if e.getMessage == "next on empty iterator" => None
case e: Throwable => throw e
}
override def min(implicit o: Ordering[A]): A =
reduceOption(o.min).get
override def max(implicit o: Ordering[A]): A =
reduceOption(o.max).get
// -----------------------------------------------------
// equals and hashCode
// -----------------------------------------------------
override def equals(o: Any): Boolean =
super.equals(o)
override def hashCode(): Int = {
val (a, b, c, n) = rep
.mapPartitions(it => {
var a, b, n = 0
var c = 1
it foreach { x =>
val h = x.##
a += h
b ^= h
if (h != 0) c *= h
n += 1
}
Some((a, b, c, n)).iterator
})
.collect()
.fold((0, 0, 1, 0))((x, r) => (x, r) match {
case ((a1, b1, c1, n1), (a2, b2, c2, n2)) => (a1 + a2, b1 ^ b2, c1 * c2, n1 + n2)
})
var h = MurmurHash3.traversableSeed
h = MurmurHash3.mix(h, a)
h = MurmurHash3.mix(h, b)
h = MurmurHash3.mixLast(h, c)
MurmurHash3.finalizeHash(h, n)
}
}
object SparkDataset extends DataBagCompanion[SparkSession] {
import Meta.Projections._
implicit def encoderForType[T: Meta]: Encoder[T] =
ExpressionEncoder[T]
// ---------------------------------------------------------------------------
// Constructors
// ---------------------------------------------------------------------------
def empty[A: Meta](
implicit spark: SparkSession
): DataBag[A] = spark.emptyDataset[A]
def apply[A: Meta](values: Seq[A])(
implicit spark: SparkSession
): DataBag[A] = spark.createDataset(values)
def readText(path: String)(
implicit spark: SparkSession
): DataBag[String] = spark.read.textFile(path)
def readCSV[A: Meta : CSVConverter](path: String, format: CSV)(
implicit spark: SparkSession
): DataBag[A] = spark.read
.option("header", format.header)
.option("delimiter", format.delimiter.toString)
.option("charset", format.charset.toString)
.option("quote", format.quote.getOrElse('"').toString)
    .option("escape", format.escape.getOrElse('\\').toString)
.option("comment", format.escape.map(_.toString).orNull)
.option("nullValue", format.nullValue)
.schema(encoderForType[A].schema)
.csv(path).as[A]
def readParquet[A: Meta : ParquetConverter](path: String, format: Parquet)(
implicit spark: SparkSession
): DataBag[A] = spark.read
.option("binaryAsString", format.binaryAsString)
.option("int96AsTimestamp", format.int96AsTimestamp)
.option("cacheMetadata", format.cacheMetadata)
.option("codec", format.codec.toString)
.schema(encoderForType[A].schema)
.parquet(path).as[A]
// ---------------------------------------------------------------------------
// Implicit Rep -> DataBag conversion
// ---------------------------------------------------------------------------
implicit def wrap[A: Meta](rep: Dataset[A]): DataBag[A] =
new SparkDataset(rep)
}
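/**
 * A minimal usage sketch, not part of the original source. It assumes the caller
 * provides a `SparkSession` and a `Meta` instance for `A` (as the context bounds
 * above require) and only exercises operations defined in this file.
 */
object SparkDatasetUsageSketch {
  def distinctUnion[A: Meta](xs: Seq[A], ys: Seq[A])(implicit spark: SparkSession): Seq[A] = {
    val left: DataBag[A] = SparkDataset(xs)  // wraps a Dataset[A] via the constructors above
    val right: DataBag[A] = SparkDataset(ys)
    (left union right).distinct.collect()
  }
}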
|
aalexandrov/emma
|
emma-spark/src/main/scala/org/emmalanguage/api/SparkDataset.scala
|
Scala
|
apache-2.0
| 8,690 |
package org.monkeynuthead.monkeybarrel.core
/**
* Type alias definitions
*/
object Types {
type Attribute = String
type AttributeValue = String
type Measure = String
type AggFunc = (Double, Double) => Double
}
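/**
 * A tiny illustrative sketch, not part of the original source, showing how the
 * aliases above are intended to be used.
 */
object TypesUsageSketch {
  import Types._

  val sum: AggFunc = _ + _
  val max: AggFunc = (a, b) => math.max(a, b)

  def aggregate(values: Seq[Double], f: AggFunc): Double = values.reduce(f)
}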
|
georgenicoll/monkey-barrel
|
old20150804/core/shared/src/main/scala/org/monkeynuthead/monkeybarrel/core/Types.scala
|
Scala
|
gpl-2.0
| 236 |
package me.aihe.dataframe
import scala.util.Try
/**
* Created by aihe on 12/21/15.
*/
case class Row(private[dataframe] val index: Int, private[dataframe] val data: Seq[Any], private[dataframe] val names: Seq[String]) {
lazy val valuesMap = Map(names.zip(data): _*)
val length = data.length
val size = length
override def toString = {
    names.mkString("", "\t", "\n") + data.mkString("\t")
}
def apply(index: Int): Any = {
require(0 <= index && index < data.size)
data(index)
}
def apply(name: String): Any = {
require(valuesMap.contains(name))
valuesMap(name)
}
def get(index: Int): Any = apply(index)
def get(name: String): Any = apply(name)
def getAs[T](i: Int): Option[T] = Try(apply(i).asInstanceOf[T]).toOption
def getAs[T](name: String): Option[T] = Try(apply(name).asInstanceOf[T]).toOption
}
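/**
 * A short illustrative sketch, not part of the original source; the column names
 * and values are made up.
 */
object RowUsageSketch {
  def demo(): Unit = {
    val row = Row(index = 0, data = Seq("Alice", 42), names = Seq("name", "age"))
    assert(row("name") == "Alice")             // lookup by column name
    assert(row(1) == 42)                       // lookup by position
    assert(row.getAs[Int]("age").contains(42)) // typed access wrapped in Option
  }
}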
|
AiHe/DataFrame
|
src/main/scala/me/aihe/dataframe/Row.scala
|
Scala
|
apache-2.0
| 862 |
/*
* Copyright 2001-2014 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
trait PropSpecLike extends PropSpecRegistration with ClassicTests
|
cheeseng/scalatest
|
scalatest/src/main/scala/org/scalatest/PropSpecLike.scala
|
Scala
|
apache-2.0
| 690 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.scala.stream.table
import org.apache.flink.api.scala._
import org.apache.flink.table.api.java.utils.UserDefinedAggFunctions.WeightedAvgWithRetract
import org.apache.flink.table.api.{Table, ValidationException}
import org.apache.flink.table.api.scala._
import org.apache.flink.table.expressions.utils.Func1
import org.apache.flink.table.utils.TableTestUtil._
import org.apache.flink.table.utils.{StreamTableTestUtil, TableTestBase}
import org.junit.Test
class OverWindowTest extends TableTestBase {
private val streamUtil: StreamTableTestUtil = streamTestUtil()
val table: Table = streamUtil.addTable[(Int, String, Long)]("MyTable",
'a, 'b, 'c, 'proctime.proctime, 'rowtime.rowtime)
@Test(expected = classOf[ValidationException])
def testInvalidWindowAlias(): Unit = {
val result = table
.window(Over partitionBy 'c orderBy 'rowtime preceding 2.rows as 'w)
.select('c, 'b.count over 'x)
streamUtil.tEnv.optimize(result.getRelNode, updatesAsRetraction = true)
}
@Test(expected = classOf[ValidationException])
def testOrderBy(): Unit = {
val result = table
.window(Over partitionBy 'c orderBy 'abc preceding 2.rows as 'w)
.select('c, 'b.count over 'w)
streamUtil.tEnv.optimize(result.getRelNode, updatesAsRetraction = true)
}
@Test(expected = classOf[ValidationException])
def testPrecedingAndFollowingUsingIsLiteral(): Unit = {
val result = table
.window(Over partitionBy 'c orderBy 'rowtime preceding 2 following "xx" as 'w)
.select('c, 'b.count over 'w)
streamUtil.tEnv.optimize(result.getRelNode, updatesAsRetraction = true)
}
@Test(expected = classOf[ValidationException])
def testPrecedingAndFollowingUsingSameType(): Unit = {
val result = table
.window(Over partitionBy 'c orderBy 'rowtime preceding 2.rows following CURRENT_RANGE as 'w)
.select('c, 'b.count over 'w)
streamUtil.tEnv.optimize(result.getRelNode, updatesAsRetraction = true)
}
@Test(expected = classOf[ValidationException])
def testPartitionByWithUnresolved(): Unit = {
val result = table
.window(Over partitionBy 'a + 'b orderBy 'rowtime preceding 2.rows as 'w)
.select('c, 'b.count over 'w)
streamUtil.tEnv.optimize(result.getRelNode, updatesAsRetraction = true)
}
@Test(expected = classOf[ValidationException])
def testPartitionByWithNotKeyType(): Unit = {
val table2 = streamUtil.addTable[(Int, String, Either[Long, String])]("MyTable2", 'a, 'b, 'c)
val result = table2
.window(Over partitionBy 'c orderBy 'rowtime preceding 2.rows as 'w)
.select('c, 'b.count over 'w)
streamUtil.tEnv.optimize(result.getRelNode, updatesAsRetraction = true)
}
@Test(expected = classOf[ValidationException])
def testPrecedingValue(): Unit = {
val result = table
.window(Over orderBy 'rowtime preceding -1.rows as 'w)
.select('c, 'b.count over 'w)
streamUtil.tEnv.optimize(result.getRelNode, updatesAsRetraction = true)
}
@Test(expected = classOf[ValidationException])
def testFollowingValue(): Unit = {
val result = table
.window(Over orderBy 'rowtime preceding 1.rows following -2.rows as 'w)
.select('c, 'b.count over 'w)
streamUtil.tEnv.optimize(result.getRelNode, updatesAsRetraction = true)
}
@Test(expected = classOf[ValidationException])
def testUdAggWithInvalidArgs(): Unit = {
val weightedAvg = new WeightedAvgWithRetract
val result = table
.window(Over orderBy 'rowtime preceding 1.minutes as 'w)
.select('c, weightedAvg('b, 'a) over 'w)
streamUtil.tEnv.optimize(result.getRelNode, updatesAsRetraction = true)
}
@Test
def testScalarFunctionsOnOverWindow() = {
val weightedAvg = new WeightedAvgWithRetract
val plusOne = Func1
val result = table
.window(Over partitionBy 'b orderBy 'proctime preceding UNBOUNDED_ROW as 'w)
.select(
plusOne('a.sum over 'w as 'wsum) as 'd,
('a.count over 'w).exp(),
(weightedAvg('c, 'a) over 'w) + 1,
"AVG:".toExpr + (weightedAvg('c, 'a) over 'w),
array(weightedAvg('c, 'a) over 'w, 'a.count over 'w))
val expected =
unaryNode(
"DataStreamCalc",
unaryNode(
"DataStreamOverAggregate",
unaryNode(
"DataStreamCalc",
streamTableNode(0),
term("select", "a", "b", "c", "proctime")
),
term("partitionBy", "b"),
term("orderBy", "proctime"),
term("rows", "BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW"),
term("select", "a", "b", "c", "proctime",
"SUM(a) AS w0$o0",
"COUNT(a) AS w0$o1",
"WeightedAvgWithRetract(c, a) AS w0$o2")
),
term("select",
s"${plusOne.functionIdentifier}(w0$$o0) AS d",
"EXP(CAST(w0$o1)) AS _c1",
"+(w0$o2, 1) AS _c2",
"||('AVG:', CAST(w0$o2)) AS _c3",
"ARRAY(w0$o2, w0$o1) AS _c4")
)
streamUtil.verifyTable(result, expected)
}
@Test
def testProcTimeBoundedPartitionedRowsOver() = {
val weightedAvg = new WeightedAvgWithRetract
val result = table
.window(Over partitionBy 'b orderBy 'proctime preceding 2.rows following CURRENT_ROW as 'w)
.select('c, weightedAvg('c, 'a) over 'w)
val expected =
unaryNode(
"DataStreamCalc",
unaryNode(
"DataStreamOverAggregate",
unaryNode(
"DataStreamCalc",
streamTableNode(0),
term("select", "a", "b", "c", "proctime")
),
term("partitionBy", "b"),
term("orderBy", "proctime"),
term("rows", "BETWEEN 2 PRECEDING AND CURRENT ROW"),
term("select", "a", "b", "c", "proctime", "WeightedAvgWithRetract(c, a) AS w0$o0")
),
term("select", "c", "w0$o0 AS _c1")
)
streamUtil.verifyTable(result, expected)
}
@Test
def testProcTimeBoundedPartitionedRangeOver() = {
val weightedAvg = new WeightedAvgWithRetract
val result = table
.window(
Over partitionBy 'a orderBy 'proctime preceding 2.hours following CURRENT_RANGE as 'w)
.select('a, weightedAvg('c, 'a) over 'w as 'myAvg)
val expected =
unaryNode(
"DataStreamCalc",
unaryNode(
"DataStreamOverAggregate",
unaryNode(
"DataStreamCalc",
streamTableNode(0),
term("select", "a", "c", "proctime")
),
term("partitionBy", "a"),
term("orderBy", "proctime"),
term("range", "BETWEEN 7200000 PRECEDING AND CURRENT ROW"),
term(
"select",
"a",
"c",
"proctime",
"WeightedAvgWithRetract(c, a) AS w0$o0"
)
),
term("select", "a", "w0$o0 AS myAvg")
)
streamUtil.verifyTable(result, expected)
}
@Test
def testProcTimeBoundedNonPartitionedRangeOver() = {
val result = table
.window(Over orderBy 'proctime preceding 10.second as 'w)
.select('a, 'c.count over 'w)
val expected =
unaryNode(
"DataStreamCalc",
unaryNode(
"DataStreamOverAggregate",
unaryNode(
"DataStreamCalc",
streamTableNode(0),
term("select", "a", "c", "proctime")
),
term("orderBy", "proctime"),
term("range", "BETWEEN 10000 PRECEDING AND CURRENT ROW"),
term("select", "a", "c", "proctime", "COUNT(c) AS w0$o0")
),
term("select", "a", "w0$o0 AS _c1")
)
streamUtil.verifyTable(result, expected)
}
@Test
def testProcTimeBoundedNonPartitionedRowsOver() = {
val result = table
.window(Over orderBy 'proctime preceding 2.rows as 'w)
.select('c, 'a.count over 'w)
val expected =
unaryNode(
"DataStreamCalc",
unaryNode(
"DataStreamOverAggregate",
unaryNode(
"DataStreamCalc",
streamTableNode(0),
term("select", "a", "c", "proctime")
),
term("orderBy", "proctime"),
term("rows", "BETWEEN 2 PRECEDING AND CURRENT ROW"),
term("select", "a", "c", "proctime", "COUNT(a) AS w0$o0")
),
term("select", "c", "w0$o0 AS _c1")
)
streamUtil.verifyTable(result, expected)
}
@Test
def testProcTimeUnboundedPartitionedRangeOver() = {
val weightedAvg = new WeightedAvgWithRetract
val result = table
.window(Over partitionBy 'c orderBy 'proctime preceding UNBOUNDED_RANGE following
CURRENT_RANGE as 'w)
.select('a, 'c, 'a.count over 'w, weightedAvg('c, 'a) over 'w)
val expected =
unaryNode(
"DataStreamCalc",
unaryNode(
"DataStreamOverAggregate",
unaryNode(
"DataStreamCalc",
streamTableNode(0),
term("select", "a", "c", "proctime")
),
term("partitionBy", "c"),
term("orderBy", "proctime"),
term("range", "BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW"),
term(
"select",
"a",
"c",
"proctime",
"COUNT(a) AS w0$o0",
"WeightedAvgWithRetract(c, a) AS w0$o1"
)
),
term(
"select",
"a",
"c",
"w0$o0 AS _c2",
"w0$o1 AS _c3"
)
)
streamUtil.verifyTable(result, expected)
}
@Test
def testProcTimeUnboundedPartitionedRowsOver() = {
val weightedAvg = new WeightedAvgWithRetract
val result = table
.window(
Over partitionBy 'c orderBy 'proctime preceding UNBOUNDED_ROW following CURRENT_ROW as 'w)
.select('c, 'a.count over 'w, weightedAvg('c, 'a) over 'w)
val expected =
unaryNode(
"DataStreamCalc",
unaryNode(
"DataStreamOverAggregate",
unaryNode(
"DataStreamCalc",
streamTableNode(0),
term("select", "a", "c", "proctime")
),
term("partitionBy", "c"),
term("orderBy", "proctime"),
term("rows", "BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW"),
term("select", "a", "c", "proctime",
"COUNT(a) AS w0$o0",
"WeightedAvgWithRetract(c, a) AS w0$o1")
),
term("select", "c", "w0$o0 AS _c1", "w0$o1 AS _c2")
)
streamUtil.verifyTable(result, expected)
}
@Test
def testProcTimeUnboundedNonPartitionedRangeOver() = {
val result = table
.window(
Over orderBy 'proctime preceding UNBOUNDED_RANGE as 'w)
.select('a, 'c, 'a.count over 'w, 'a.sum over 'w)
val expected =
unaryNode(
"DataStreamCalc",
unaryNode(
"DataStreamOverAggregate",
unaryNode(
"DataStreamCalc",
streamTableNode(0),
term("select", "a", "c", "proctime")
),
term("orderBy", "proctime"),
term("range", "BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW"),
term(
"select",
"a",
"c",
"proctime",
"COUNT(a) AS w0$o0",
"SUM(a) AS w0$o1"
)
),
term(
"select",
"a",
"c",
"w0$o0 AS _c2",
"w0$o1 AS _c3"
)
)
streamUtil.verifyTable(result, expected)
}
@Test
def testProcTimeUnboundedNonPartitionedRowsOver() = {
val result = table
.window(Over orderBy 'proctime preceding UNBOUNDED_ROW as 'w)
.select('c, 'a.count over 'w)
val expected =
unaryNode(
"DataStreamCalc",
unaryNode(
"DataStreamOverAggregate",
unaryNode(
"DataStreamCalc",
streamTableNode(0),
term("select", "a", "c", "proctime")
),
term("orderBy", "proctime"),
term("rows", "BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW"),
term("select", "a", "c", "proctime", "COUNT(a) AS w0$o0")
),
term("select", "c", "w0$o0 AS _c1")
)
streamUtil.verifyTable(result, expected)
}
@Test
def testRowTimeBoundedPartitionedRowsOver() = {
val weightedAvg = new WeightedAvgWithRetract
val result = table
.window(
Over partitionBy 'b orderBy 'rowtime preceding 2.rows following CURRENT_ROW as 'w)
.select('c, 'b.count over 'w, weightedAvg('c, 'a) over 'w as 'wAvg)
val expected =
unaryNode(
"DataStreamCalc",
unaryNode(
"DataStreamOverAggregate",
unaryNode(
"DataStreamCalc",
streamTableNode(0),
term("select", "a", "b", "c", "rowtime")
),
term("partitionBy", "b"),
term("orderBy", "rowtime"),
term("rows", "BETWEEN 2 PRECEDING AND CURRENT ROW"),
term("select", "a", "b", "c", "rowtime",
"COUNT(b) AS w0$o0",
"WeightedAvgWithRetract(c, a) AS w0$o1")
),
term("select", "c", "w0$o0 AS _c1", "w0$o1 AS wAvg")
)
streamUtil.verifyTable(result, expected)
}
@Test
def testRowTimeBoundedPartitionedRangeOver() = {
val weightedAvg = new WeightedAvgWithRetract
val result = table
.window(
Over partitionBy 'a orderBy 'rowtime preceding 2.hours following CURRENT_RANGE as 'w)
.select('a, 'c.avg over 'w, weightedAvg('c, 'a) over 'w as 'wAvg)
val expected =
unaryNode(
"DataStreamCalc",
unaryNode(
"DataStreamOverAggregate",
unaryNode(
"DataStreamCalc",
streamTableNode(0),
term("select", "a", "c", "rowtime")
),
term("partitionBy", "a"),
term("orderBy", "rowtime"),
term("range", "BETWEEN 7200000 PRECEDING AND CURRENT ROW"),
term(
"select",
"a",
"c",
"rowtime",
"AVG(c) AS w0$o0",
"WeightedAvgWithRetract(c, a) AS w0$o1"
)
),
term("select", "a", "w0$o0 AS _c1", "w0$o1 AS wAvg")
)
streamUtil.verifyTable(result, expected)
}
@Test
def testRowTimeBoundedNonPartitionedRangeOver() = {
val result = table
.window(Over orderBy 'rowtime preceding 10.second as 'w)
.select('a, 'c.count over 'w)
val expected =
unaryNode(
"DataStreamCalc",
unaryNode(
"DataStreamOverAggregate",
unaryNode(
"DataStreamCalc",
streamTableNode(0),
term("select", "a", "c", "rowtime")
),
term("orderBy", "rowtime"),
term("range", "BETWEEN 10000 PRECEDING AND CURRENT ROW"),
term("select", "a", "c", "rowtime", "COUNT(c) AS w0$o0")
),
term("select", "a", "w0$o0 AS _c1")
)
streamUtil.verifyTable(result, expected)
}
@Test
def testRowTimeBoundedNonPartitionedRowsOver() = {
val result = table
.window(Over orderBy 'rowtime preceding 2.rows as 'w)
.select('c, 'a.count over 'w)
val expected =
unaryNode(
"DataStreamCalc",
unaryNode(
"DataStreamOverAggregate",
unaryNode(
"DataStreamCalc",
streamTableNode(0),
term("select", "a", "c", "rowtime")
),
term("orderBy", "rowtime"),
term("rows", "BETWEEN 2 PRECEDING AND CURRENT ROW"),
term("select", "a", "c", "rowtime", "COUNT(a) AS w0$o0")
),
term("select", "c", "w0$o0 AS _c1")
)
streamUtil.verifyTable(result, expected)
}
@Test
def testRowTimeUnboundedPartitionedRangeOver() = {
val weightedAvg = new WeightedAvgWithRetract
val result = table
.window(Over partitionBy 'c orderBy 'rowtime preceding UNBOUNDED_RANGE following
CURRENT_RANGE as 'w)
.select('a, 'c, 'a.count over 'w, weightedAvg('c, 'a) over 'w as 'wAvg)
val expected =
unaryNode(
"DataStreamCalc",
unaryNode(
"DataStreamOverAggregate",
unaryNode(
"DataStreamCalc",
streamTableNode(0),
term("select", "a", "c", "rowtime")
),
term("partitionBy", "c"),
term("orderBy", "rowtime"),
term("range", "BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW"),
term(
"select",
"a",
"c",
"rowtime",
"COUNT(a) AS w0$o0",
"WeightedAvgWithRetract(c, a) AS w0$o1"
)
),
term(
"select",
"a",
"c",
"w0$o0 AS _c2",
"w0$o1 AS wAvg"
)
)
streamUtil.verifyTable(result, expected)
}
@Test
def testRowTimeUnboundedPartitionedRowsOver() = {
val weightedAvg = new WeightedAvgWithRetract
val result = table
.window(Over partitionBy 'c orderBy 'rowtime preceding UNBOUNDED_ROW following
CURRENT_ROW as 'w)
.select('c, 'a.count over 'w, weightedAvg('c, 'a) over 'w as 'wAvg)
val expected =
unaryNode(
"DataStreamCalc",
unaryNode(
"DataStreamOverAggregate",
unaryNode(
"DataStreamCalc",
streamTableNode(0),
term("select", "a", "c", "rowtime")
),
term("partitionBy", "c"),
term("orderBy", "rowtime"),
term("rows", "BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW"),
term("select", "a", "c", "rowtime",
"COUNT(a) AS w0$o0",
"WeightedAvgWithRetract(c, a) AS w0$o1")
),
term("select", "c", "w0$o0 AS _c1", "w0$o1 AS wAvg")
)
streamUtil.verifyTable(result, expected)
}
@Test
def testRowTimeUnboundedNonPartitionedRangeOver() = {
val result = table
.window(
Over orderBy 'rowtime preceding UNBOUNDED_RANGE as 'w)
.select('a, 'c, 'a.count over 'w, 'a.sum over 'w)
val expected =
unaryNode(
"DataStreamCalc",
unaryNode(
"DataStreamOverAggregate",
unaryNode(
"DataStreamCalc",
streamTableNode(0),
term("select", "a", "c", "rowtime")
),
term("orderBy", "rowtime"),
term("range", "BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW"),
term(
"select",
"a",
"c",
"rowtime",
"COUNT(a) AS w0$o0",
"SUM(a) AS w0$o1"
)
),
term(
"select",
"a",
"c",
"w0$o0 AS _c2",
"w0$o1 AS _c3"
)
)
streamUtil.verifyTable(result, expected)
}
@Test
def testRowTimeUnboundedNonPartitionedRowsOver() = {
val result = table
.window(Over orderBy 'rowtime preceding UNBOUNDED_ROW as 'w)
.select('c, 'a.count over 'w)
val expected =
unaryNode(
"DataStreamCalc",
unaryNode(
"DataStreamOverAggregate",
unaryNode(
"DataStreamCalc",
streamTableNode(0),
term("select", "a", "c", "rowtime")
),
term("orderBy", "rowtime"),
term("rows", "BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW"),
term("select", "a", "c", "rowtime", "COUNT(a) AS w0$o0")
),
term("select", "c", "w0$o0 AS _c1")
)
streamUtil.verifyTable(result, expected)
}
}
object OverWindowTest{
case class Pojo(id: Long, name: String)
}
|
gustavoanatoly/flink
|
flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/scala/stream/table/OverWindowTest.scala
|
Scala
|
apache-2.0
| 20,433 |
/*
Again, it is somewhat subjective whether to throw an exception when asked to drop more elements than the list contains. The usual default for `drop` is not to throw an exception, since it is typically used in cases where this is not indicative of a programming error. If you pay attention to how you use `drop`, it is often in cases where the length of the input list is unknown, and the number of elements to be dropped is being computed from something else. If `drop` threw an exception, we'd have to first compute or check the length and only drop up to that many elements.
*/
def drop[A](l: List[A], n: Int): List[A] =
if (n <= 0) l
else l match {
case Nil => Nil
case Cons(_,t) => drop(t, n-1)
}
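/*
Illustration (not part of the original answer), using the `Cons`/`Nil` constructors
from this chapter's custom `List`:

  drop(Cons(1, Cons(2, Cons(3, Nil))), 2) == Cons(3, Nil)  // drops the first two elements
  drop(Cons(1, Nil), 5) == Nil                             // dropping past the end yields Nil, no exception
*/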
|
willcodejavaforfood/fpinscala
|
answerkey/datastructures/4.answer.scala
|
Scala
|
mit
| 722 |
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Copyright (C) 2015 Andre White ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package io.truthencode.ddo.model.item.weapon
import enumeratum.{Enum, EnumEntry}
import io.truthencode.ddo.model.effect.Damage
import io.truthencode.ddo.support.StringUtils.Extensions
import io.truthencode.ddo.support.TraverseOps.Joinable
import io.truthencode.ddo.support.naming.{DisplayName, FriendlyDisplay}
import io.truthencode.ddo.support.{Bludgeoning, Piercing, Slashing}
import scala.collection.immutable.IndexedSeq
/**
 * Enumerates the specific base types of weapons available in DDO, e.g. Khopesh, Dagger, etc.
*/
sealed trait WeaponCategory
extends EnumEntry with Damage with DefaultDeliveryMethod with DisplayName with FriendlyDisplay {
/**
* Sets or maps the source text for the DisplayName.
*
* @return
* Source text.
*/
override protected def nameSource: String =
entryName.splitByCase.toPascalCase
// lazy val weaponClass =
}
// scalastyle:off number.of.types number.of.methods
/**
 * Holds the basic (default) weapon types: swords, axes, etc.
 *
 * @todo
 *   Handle orbs and rune arms; orbs should be shields, but rune arms are off-hand only with
 *   non-physical damage
*/
object WeaponCategory extends Enum[WeaponCategory] {
// RuneArm,
lazy val values: IndexedSeq[WeaponCategory] = findValues
/**
* These weapons get +3 to threat range with IC
* @see
* [[https://ddowiki.com/page/Improved_Critical]]
*/
val icPlus3: Seq[WeaponCategory] =
LazyList(Falchion, GreatCrossbow, Kukris, Rapier, Scimitar)
/**
* These weapons get +2 to threat range with IC
* @see
* [[https://ddowiki.com/page/Improved_Critical]]
*/
val icPlus2: Seq[WeaponCategory] = LazyList(
BastardSword,
Dagger,
Greatsword,
HeavyCrossbow,
Khopesh,
LightCrossbow,
Longsword,
RepeatingHeavyCrossbow,
RepeatingLightCrossbow,
Shortsword,
ThrowingDagger)
/**
* These weapons get +1 to threat range with IC (Everything Not in the plus 2 / 3 lists.)
* @see
* [[https://ddowiki.com/page/Improved_Critical]]
*/
val icPlus1: Seq[WeaponCategory] = WeaponCategory.values.nSelect(icPlus3.concat(icPlus2))
/**
* Filters weapons for Improved Critical Threat modifiers according to source on ddowiki
* [[https://ddowiki.com/page/Improved_Critical]]
* @return
* Collection of Weapons with appropriate modifiers in a Tuple i.e. Seq((Falchion,3),...)
*/
  def improvedCriticalRangeByWeapon(weaponClass: WeaponClass): Seq[(WeaponCategory, Int)] =
    filterByWeaponClass(weaponClass).map { weapon =>
      // The item we are looking for is in one of these lists
      val a1 = icPlus1.filter(_ == weapon).flatMap(optPlus(_, 1))
      val a2 = icPlus2.filter(_ == weapon).flatMap(optPlus(_, 2))
      val a3 = icPlus3.filter(_ == weapon).flatMap(optPlus(_, 3))
      (a1 ++ a2 ++ a3).head
    }
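  // Usage sketch (illustrative, not part of the original file): listing the Improved Critical
  // modifiers for one weapon class; ordering follows the enum's findValues (declaration) order.
  //   WeaponCategory.improvedCriticalRangeByWeapon(WeaponClass.Slashing)
  //   // e.g. Seq((BastardSword, 2), (BattleAxe, 1), (Falchion, 3), ...)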
/**
* Used by [[improvedCriticalRangeByWeapon]] to safely locate and build an array of weapons with a
 * specific value. This routine may be useful elsewhere (thus parameterized) but is essentially
 * a one-off.
* @param t
* Possibly null / empty type
* @param n
* Number to add to the Tuple pair if t is not null.
* @tparam T
* Type of t
* @return
 * an Option of (t, n), or None if t is null / empty.
*/
  def optPlus[T](t: T, n: Int): Option[(T, Int)] =
    Option(t) match {
      case Some(_) => Some((t, n))
      case _ => None
    }
def filterByWeaponClass(weaponClass: WeaponClass): Seq[WeaponCategory] =
WeaponCategory.values.collect(weaponClassToCategory).filter(_._2 == weaponClass).map(_._1)
def weaponClassToCategory: PartialFunction[WeaponCategory, (WeaponCategory, WeaponClass)] = {
case x: RangeDamage => (x, WeaponClass.Ranged)
case x: ThrownDamage => (x, WeaponClass.Thrown)
case x: Bludgeoning => (x, WeaponClass.Bludgeon)
case x: Piercing => (x, WeaponClass.Piercing)
case x: Slashing => (x, WeaponClass.Slashing)
}
  def exoticWeapons: Seq[WeaponCategory with ExoticWeapon] =
    WeaponCategory.values.collect { case w: ExoticWeapon => w }
  def martialWeapons: Seq[WeaponCategory with MartialWeapon] =
    WeaponCategory.values.collect { case w: MartialWeapon => w }
  def simpleWeapons: Seq[WeaponCategory with SimpleWeapon] =
    WeaponCategory.values.collect { case w: SimpleWeapon => w }
case object BastardSword extends WeaponCategory with ExoticWeapon with MeleeDamage with Slashing
case object BattleAxe extends WeaponCategory with MartialWeapon with MeleeDamage with Slashing
case object Club extends WeaponCategory with SimpleWeapon with MeleeDamage with Bludgeoning
case object Dagger extends WeaponCategory with SimpleWeapon with MeleeDamage with Piercing
case object Dart extends WeaponCategory with SimpleWeapon with ThrownDamage with Piercing
case object DwarvenWarAxe
extends WeaponCategory with ExoticWeapon with MeleeDamage with Piercing {
/**
* Sets or maps the source text for the DisplayName.
*
* @return
* Source text.
*/
override protected def nameSource: String = "Dwarven Axe".toPascalCase
}
case object Falchion extends WeaponCategory with MartialWeapon with MeleeDamage with Slashing
case object GreatAxe extends WeaponCategory with MartialWeapon with MeleeDamage with Slashing
case object GreatClub extends WeaponCategory with MartialWeapon with MeleeDamage with Bludgeoning
//
case object GreatCrossbow extends WeaponCategory with ExoticWeapon with RangeDamage with Piercing
case object Greatsword extends WeaponCategory with MartialWeapon with MeleeDamage with Slashing
case object HandAxe extends WeaponCategory with MartialWeapon with MeleeDamage with Slashing
case object Handwrap extends WeaponCategory with SimpleWeapon with MeleeDamage with Bludgeoning
case object HeavyCrossbow extends WeaponCategory with SimpleWeapon with MeleeDamage with Piercing
case object HeavyMace extends WeaponCategory with SimpleWeapon with MeleeDamage with Slashing
case object HeavyPick extends WeaponCategory with MartialWeapon with MeleeDamage with Piercing
case object Kama extends WeaponCategory with ExoticWeapon with MeleeDamage with Slashing
case object Khopesh extends WeaponCategory with ExoticWeapon with MeleeDamage with Slashing
case object Kukris extends WeaponCategory with MartialWeapon with MeleeDamage with Slashing
case object LightCrossbow extends WeaponCategory with SimpleWeapon with RangeDamage with Piercing
case object LightHammer
extends WeaponCategory with MartialWeapon with MeleeDamage with Bludgeoning
case object LightMace extends WeaponCategory with SimpleWeapon with MeleeDamage with Bludgeoning
case object LightPick extends WeaponCategory with MartialWeapon with MeleeDamage with Piercing
case object Longbow extends WeaponCategory with MartialWeapon with RangeDamage with Piercing
case object Longsword extends WeaponCategory with MartialWeapon with MeleeDamage with Slashing
case object Maul extends WeaponCategory with MartialWeapon with MeleeDamage with Bludgeoning
case object Morningstar extends WeaponCategory with SimpleWeapon with MeleeDamage with Bludgeoning
case object Quarterstaff
extends WeaponCategory with SimpleWeapon with MeleeDamage with Bludgeoning
case object Rapier extends WeaponCategory with MartialWeapon with MeleeDamage with Piercing
case object RepeatingHeavyCrossbow
extends WeaponCategory with ExoticWeapon with RangeDamage with Piercing
case object RepeatingLightCrossbow
extends WeaponCategory with ExoticWeapon with RangeDamage with Piercing
// case object RuneArm extends WeaponCategory
case object Scimitar extends WeaponCategory with MartialWeapon with MeleeDamage with Slashing
case object Shortbow extends WeaponCategory with MartialWeapon with RangeDamage with Piercing
case object Shortsword extends WeaponCategory with MartialWeapon with MeleeDamage with Piercing
case object Shuriken extends WeaponCategory with ExoticWeapon with ThrownDamage with Piercing
case object Sickle extends WeaponCategory with SimpleWeapon with MeleeDamage with Slashing
case object SimpleProjectile
extends WeaponCategory with SimpleWeapon with ThrownDamage with Bludgeoning
case object ThrowingAxe extends WeaponCategory with MartialWeapon with ThrownDamage with Slashing
case object ThrowingDagger
extends WeaponCategory with SimpleWeapon with ThrownDamage with Piercing
case object ThrowingHammer
extends WeaponCategory with MartialWeapon with ThrownDamage with Bludgeoning
case object Warhammer
extends WeaponCategory with MartialWeapon with Product with Serializable with MeleeDamage
with Bludgeoning
}
// scalastyle:on number.of.types number.of.methods
|
adarro/ddo-calc
|
subprojects/common/ddo-core/src/main/scala/io/truthencode/ddo/model/item/weapon/WeaponCategory.scala
|
Scala
|
apache-2.0
| 10,936 |
package toguru.toggles
import akka.actor.{ActorRef, Props}
import akka.pattern.ask
import akka.persistence.query.EventEnvelope
import toguru.helpers.ActorSpec
import toguru.toggles.AuditLog.Entry
import toguru.toggles.AuditLogActor._
import toguru.toggles.events._
import scala.concurrent.duration._
class AuditLogActorSpec extends ActorSpec {
def meta(time: Long) = Some(Metadata(time, "testUser"))
def rollout(p: Int) = Some(Rollout(p))
val events = List(
Entry("toggle-1", ToggleCreated("toggle 1", "first toggle", Map("team" -> "Toguru team"), meta(0))),
Entry("toggle-1", ToggleUpdated("toggle 1", "very first toggle", Map("team" -> "Toguru team"), meta(10))),
Entry("toggle-1", ActivationCreated(meta(20), 0, Map("country" -> StringSeq(Seq("de-DE", "de-AT"))), None)),
Entry("toggle-1", ActivationUpdated(meta(30), 0, Map("country" -> StringSeq(Seq("de-DE", "de-AT"))), rollout(34))),
Entry("toggle-1", ActivationDeleted(meta(40), 0)),
Entry("toggle-1", ActivationCreated(meta(50), 0, rollout = rollout(10))),
Entry("toggle-1", ActivationUpdated(meta(60), 0, rollout = rollout(100))),
Entry("toggle-1", ActivationDeleted(meta(70), 0))
)
def createActor(events: Seq[Entry] = List.empty, time: Long = 0, retentionLength: Int = 10, retentionTime: FiniteDuration = 10.seconds): ActorRef =
system.actorOf(Props(new AuditLogActor((_,_) => (), () => time, retentionTime, retentionLength, events)))
def sendEvents(actor: ActorRef) =
events.foreach(e => actor ! EventEnvelope(0, e.id, 0, e.event))
"audit log actor" should {
"return current audit log" in {
val actor = createActor(events)
val response = await(actor ? GetLog)
response mustBe events
}
"build audit log from events" in {
val actor = createActor()
sendEvents(actor)
val response = await(actor ? GetLog)
response mustBe events.reverse
}
"truncate audit log on insertion" in {
val actor = createActor(retentionLength = 3)
sendEvents(actor)
val response = await(actor ? GetLog)
response mustBe events.reverse.take(3)
}
"truncate audit log on cleanup" in {
val actor = createActor(events = events.reverse, retentionTime = 100.millis, time = 125)
actor ! Cleanup
val response = await(actor ? GetLog)
response.asInstanceOf[Seq[_]].length mustBe 5
response mustBe events.reverse.take(5)
}
}
}
|
andreas-schroeder/toguru
|
test/toguru/toggles/AuditLogActorSpec.scala
|
Scala
|
mit
| 2,510 |
package com.yetu.youtubeapp.services
import com.yetu.youtubeapp.utils.{FakeGlobal, BaseSpec}
import org.scalatest.time._
import play.api.Logger
import play.api.libs.json.{JsValue, Json}
import play.api.libs.ws.WSResponse
class InboxServiceSpec extends BaseSpec {
implicit val defaultPatience =
PatienceConfig(timeout = Span(6, Seconds), interval = Span(15, Millis))
"InboxService" must {
"send Message to Inbox and receives OK" in {
val responseFuture = InboxService.sendToInbox(jsonDummyValue, fakeValidJWTAccessToken, "stream")
whenReady(responseFuture){
(x:WSResponse) =>
Logger.info(s"${x.body}")
x.status must be < 300
x.status must be > 200
}
}
"send Message with invalid access token to Inbox and receives 401" in {
val responseFuture = InboxService.sendToInbox(jsonDummyValue, "invalid_access_token", "stream")
whenReady(responseFuture){
(x:WSResponse) =>
Logger.info(s"${x.body}")
x.status mustBe 401
}
}
}
}
|
yetu/youtube-app
|
test/com/yetu/youtubeapp/services/InboxServiceSpec.scala
|
Scala
|
mit
| 1,059 |
package com.codiply.barrio.helpers
import java.nio.file.Files
import java.nio.file.Paths
import scopt.OptionParser
import com.codiply.barrio.geometry.Metric
object ArgsConfig {
val defaultMaxPointsPerLeaf = 128
val defaultTreesPerNode = 3
}
sealed trait DataSourceType
object DataSourceType {
case object LocalDataSource extends DataSourceType
case object WebDataSource extends DataSourceType
case object S3DataSource extends DataSourceType
}
case class ArgsConfig(
cache: Boolean = false,
coordinateSeparator: String = ",",
dimensions: Int = -1,
encoding: String = "UTF-8",
file: String = "",
separator: String = ":::",
maxPointsPerLeaf: Int = ArgsConfig.defaultMaxPointsPerLeaf,
metric: String = Metric.euclidean.name,
randomSeed: Option[Int] = None,
s3Bucket: Option[String] = None,
seedOnlyNode: Boolean = false,
treesPerNode: Int = ArgsConfig.defaultTreesPerNode,
isUrl: Boolean = false) {
import DataSourceType._
val dataSourceType =
(isUrl, s3Bucket) match {
case (true, _) => WebDataSource
case (_, Some(_)) => S3DataSource
case _ => LocalDataSource
}
}
object ArgsParser {
import DataSourceType._
private val parser = new OptionParser[ArgsConfig]("barrio") {
override def showUsageOnError = true
head("Barrio", VersionHelper.version)
help("help")
version("version")
opt[Unit]("seedOnlyNode")
.maxOccurs(1)
.action { (v, conf) => conf.copy(seedOnlyNode = true) }
.text("flag for making this node act as a seed for the cluster only")
opt[String]('f', "file")
.maxOccurs(1)
.action { (v, conf) => conf.copy(file = v) }
.text("the path to the input file containing the data points")
opt[String]("encoding")
.maxOccurs(1)
.action { (v, conf) => conf.copy(encoding = v)}
.text("the encoding to be used when loading the data")
opt[String]('m', "metric")
.maxOccurs(1)
.validate(m =>
if (Metric.allMetrics.contains(m.toLowerCase)) {
success
} else {
val options = Metric.allMetrics.keys.mkString(", ")
failure(s"Unkown metric ${m}. Use one of the following options: ${options}.")
})
.action( (m, conf) => conf.copy(metric = m) )
.text("the metric for calculating distances")
opt[Int]('d', "dimensions")
.maxOccurs(1)
.validate(d =>
if (d > 0) {
success
} else {
failure("Value <dimensions> must be >0")
})
.action( (v, conf) => conf.copy(dimensions = v) )
.text("the number of dimensions")
opt[Int]('l', "maxPointsPerLeaf")
.maxOccurs(1)
.validate(n =>
if (n > 0) {
success
} else {
failure("Value <maxPointsPerLeaf> must be >0")
})
.action( (v, conf) => conf.copy(maxPointsPerLeaf = v) )
.text("the maximum number of points per leaf")
opt[Int]('s', "randomSeed")
.maxOccurs(1)
.action( (v, conf) => conf.copy(randomSeed = Some(v)) )
.text("the seed for the random number generator")
opt[Int]('t', "treesPerNode")
.maxOccurs(1)
.validate(n =>
if (n > 0) {
success
} else {
failure("Value <treesPerNode> must be >0")
})
.action( (v, conf) => conf.copy(treesPerNode = v) )
.text("the number of trees per node")
opt[String]("separator")
.maxOccurs(1)
.action( (v, conf) => conf.copy(separator = v) )
.text("the separator used in the input data for separating the id, the coordinates and additional data")
opt[String]("coordinateSeparator")
.maxOccurs(1)
.action( (v, conf) => conf.copy(coordinateSeparator = v) )
.text("the separator used in the input data for separating the coordinates within the coordinates field")
opt[Unit]("isUrl")
.maxOccurs(1)
.action( (_, conf) => conf.copy(isUrl = true) )
.text("flag for loading data from the web")
opt[String]("s3Bucket")
.maxOccurs(1)
.action( (v, conf) => conf.copy(s3Bucket = Some(v)) )
.text("S3 bucket containing the data file")
opt[Unit]("cache")
.maxOccurs(1)
.action( (_, conf) => conf.copy(cache = true) )
.text("flag for caching responses (when possible)")
help("help").text("prints this usage text")
checkConfig(conf => {
conf match {
case _ if (!conf.seedOnlyNode && conf.file.isEmpty()) =>
failure("Missing option --file")
case _ if (!conf.seedOnlyNode && conf.dimensions < 0) =>
failure("Missing option --dimensions")
case _ if (conf.dataSourceType == LocalDataSource && !Files.exists(Paths.get(conf.file))) =>
failure("Value <file> refers to non-existent file")
case _ if (conf.separator == conf.coordinateSeparator) =>
failure("value <separator> cannot be the same as <coordinateSeparator>")
case _ => success
}
})
}
def parse(args: Seq[String]): Option[ArgsConfig] = {
parser.parse(args, ArgsConfig())
}
}
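// Example arguments (illustrative only; the data file path is a placeholder, and the metric
// name assumes the euclidean metric is registered under the name "euclidean"):
//
//   --file /data/points.txt --dimensions 3 --metric euclidean --maxPointsPerLeaf 64 --treesPerNode 5
//
// ArgsParser.parse(args) yields Some(ArgsConfig(...)) once the checkConfig validations above
// pass, and None otherwise.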
|
codiply/barrio
|
src/main/scala/com/codiply/barrio/helpers/ArgsParser.scala
|
Scala
|
apache-2.0
| 5,159 |
package com.twitter.scalding
import com.twitter.algebird.Semigroup
object ExecutionUtil {
/**
* Generate a list of executions from a date range
*
* @param duration Duration to split daterange
* @param parallelism How many jobs to run in parallel
   * @param fn Function to run an execution given a date range
   * @return Sequence of Executions, one per duration split
*/
def executionsFromDates[T](duration: Duration, parallelism: Int = 1)(fn: DateRange => Execution[T])(implicit dr: DateRange): Seq[Execution[T]] =
dr.each(duration).map(fn).toSeq
/**
* Split a DateRange and allow for max parallel running of executions
*
* @param duration Duration to split daterange
* @param parallelism How many jobs to run in parallel
   * @param fn Function to run an execution given a date range
* @return Seq of Dates split by Duration with corresponding execution result
*/
def runDatesWithParallelism[T](duration: Duration, parallelism: Int = 1)(fn: DateRange => Execution[T])(implicit dr: DateRange): Execution[Seq[(DateRange, T)]] = {
val dates = dr.each(duration).toSeq
Execution.withParallelism(dates.map(fn), parallelism).map(e => dates.zip(e))
}
/**
* Split a DateRange and allow for max parallel running of executions
*
* @param duration Duration to split daterange
* @param parallelism How many jobs to run in parallel
   * @param fn Function to run an execution given a date range
* @return Execution of Sequences
*/
def runDateRangeWithParallelism[T](duration: Duration, parallelism: Int = 1)(fn: DateRange => Execution[T])(implicit dr: DateRange): Execution[Seq[T]] =
runDatesWithParallelism(duration, parallelism)(fn).map(_.map{ case (_, t) => t })
/**
* Same as runDateRangeWithParallelism, but sums the sequence
* of values after running. This is useful when you want to do a
* calculation in parallel over many durations and join the results
* together.
*
* For example, a common use case is when T is
* a TypedPipe[U] and you want to independently compute
* the pipes on each day and union them into a
* single TypedPipe at the end.
*
* Another possible use case would be if the executions were created by
* summing intermediate monoids (e.g. T was a Map[String,HLL] since
* algebird supports monoids for maps and hll) and you wanted to do a
* final aggregation of the Monoids computed for each duration.
*/
def runDateRangeWithParallelismSum[T](duration: Duration, parallelism: Int = 1)(fn: DateRange => Execution[T])(implicit dr: DateRange, semigroup: Semigroup[T]): Execution[T] = {
    require(dr.each(duration).nonEmpty, "Date Range cannot be empty")
runDateRangeWithParallelism(duration, parallelism)(fn)(dr)
.map(_.reduceLeft[T]{ case (l, r) => Semigroup.plus(l, r) })
}
}
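/*
 * Usage sketch (illustrative, not part of the original file; names and dates are placeholders).
 * Given an implicit DateRange and a per-day Execution, the daily results can be computed with
 * bounded parallelism and summed via the implicit Semigroup[Long]:
 *
 *   implicit val dr: DateRange = DateRange(RichDate("2015-10-01"), RichDate("2015-10-07"))
 *   def countForDay(day: DateRange): Execution[Long] = ???
 *
 *   val weeklyTotal: Execution[Long] =
 *     ExecutionUtil.runDateRangeWithParallelismSum(Days(1), parallelism = 2)(countForDay)
 */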
|
tglstory/scalding
|
scalding-core/src/main/scala/com/twitter/scalding/ExecutionUtil.scala
|
Scala
|
apache-2.0
| 2,822 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.holdenkarau.spark.testing
import java.util.Date
import org.apache.spark._
import org.scalatest.{BeforeAndAfterAll, Suite}
/** Shares a local `SparkContext` between all tests in a suite and closes it at the end. */
trait SharedSparkContext extends BeforeAndAfterAll with SparkContextProvider {
self: Suite =>
@transient private var _sc: SparkContext = _
override def sc: SparkContext = _sc
val appID = new Date().toString + math.floor(math.random * 10E4).toLong.toString
override val conf = new SparkConf().
setMaster("local[*]").
setAppName("test").
set("spark.ui.enabled", "false").
set("spark.app.id", appID)
override def beforeAll() {
_sc = new SparkContext(conf)
super.beforeAll()
}
override def afterAll() {
try {
LocalSparkContext.stop(_sc)
_sc = null
} finally {
super.afterAll()
}
}
}
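/*
 * Usage sketch (illustrative, not part of the original file): mix the trait into a ScalaTest
 * suite and use the shared `sc` directly; the context is created once per suite and stopped
 * in afterAll.
 *
 *   class WordCountSpec extends org.scalatest.FunSuite with SharedSparkContext {
 *     test("counts elements") {
 *       assert(sc.parallelize(Seq(1, 2, 3)).count() === 3)
 *     }
 *   }
 */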
|
mahmoudhanafy/spark-testing-base
|
src/main/1.3/scala/com/holdenkarau/spark/testing/SharedSparkContext.scala
|
Scala
|
apache-2.0
| 1,689 |
/* ___ _ ___ _ _ *\\
** / __| |/ (_) | | Your SKilL scala Binding **
** \\__ \\ ' <| | | |__ generated: 01.02.2019 **
** |___/_|\\_\\_|_|____| by: feldentm **
\\* */
package de.ust.skill.sir.api.internal
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.ListBuffer
import scala.collection.mutable.HashMap
import scala.collection.mutable.HashSet
import scala.collection.mutable.WrappedArray
import java.nio.BufferUnderflowException
import java.nio.MappedByteBuffer
import java.util.Arrays
import de.ust.skill.common.jvm.streams.MappedInStream
import de.ust.skill.common.jvm.streams.MappedOutStream
import de.ust.skill.common.scala.api.PoolSizeMissmatchError
import de.ust.skill.common.scala.api.RestrictionCheckFailed
import de.ust.skill.common.scala.api.SkillObject
import de.ust.skill.common.scala.internal.AutoField
import de.ust.skill.common.scala.internal.BulkChunk
import de.ust.skill.common.scala.internal.Chunk
import de.ust.skill.common.scala.internal.DistributedField
import de.ust.skill.common.scala.internal.IgnoredField
import de.ust.skill.common.scala.internal.FieldDeclaration
import de.ust.skill.common.scala.internal.KnownField
import de.ust.skill.common.scala.internal.LazyField
import de.ust.skill.common.scala.internal.SimpleChunk
import de.ust.skill.common.scala.internal.SingletonStoragePool
import de.ust.skill.common.scala.internal.fieldTypes._
import de.ust.skill.common.scala.internal.restrictions._
/**
* hint[] UserdefinedType.hints
*/
final class F_UserdefinedType_hints(
_index : Int,
_owner : UserdefinedTypePool,
_type : FieldType[scala.collection.mutable.ArrayBuffer[_root_.de.ust.skill.sir.Hint]])
extends FieldDeclaration[scala.collection.mutable.ArrayBuffer[_root_.de.ust.skill.sir.Hint],_root_.de.ust.skill.sir.UserdefinedType](_type,
"hints",
_index,
_owner)
with KnownField[scala.collection.mutable.ArrayBuffer[_root_.de.ust.skill.sir.Hint],_root_.de.ust.skill.sir.UserdefinedType] {
override def createKnownRestrictions : Unit = {
}
override def read(part : MappedInStream, target : Chunk) {
val d = owner.data
val in = part.view(target.begin.toInt, target.end.toInt)
try {
target match {
case c : SimpleChunk ⇒
var i = c.bpo.toInt
val high = i + c.count
while (i != high) {
d(i).asInstanceOf[_root_.de.ust.skill.sir.UserdefinedType].Internal_hints = t.read(in).asInstanceOf[scala.collection.mutable.ArrayBuffer[_root_.de.ust.skill.sir.Hint]]
i += 1
}
case bci : BulkChunk ⇒
val blocks = owner.blocks
var blockIndex = 0
while (blockIndex < bci.blockCount) {
val b = blocks(blockIndex)
blockIndex += 1
var i = b.bpo
val end = i + b.dynamicCount
while (i != end) {
d(i).asInstanceOf[_root_.de.ust.skill.sir.UserdefinedType].Internal_hints = t.read(in).asInstanceOf[scala.collection.mutable.ArrayBuffer[_root_.de.ust.skill.sir.Hint]]
i += 1
}
}
}
} catch {
case e : BufferUnderflowException ⇒
throw new PoolSizeMissmatchError(dataChunks.size - 1,
part.position() + target.begin,
part.position() + target.end,
this, in.position())
}
if(!in.eof())
throw new PoolSizeMissmatchError(dataChunks.size - 1,
part.position() + target.begin,
part.position() + target.end,
this, in.position())
}
def offset: Unit = {
val data = owner.data
var result = 0L
dataChunks.last match {
case c : SimpleChunk ⇒
var i = c.bpo.toInt
val high = i + c.count
while (i != high) {
val v = data(i).asInstanceOf[_root_.de.ust.skill.sir.UserdefinedType].Internal_hints
result += (if(null == v) 1 else V64.offset(v.size))
if(null != v) v.foreach { v => result += (if (null == v) 1 else V64.offset(v.getSkillID)) }
i += 1
}
case bci : BulkChunk ⇒
val blocks = owner.blocks
var blockIndex = 0
while (blockIndex < bci.blockCount) {
val b = blocks(blockIndex)
blockIndex += 1
var i = b.bpo
val end = i + b.dynamicCount
while (i != end) {
val v = data(i).asInstanceOf[_root_.de.ust.skill.sir.UserdefinedType].Internal_hints
result += (if(null == v) 1 else V64.offset(v.size))
if(null != v) v.foreach { v => result += (if (null == v) 1 else V64.offset(v.getSkillID)) }
i += 1
}
}
}
cachedOffset = result
}
def write(out: MappedOutStream): Unit = {
val data = owner.data
dataChunks.last match {
case c : SimpleChunk ⇒
var i = c.bpo.toInt
val high = i + c.count
while (i != high) {
val v = data(i).asInstanceOf[_root_.de.ust.skill.sir.UserdefinedType].Internal_hints
if(null == v) out.i8(0) else { out.v64(v.size)
v.foreach { v => if (null == v) out.i8(0) else out.v64(v.getSkillID) }}
i += 1
}
case bci : BulkChunk ⇒
val blocks = owner.blocks
var blockIndex = 0
while (blockIndex < bci.blockCount) {
val b = blocks(blockIndex)
blockIndex += 1
var i = b.bpo
val end = i + b.dynamicCount
while (i != end) {
val v = data(i).asInstanceOf[_root_.de.ust.skill.sir.UserdefinedType].Internal_hints
if(null == v) out.i8(0) else { out.v64(v.size)
v.foreach { v => if (null == v) out.i8(0) else out.v64(v.getSkillID) }}
i += 1
}
}
}
}
// note: reflective field access will raise exception for ignored fields
override def getR(i : SkillObject) : scala.collection.mutable.ArrayBuffer[_root_.de.ust.skill.sir.Hint] = i.asInstanceOf[_root_.de.ust.skill.sir.UserdefinedType].hints
override def setR(i : SkillObject, v : scala.collection.mutable.ArrayBuffer[_root_.de.ust.skill.sir.Hint]) {
i.asInstanceOf[_root_.de.ust.skill.sir.UserdefinedType].hints = v.asInstanceOf[scala.collection.mutable.ArrayBuffer[_root_.de.ust.skill.sir.Hint]]
}
}
|
skill-lang/skill
|
src/main/scala/de/ust/skill/sir/api/internal/F_UserdefinedType_hints.scala
|
Scala
|
bsd-3-clause
| 6,728 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.nodes.physical.stream
import org.apache.flink.table.planner.calcite.FlinkTypeFactory
import org.apache.flink.table.planner.plan.logical.TimeAttributeWindowingStrategy
import org.apache.flink.table.planner.plan.nodes.exec.{ExecNode, InputProperty}
import org.apache.flink.table.planner.plan.nodes.exec.stream.StreamExecWindowTableFunction
import org.apache.calcite.plan.{RelOptCluster, RelTraitSet}
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.{RelNode, RelWriter, SingleRel}
import java.util
import scala.collection.JavaConverters._
/**
* Stream physical RelNode for window table-valued function.
*/
class StreamPhysicalWindowTableFunction(
cluster: RelOptCluster,
traitSet: RelTraitSet,
inputRel: RelNode,
outputRowType: RelDataType,
val windowing: TimeAttributeWindowingStrategy,
val emitPerRecord: Boolean)
extends SingleRel(cluster, traitSet, inputRel)
with StreamPhysicalRel {
override def requireWatermark: Boolean = true
override def deriveRowType(): RelDataType = outputRowType
override def copy(traitSet: RelTraitSet, inputs: util.List[RelNode]): RelNode = {
new StreamPhysicalWindowTableFunction(
cluster,
traitSet,
inputs.get(0),
outputRowType,
windowing,
emitPerRecord)
}
def copy(emitPerRecord: Boolean): StreamPhysicalWindowTableFunction = {
new StreamPhysicalWindowTableFunction(
cluster,
traitSet,
input,
outputRowType,
windowing,
emitPerRecord)
}
override def explainTerms(pw: RelWriter): RelWriter = {
val inputFieldNames = getInput.getRowType.getFieldNames.asScala.toArray
super.explainTerms(pw)
.item("window", windowing.toSummaryString(inputFieldNames))
.itemIf("emitPerRecord", "true", emitPerRecord)
}
override def translateToExecNode(): ExecNode[_] = {
new StreamExecWindowTableFunction(
windowing,
emitPerRecord,
InputProperty.DEFAULT,
FlinkTypeFactory.toLogicalRowType(getRowType),
getRelDetailedDescription
)
}
}
|
StephanEwen/incubator-flink
|
flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/stream/StreamPhysicalWindowTableFunction.scala
|
Scala
|
apache-2.0
| 2,923 |
/*
* Copyright 2012 The SIRIS Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* The SIRIS Project is a cooperation between Beuth University, Berlin and the
* HCI Group at the University of Würzburg. The project is funded by the German
* Federal Ministry of Education and Research (grant no. 17N4409).
*/
package simx.applications.examples.basic.cluster
import simx.components.renderer.jvr.{JVRInit, JVRConnector}
import simx.components.physics.jbullet.JBulletComponent
import simx.core.components.physics.PhysicsConfiguration
import simx.core.components.renderer.messages._
import simx.core.helper.Execute
import simx.core.{ApplicationConfig, SimXClusterApplication}
import simx.core.component.{Soft, ExecutionStrategy}
import simx.applications.examples.basic.objects.{Ball, Light, Table}
import collection.immutable
import simx.core.svaractor.SVarActor
import simplex3d.math.floatx.{ConstVec3f, Vec3f}
import simx.core.entity.Entity
/**
* A clustered version of [[simx.applications.examples.basic.ExampleApplication]].
*
 * This application simulates the same scene as [[simx.applications.examples.basic.ExampleApplication]] but uses
 * a display description that describes two displays connected to different cluster nodes.
*
* @author Stephan Rehfeld
* @author Dennis Wiebusch
*
* @param bootstrapApp Pass 'true' if this instance should run the bootstrap code. Typically the return value of
* [[simx.core.SimXClusterApplication.startClusterSubsystem]].
*/
class ExampleClusterApplication( bootstrapApp : Boolean ) extends SimXClusterApplication( bootstrapApp ) with JVRInit {
var renderer : Option[SVarActor.Ref] = None
override def startUp() {
super.startUp()
}
/**
* Defines the components that [[simx.core.SimXApplication]] has to create
*/
override protected def applicationConfiguration = ApplicationConfig(Nil)
override protected def onStartUp() {
println("creating components")
// create components
createActor(new JBulletComponent())( (physics) => {
println( " physics created")
createActor( new JVRConnector())( (renderer) => {
println( " renderer created" )
this.renderer = Some( renderer )
// send configs
renderer ! ConfigureRenderer( ClusterDisplayDescription(800, 600),
effectsConfiguration = EffectsConfiguration( "low","none" ) )
physics ! PhysicsConfiguration (ConstVec3f(0, -9.81f, 0))
// register for exit on close:
exitOnClose(renderer, shutdown)
this.addJobIn(5000){
val executionStrategy = ExecutionStrategy where physics runs Soft( 60 ) and renderer runs Soft( 60 )
this.start( executionStrategy )
create(applicationConfiguration)
}
})()
})()
}
protected def configureComponents(components: immutable.Map[Symbol, SVarActor.Ref]) {}
override protected def createEntities() {
println("creating entities")
Execute serialized
realize( Table("the table", Vec3f(3f, 1f, 2f), Vec3f(0f, -1.5f, -7f) ) ) and
realize( Light("the light", Vec3f(-4f, 8f, -7f), Vec3f(270f, -25f, 0f) ) ) and
realize( Ball ("the ball", 0.2f, Vec3f(0f, 1f, -7f) ) ) exec( x => {
})
}
protected def removeFromLocalRep(e : Entity){
}
override protected def finishConfiguration() {
println("application is running")
}
}
/**
* This object contains the main method to start the ExampleClusterApplication.
*
* @author Stephan Rehfeld
*/
object ExampleClusterApplication {
def main( args : Array[String] ) {
println( "----------------- SimulatorX Cluster Examples: Example Application -------------" )
println( "| This application simulated the same scene as ExampleApplication but is using |" )
println( "| a display description that is describing two displays connected to different |" )
println( "| cluster nodes. |" )
println( "| |" )
println( "| Suggested start parameter to run on one machine: |" )
println( "| #1 --bootstrap-app --name front --interface 127.0.0.1 --port 9000 |" )
println( "| #2 --name back --interface 127.0.0.1 --port 9001 --seed-node 127.0.0.1:9000 |" )
println( "--------------------------------------------------------------------------------" )
def clusterNodes = Set('front, 'back)
val bootstrapApp = SimXClusterApplication.startClusterSubsystem( args, clusterNodes )
SVarActor.createActor(new ExampleClusterApplication( bootstrapApp ))
}
}
|
simulator-x/basicexamples
|
src/simx/applications/examples/basic/cluster/ExampleClusterApplication.scala
|
Scala
|
apache-2.0
| 5,231 |
/*
* Copyright (c) 2014 Oculus Info Inc.
* http://www.oculusinfo.com/
*
* Released under the MIT License.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished to do
* so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.oculusinfo.tilegen.examples.datagen
import org.apache.spark._
import org.apache.spark.SparkContext._
import java.awt.geom.Point2D
import com.oculusinfo.binning.BinIndex
import com.oculusinfo.binning.BinIterator
import com.oculusinfo.binning.TileIndex
import com.oculusinfo.binning.TilePyramid
import com.oculusinfo.binning.TileData
import com.oculusinfo.tilegen.spark.SparkConnector
import com.oculusinfo.tilegen.util.ArgumentParser
/**
* A sample data generation object that generates a bunch of lines
* through the center of the area of interest.
*/
object LineStarSampleGenerator {
def endPointsToLine (startX: Double, startY: Double,
endX: Double, endY: Double,
stepSize: Double): IndexedSeq[(Double, Double)] = {
// Bresenham's algorithm, as per wikipedia
var steep = math.abs(endY-startY) > math.abs(endX-startX)
var (x0, y0, x1, y1) =
if (steep) {
if (startY > endY) {
(endY, endX, startY, startX)
} else {
(startY, startX, endY, endX)
}
} else {
if (startX > endX) {
(endX, endY, startX, startY)
} else {
(startX, startY, endX, endY)
}
}
val deltax = x1-x0
val deltay = math.abs(y1-y0)
var error = deltax/2
var y = y0
val ystep = if (y0 < y1) stepSize else -stepSize
Range(0, ((x1-x0)/stepSize).ceil.toInt).map(n => x0+n*stepSize).map(x =>
{
val ourY = y
error = error - deltay
if (error < 0) {
y = y + ystep
error = error + deltax
}
if (steep) (ourY, x)
else (x, ourY)
}
)
}
def main (args: Array[String]): Unit = {
// Draw a bunch of lines across the AoI all through the center
val argParser = new ArgumentParser(args)
val fileName = argParser.getString("f",
"The file to which to write the sample data")
val topLevel = argParser.getInt(
"top",
"The level at which our raidal lines will first fill the area of interest. At all "
+"levels at this level and above, every pixel in the data set will have a count of at "
+"least one. At levels below this, there will be empty pixels",
Option(0))
val bottomLevel = argParser.getInt(
"bottom",
"The lowest level at which a line will display as continuous, with no breaks.",
Option(10))
val sc = argParser.getSparkConnector().createContext(Some("Create sample data for live tile demonstration"))
val linesPerSide = 256 << topLevel
val linePartitions = if (bottomLevel < 6) 1 else (1 << (bottomLevel-6))
val pixelsPerLine = 256 << bottomLevel
val increment = 2.0/pixelsPerLine
val lineIndex = sc.makeRDD(Range(0, linesPerSide), linePartitions)
val data = lineIndex.flatMap(n =>
{
// Get a start position from -1 to 1
val startPos = n.toDouble/linesPerSide.toDouble * 2.0 - 1.0
val xLine = endPointsToLine(-1.0, -startPos, 1.0, startPos, increment)
val yLine = endPointsToLine(-startPos, -1.0, startPos, 1.0, increment)
xLine union yLine
}
).map(p =>
"%.8f\\t%.8f\\t1.0".format(p._1, p._2)
)
data.saveAsTextFile(fileName)
}
}
|
unchartedsoftware/aperture-tiles
|
tile-generation/src/main/scala/com/oculusinfo/tilegen/examples/datagen/lineStar.scala
|
Scala
|
mit
| 4,285 |
/*
* OpenURP, Open University Resouce Planning
*
* Copyright (c) 2013-2014, OpenURP Software.
*
* OpenURP is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* OpenURP is distributed in the hope that it will be useful.
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Beangle. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openurp.edu.attendance.ws.web.app
import org.beangle.commons.lang.Dates
import org.beangle.commons.lang.Strings.{isEmpty, replace}
import org.beangle.commons.logging.Logging
import org.openurp.edu.attendance.ws.impl.{AppConfig, BaseDataService, DeviceRegistry}
import org.openurp.edu.attendance.ws.web.util.Consts.{DeviceId, Rule}
import org.openurp.edu.attendance.ws.web.util.Params
import javax.servlet.{ServletRequest, ServletResponse}
import javax.servlet.http.HttpServlet
/**
* 发送教室表地址
*
* @author chaostone
* @version 1.0, 2014/03/22
* @since 0.0.1
*/
class CourseTableServlet extends HttpServlet with Logging {
var baseDataService: BaseDataService = _
var deviceRegistry: DeviceRegistry = _
override def service(req: ServletRequest, res: ServletResponse) {
val params = Params.require(DeviceId).get(req, Rule)
var rs = ""
if (!params.ok) {
//rs = params.msg.values.mkString(";")
rs = "devid needed!"
} else {
val devid: Int = params(DeviceId)
deviceRegistry.get(devid) foreach { d =>
var url = AppConfig.courseURL
baseDataService.getSemesterId(Dates.today) foreach { semesterId =>
url = replace(url, "${semesterId}", String.valueOf(semesterId))
rs = replace(url, "${roomId}", String.valueOf(d.room.id))
}
}
}
if (!isEmpty(rs)) res.getWriter().append(rs)
}
}
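// Illustrative example (not part of the original file; the URL template, parameter name and ids
// are hypothetical): with AppConfig.courseURL set to
//   "http://courses.example.org/table?semester.id=${semesterId}&room.id=${roomId}"
// a request carrying devid=42, where device 42 is registered to room 301 and the current
// semester id is 20141, would be answered with
//   "http://courses.example.org/table?semester.id=20141&room.id=301"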
|
openurp/edu-core
|
attendance/ws/src/main/scala/org/openurp/edu/attendance/ws/web/app/CourseTableServlet.scala
|
Scala
|
gpl-3.0
| 2,145 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.examples
import org.apache.hadoop.fs.s3a.Constants.{ACCESS_KEY, ENDPOINT, SECRET_KEY}
import org.apache.spark.sql.SparkSession
import org.slf4j.{Logger, LoggerFactory}
import org.apache.carbondata.core.metadata.datatype.{DataTypes, Field}
import org.apache.carbondata.sdk.file.{CarbonWriter, Schema}
import org.apache.carbondata.spark.util.CarbonSparkUtil
/**
 * Generates sample data and writes it to S3.
 * The number of generated rows can be controlled via the number-of-rows parameter.
*/
object S3UsingSdkExample {
// prepare SDK writer output
def buildTestData(
args: Array[String],
path: String,
num: Int = 3): Any = {
// getCanonicalPath gives path with \, but the code expects /.
val writerPath = path.replace("\\", "/")
val fields: Array[Field] = new Array[Field](3)
fields(0) = new Field("name", DataTypes.STRING)
fields(1) = new Field("age", DataTypes.INT)
fields(2) = new Field("height", DataTypes.DOUBLE)
try {
val builder = CarbonWriter.builder()
val writer =
builder.outputPath(writerPath)
.uniqueIdentifier(System.currentTimeMillis)
.withBlockSize(2)
.writtenBy("S3UsingSdkExample")
.withHadoopConf(ACCESS_KEY, args(0))
.withHadoopConf(SECRET_KEY, args(1))
.withHadoopConf(ENDPOINT, CarbonSparkUtil.getS3EndPoint(args))
.withCsvInput(new Schema(fields)).build()
var i = 0
val row = num
while (i < row) {
writer.write(Array[String]("robot" + i, String.valueOf(i), String.valueOf(i.toDouble / 2)))
i += 1
}
writer.close()
} catch {
case e: Exception => throw e
}
}
/**
   * This example demonstrates how to:
   * 1. create a carbon table whose storage location is on object based storage
   * like AWS S3, Huawei OBS, etc
   * 2. load data into the carbon table; the generated files will be stored on the object based storage
   * 3. query the table.
   *
   * @param args requires two parameters "Access-key" "Secret-key", optionally followed by
   * "table-path-on-s3", "s3-endpoint", "number-of-rows" and "spark-master"
*/
def main(args: Array[String]) {
val logger: Logger = LoggerFactory.getLogger(this.getClass)
if (args.length < 2 || args.length > 6) {
logger.error("Usage: java CarbonS3Example <access-key> <secret-key>" +
"[table-path-on-s3] [s3-endpoint] [number-of-rows] [spark-master]")
System.exit(0)
}
val (accessKey, secretKey, endpoint) = CarbonSparkUtil.getKeyOnPrefix(args(2))
val spark = SparkSession
.builder()
.master(getSparkMaster(args))
.appName("S3UsingSDKExample")
.config("spark.driver.host", "localhost")
.config(accessKey, args(0))
.config(secretKey, args(1))
.config(endpoint, CarbonSparkUtil.getS3EndPoint(args))
.config("spark.sql.extensions", "org.apache.spark.sql.CarbonExtensions")
.getOrCreate()
spark.sparkContext.setLogLevel("WARN")
val path = if (args.length < 3) {
"s3a://sdk/WriterOutput2 "
} else {
args(2)
}
val num = if (args.length > 4) {
Integer.parseInt(args(4))
} else {
3
}
buildTestData(args, path, num)
spark.sql("DROP TABLE IF EXISTS s3_sdk_table")
spark.sql(s"CREATE EXTERNAL TABLE s3_sdk_table STORED AS carbondata" +
s" LOCATION '$path'")
spark.sql("SELECT * FROM s3_sdk_table LIMIT 10").show()
spark.stop()
}
def getSparkMaster(args: Array[String]): String = {
if (args.length == 6) args(5)
else "local"
}
}
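// Example invocation (illustrative only; the jar name, keys, bucket and endpoint are placeholders):
//
//   spark-submit --class org.apache.carbondata.examples.S3UsingSdkExample \
//     carbondata-examples.jar <access-key> <secret-key> \
//     s3a://my-bucket/sdk-output <s3-endpoint> 1000 local[2]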
|
zzcclp/carbondata
|
examples/spark/src/main/scala/org/apache/carbondata/examples/S3UsingSDkExample.scala
|
Scala
|
apache-2.0
| 4,416 |
package parsers.solutions
import scala.util.parsing.combinator.JavaTokenParsers
trait Exercise4 extends JavaTokenParsers {
def number: Parser[Double] = floatingPointNumber ^^ { _.toDouble }
def factor: Parser[Double] = number | "(" ~> expr <~ ")"
def term: Parser[Double] = {
(factor ~ ((("*"|"/") ~ factor)*)) ^^ { case n ~ ops =>
ops.foldRight(n) {
case ("*" ~ f, p) => p * f
case ("/" ~ f, p) => p / f
}
}
}
def expr: Parser[Double] = {
(term ~ ((("+"|"-") ~ term)*)) ^^ { case n ~ ops =>
ops.foldRight(n) {
case ("+" ~ t, e) => e + t
case ("-" ~ t, e) => e - t
}
}
}
def eval(s: String): Option[Double] = parseAll(expr, s) match {
case Success(value, _) => Some(value)
case _ => None
}
}
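/*
 Usage sketch (illustrative, not part of the original exercise file):

   object Calc extends Exercise4
   Calc.eval("1 + 2 * 3")   // Some(7.0)
   Calc.eval("(1 + 2) * 3") // Some(9.0)
   Calc.eval("1 +")         // None
*/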
|
julienrf/scala-lessons
|
highlights/parsers/code/src/main/scala/parsers/solutions/Exercise4.scala
|
Scala
|
mit
| 795 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.controller
import collection._
import collection.JavaConversions._
import java.util.concurrent.atomic.AtomicBoolean
import kafka.common.{TopicAndPartition, StateChangeFailedException}
import kafka.utils.{ZkUtils, ReplicationUtils, Logging}
import org.I0Itec.zkclient.IZkChildListener
import org.apache.log4j.Logger
import kafka.controller.Callbacks._
import kafka.utils.Utils._
/**
* This class represents the state machine for replicas. It defines the states that a replica can be in, and
* transitions to move the replica to another legal state. The different states that a replica can be in are -
* 1. NewReplica : The controller can create new replicas during partition reassignment. In this state, a
* replica can only get become follower state change request. Valid previous
* state is NonExistentReplica
* 2. OnlineReplica : Once a replica is started and part of the assigned replicas for its partition, it is in this
* state. In this state, it can get either become leader or become follower state change requests.
* Valid previous state are NewReplica, OnlineReplica or OfflineReplica
* 3. OfflineReplica : If a replica dies, it moves to this state. This happens when the broker hosting the replica
* is down. Valid previous state are NewReplica, OnlineReplica
* 4. ReplicaDeletionStarted: If replica deletion starts, it is moved to this state. Valid previous state is OfflineReplica
* 5. ReplicaDeletionSuccessful: If replica responds with no error code in response to a delete replica request, it is
* moved to this state. Valid previous state is ReplicaDeletionStarted
* 6. ReplicaDeletionIneligible: If replica deletion fails, it is moved to this state. Valid previous state is ReplicaDeletionStarted
* 7. NonExistentReplica: If a replica is deleted successfully, it is moved to this state. Valid previous state is
* ReplicaDeletionSuccessful
*/
class ReplicaStateMachine(controller: KafkaController) extends Logging {
private val controllerContext = controller.controllerContext
private val controllerId = controller.config.brokerId
private val zkClient = controllerContext.zkClient
var replicaState: mutable.Map[PartitionAndReplica, ReplicaState] = mutable.Map.empty
val brokerRequestBatch = new ControllerBrokerRequestBatch(controller)
private val hasStarted = new AtomicBoolean(false)
this.logIdent = "[Replica state machine on controller " + controller.config.brokerId + "]: "
private val stateChangeLogger = KafkaController.stateChangeLogger
/**
* Invoked on successful controller election. First registers a broker change listener since that triggers all
* state transitions for replicas. Initializes the state of replicas for all partitions by reading from zookeeper.
* Then triggers the OnlineReplica state change for all replicas.
*/
def startup() {
// initialize replica state
initializeReplicaState()
hasStarted.set(true)
// move all Online replicas to Online
handleStateChanges(controllerContext.allLiveReplicas(), OnlineReplica)
info("Started replica state machine with initial state -> " + replicaState.toString())
}
// register broker change listener
def registerListeners() {
registerBrokerChangeListener()
}
/**
* Invoked on controller shutdown.
*/
def shutdown() {
hasStarted.set(false)
replicaState.clear()
}
/**
* This API is invoked by the broker change controller callbacks and the startup API of the state machine
* @param replicas The list of replicas (brokers) that need to be transitioned to the target state
* @param targetState The state that the replicas should be moved to
* The controller's allLeaders cache should have been updated before this
*/
def handleStateChanges(replicas: Set[PartitionAndReplica], targetState: ReplicaState,
callbacks: Callbacks = (new CallbackBuilder).build) {
if(replicas.size > 0) {
info("Invoking state change to %s for replicas %s".format(targetState, replicas.mkString(",")))
try {
brokerRequestBatch.newBatch()
replicas.foreach(r => handleStateChange(r, targetState, callbacks))
brokerRequestBatch.sendRequestsToBrokers(controller.epoch, controllerContext.correlationId.getAndIncrement)
}catch {
case e: Throwable => error("Error while moving some replicas to %s state".format(targetState), e)
}
}
}
/**
* This API exercises the replica's state machine. It ensures that every state transition happens from a legal
* previous state to the target state. Valid state transitions are:
* NonExistentReplica --> NewReplica
* --send LeaderAndIsr request with current leader and isr to the new replica and UpdateMetadata request for the
* partition to every live broker
*
* NewReplica -> OnlineReplica
* --add the new replica to the assigned replica list if needed
*
* OnlineReplica,OfflineReplica -> OnlineReplica
* --send LeaderAndIsr request with current leader and isr to the new replica and UpdateMetadata request for the
* partition to every live broker
*
* NewReplica,OnlineReplica,OfflineReplica,ReplicaDeletionIneligible -> OfflineReplica
* --send StopReplicaRequest to the replica (w/o deletion)
* --remove this replica from the isr and send LeaderAndIsr request (with new isr) to the leader replica and
* UpdateMetadata request for the partition to every live broker.
*
* OfflineReplica -> ReplicaDeletionStarted
* --send StopReplicaRequest to the replica (with deletion)
*
* ReplicaDeletionStarted -> ReplicaDeletionSuccessful
* -- mark the state of the replica in the state machine
*
* ReplicaDeletionStarted -> ReplicaDeletionIneligible
* -- mark the state of the replica in the state machine
*
* ReplicaDeletionSuccessful -> NonExistentReplica
* -- remove the replica from the in memory partition replica assignment cache
* @param partitionAndReplica The replica for which the state transition is invoked
* @param targetState The end state that the replica should be moved to
*/
def handleStateChange(partitionAndReplica: PartitionAndReplica, targetState: ReplicaState,
callbacks: Callbacks) {
val topic = partitionAndReplica.topic
val partition = partitionAndReplica.partition
val replicaId = partitionAndReplica.replica
val topicAndPartition = TopicAndPartition(topic, partition)
if (!hasStarted.get)
throw new StateChangeFailedException(("Controller %d epoch %d initiated state change of replica %d for partition %s " +
"to %s failed because replica state machine has not started")
.format(controllerId, controller.epoch, replicaId, topicAndPartition, targetState))
val currState = replicaState.getOrElseUpdate(partitionAndReplica, NonExistentReplica)
try {
val replicaAssignment = controllerContext.partitionReplicaAssignment(topicAndPartition)
targetState match {
case NewReplica =>
assertValidPreviousStates(partitionAndReplica, List(NonExistentReplica), targetState)
// start replica as a follower to the current leader for its partition
val leaderIsrAndControllerEpochOpt = ReplicationUtils.getLeaderIsrAndEpochForPartition(zkClient, topic, partition)
leaderIsrAndControllerEpochOpt match {
case Some(leaderIsrAndControllerEpoch) =>
if(leaderIsrAndControllerEpoch.leaderAndIsr.leader == replicaId)
throw new StateChangeFailedException("Replica %d for partition %s cannot be moved to NewReplica"
.format(replicaId, topicAndPartition) + "state as it is being requested to become leader")
brokerRequestBatch.addLeaderAndIsrRequestForBrokers(List(replicaId),
topic, partition, leaderIsrAndControllerEpoch,
replicaAssignment)
case None => // new leader request will be sent to this replica when one gets elected
}
replicaState.put(partitionAndReplica, NewReplica)
stateChangeLogger.trace("Controller %d epoch %d changed state of replica %d for partition %s from %s to %s"
.format(controllerId, controller.epoch, replicaId, topicAndPartition, currState,
targetState))
case ReplicaDeletionStarted =>
assertValidPreviousStates(partitionAndReplica, List(OfflineReplica), targetState)
replicaState.put(partitionAndReplica, ReplicaDeletionStarted)
// send stop replica command
brokerRequestBatch.addStopReplicaRequestForBrokers(List(replicaId), topic, partition, deletePartition = true,
callbacks.stopReplicaResponseCallback)
stateChangeLogger.trace("Controller %d epoch %d changed state of replica %d for partition %s from %s to %s"
.format(controllerId, controller.epoch, replicaId, topicAndPartition, currState, targetState))
case ReplicaDeletionIneligible =>
assertValidPreviousStates(partitionAndReplica, List(ReplicaDeletionStarted), targetState)
replicaState.put(partitionAndReplica, ReplicaDeletionIneligible)
stateChangeLogger.trace("Controller %d epoch %d changed state of replica %d for partition %s from %s to %s"
.format(controllerId, controller.epoch, replicaId, topicAndPartition, currState, targetState))
case ReplicaDeletionSuccessful =>
assertValidPreviousStates(partitionAndReplica, List(ReplicaDeletionStarted), targetState)
replicaState.put(partitionAndReplica, ReplicaDeletionSuccessful)
stateChangeLogger.trace("Controller %d epoch %d changed state of replica %d for partition %s from %s to %s"
.format(controllerId, controller.epoch, replicaId, topicAndPartition, currState, targetState))
case NonExistentReplica =>
assertValidPreviousStates(partitionAndReplica, List(ReplicaDeletionSuccessful), targetState)
// remove this replica from the assigned replicas list for its partition
val currentAssignedReplicas = controllerContext.partitionReplicaAssignment(topicAndPartition)
controllerContext.partitionReplicaAssignment.put(topicAndPartition, currentAssignedReplicas.filterNot(_ == replicaId))
replicaState.remove(partitionAndReplica)
stateChangeLogger.trace("Controller %d epoch %d changed state of replica %d for partition %s from %s to %s"
.format(controllerId, controller.epoch, replicaId, topicAndPartition, currState, targetState))
case OnlineReplica =>
assertValidPreviousStates(partitionAndReplica,
List(NewReplica, OnlineReplica, OfflineReplica, ReplicaDeletionIneligible), targetState)
replicaState(partitionAndReplica) match {
case NewReplica =>
// add this replica to the assigned replicas list for its partition
val currentAssignedReplicas = controllerContext.partitionReplicaAssignment(topicAndPartition)
if(!currentAssignedReplicas.contains(replicaId))
controllerContext.partitionReplicaAssignment.put(topicAndPartition, currentAssignedReplicas :+ replicaId)
stateChangeLogger.trace("Controller %d epoch %d changed state of replica %d for partition %s from %s to %s"
.format(controllerId, controller.epoch, replicaId, topicAndPartition, currState,
targetState))
case _ =>
// check if the leader for this partition ever existed
controllerContext.partitionLeadershipInfo.get(topicAndPartition) match {
case Some(leaderIsrAndControllerEpoch) =>
brokerRequestBatch.addLeaderAndIsrRequestForBrokers(List(replicaId), topic, partition, leaderIsrAndControllerEpoch,
replicaAssignment)
replicaState.put(partitionAndReplica, OnlineReplica)
stateChangeLogger.trace("Controller %d epoch %d changed state of replica %d for partition %s from %s to %s"
.format(controllerId, controller.epoch, replicaId, topicAndPartition, currState, targetState))
case None => // that means the partition was never in OnlinePartition state, this means the broker never
// started a log for that partition and does not have a high watermark value for this partition
}
}
replicaState.put(partitionAndReplica, OnlineReplica)
case OfflineReplica =>
assertValidPreviousStates(partitionAndReplica,
List(NewReplica, OnlineReplica, OfflineReplica, ReplicaDeletionIneligible), targetState)
// send stop replica command to the replica so that it stops fetching from the leader
brokerRequestBatch.addStopReplicaRequestForBrokers(List(replicaId), topic, partition, deletePartition = false)
// As an optimization, the controller removes dead replicas from the ISR
val leaderAndIsrIsEmpty: Boolean =
controllerContext.partitionLeadershipInfo.get(topicAndPartition) match {
case Some(currLeaderIsrAndControllerEpoch) =>
controller.removeReplicaFromIsr(topic, partition, replicaId) match {
case Some(updatedLeaderIsrAndControllerEpoch) =>
// send the shrunk ISR state change request to all the remaining alive replicas of the partition.
val currentAssignedReplicas = controllerContext.partitionReplicaAssignment(topicAndPartition)
if (!controller.deleteTopicManager.isPartitionToBeDeleted(topicAndPartition)) {
brokerRequestBatch.addLeaderAndIsrRequestForBrokers(currentAssignedReplicas.filterNot(_ == replicaId),
topic, partition, updatedLeaderIsrAndControllerEpoch, replicaAssignment)
}
replicaState.put(partitionAndReplica, OfflineReplica)
stateChangeLogger.trace("Controller %d epoch %d changed state of replica %d for partition %s from %s to %s"
.format(controllerId, controller.epoch, replicaId, topicAndPartition, currState, targetState))
false
case None =>
true
}
case None =>
true
}
if (leaderAndIsrIsEmpty)
throw new StateChangeFailedException(
"Failed to change state of replica %d for partition %s since the leader and isr path in zookeeper is empty"
.format(replicaId, topicAndPartition))
}
}
catch {
case t: Throwable =>
stateChangeLogger.error("Controller %d epoch %d initiated state change of replica %d for partition [%s,%d] from %s to %s failed"
.format(controllerId, controller.epoch, replicaId, topic, partition, currState, targetState), t)
}
}
def areAllReplicasForTopicDeleted(topic: String): Boolean = {
val replicasForTopic = controller.controllerContext.replicasForTopic(topic)
val replicaStatesForTopic = replicasForTopic.map(r => (r, replicaState(r))).toMap
debug("Are all replicas for topic %s deleted %s".format(topic, replicaStatesForTopic))
replicaStatesForTopic.foldLeft(true)((deletionState, r) => deletionState && r._2 == ReplicaDeletionSuccessful)
}
def isAtLeastOneReplicaInDeletionStartedState(topic: String): Boolean = {
val replicasForTopic = controller.controllerContext.replicasForTopic(topic)
val replicaStatesForTopic = replicasForTopic.map(r => (r, replicaState(r))).toMap
replicaStatesForTopic.foldLeft(false)((deletionState, r) => deletionState || r._2 == ReplicaDeletionStarted)
}
def replicasInState(topic: String, state: ReplicaState): Set[PartitionAndReplica] = {
replicaState.filter(r => r._1.topic.equals(topic) && r._2 == state).keySet
}
def isAnyReplicaInState(topic: String, state: ReplicaState): Boolean = {
replicaState.exists(r => r._1.topic.equals(topic) && r._2 == state)
}
def replicasInDeletionStates(topic: String): Set[PartitionAndReplica] = {
val deletionStates = Set(ReplicaDeletionStarted, ReplicaDeletionSuccessful, ReplicaDeletionIneligible)
replicaState.filter(r => r._1.topic.equals(topic) && deletionStates.contains(r._2)).keySet
}
private def assertValidPreviousStates(partitionAndReplica: PartitionAndReplica, fromStates: Seq[ReplicaState],
targetState: ReplicaState) {
assert(fromStates.contains(replicaState(partitionAndReplica)),
"Replica %s should be in the %s states before moving to %s state"
.format(partitionAndReplica, fromStates.mkString(","), targetState) +
". Instead it is in %s state".format(replicaState(partitionAndReplica)))
}
private def registerBrokerChangeListener() = {
zkClient.subscribeChildChanges(ZkUtils.BrokerIdsPath, new BrokerChangeListener())
}
/**
* Invoked on startup of the replica's state machine to set the initial state for replicas of all existing partitions
* in zookeeper
*/
private def initializeReplicaState() {
for((topicPartition, assignedReplicas) <- controllerContext.partitionReplicaAssignment) {
val topic = topicPartition.topic
val partition = topicPartition.partition
assignedReplicas.foreach { replicaId =>
val partitionAndReplica = PartitionAndReplica(topic, partition, replicaId)
controllerContext.liveBrokerIds.contains(replicaId) match {
case true => replicaState.put(partitionAndReplica, OnlineReplica)
case false =>
// mark replicas on dead brokers as failed for topic deletion, if they belong to a topic to be deleted.
// This is required during controller failover since during controller failover a broker can go down,
// so the replicas on that broker should be moved to ReplicaDeletionIneligible to be on the safer side.
replicaState.put(partitionAndReplica, ReplicaDeletionIneligible)
}
}
}
}
  def partitionsAssignedToBroker(topics: Seq[String], brokerId: Int): Seq[TopicAndPartition] = {
controllerContext.partitionReplicaAssignment.filter(_._2.contains(brokerId)).keySet.toSeq
}
/**
* This is the zookeeper listener that triggers all the state transitions for a replica
*/
class BrokerChangeListener() extends IZkChildListener with Logging {
this.logIdent = "[BrokerChangeListener on Controller " + controller.config.brokerId + "]: "
def handleChildChange(parentPath : String, currentBrokerList : java.util.List[String]) {
info("Broker change listener fired for path %s with children %s".format(parentPath, currentBrokerList.mkString(",")))
inLock(controllerContext.controllerLock) {
if (hasStarted.get) {
ControllerStats.leaderElectionTimer.time {
try {
val curBrokerIds = currentBrokerList.map(_.toInt).toSet
val newBrokerIds = curBrokerIds -- controllerContext.liveOrShuttingDownBrokerIds
val newBrokerInfo = newBrokerIds.map(ZkUtils.getBrokerInfo(zkClient, _))
val newBrokers = newBrokerInfo.filter(_.isDefined).map(_.get)
val deadBrokerIds = controllerContext.liveOrShuttingDownBrokerIds -- curBrokerIds
controllerContext.liveBrokers = curBrokerIds.map(ZkUtils.getBrokerInfo(zkClient, _)).filter(_.isDefined).map(_.get)
info("Newly added brokers: %s, deleted brokers: %s, all live brokers: %s"
.format(newBrokerIds.mkString(","), deadBrokerIds.mkString(","), controllerContext.liveBrokerIds.mkString(",")))
newBrokers.foreach(controllerContext.controllerChannelManager.addBroker(_))
deadBrokerIds.foreach(controllerContext.controllerChannelManager.removeBroker(_))
if(newBrokerIds.size > 0)
controller.onBrokerStartup(newBrokerIds.toSeq)
if(deadBrokerIds.size > 0)
controller.onBrokerFailure(deadBrokerIds.toSeq)
} catch {
case e: Throwable => error("Error while handling broker changes", e)
}
}
}
}
}
}
}
sealed trait ReplicaState { def state: Byte }
case object NewReplica extends ReplicaState { val state: Byte = 1 }
case object OnlineReplica extends ReplicaState { val state: Byte = 2 }
case object OfflineReplica extends ReplicaState { val state: Byte = 3 }
case object ReplicaDeletionStarted extends ReplicaState { val state: Byte = 4 }
case object ReplicaDeletionSuccessful extends ReplicaState { val state: Byte = 5 }
case object ReplicaDeletionIneligible extends ReplicaState { val state: Byte = 6 }
case object NonExistentReplica extends ReplicaState { val state: Byte = 7 }
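// Illustrative sketch, not from the upstream Kafka source: the valid previous states
// enforced by the assertValidPreviousStates calls visible in handleStateChange above,
// collected into a lookup table. Only the target states shown in this excerpt are
// covered; the object and method names below are hypothetical helpers.
object ReplicaTransitionTableSketch {
  val validPreviousStates: Map[ReplicaState, Set[ReplicaState]] = Map(
    OnlineReplica -> Set(NewReplica, OnlineReplica, OfflineReplica, ReplicaDeletionIneligible),
    OfflineReplica -> Set(NewReplica, OnlineReplica, OfflineReplica, ReplicaDeletionIneligible),
    ReplicaDeletionSuccessful -> Set(ReplicaDeletionStarted),
    NonExistentReplica -> Set(ReplicaDeletionSuccessful)
  )
  // A transition into `to` is legal only when the replica currently sits in one of
  // the states listed for `to`.
  def isLegalTransition(from: ReplicaState, to: ReplicaState): Boolean =
    validPreviousStates.get(to).exists(_.contains(from))
}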
|
jkreps/kafka
|
core/src/main/scala/kafka/controller/ReplicaStateMachine.scala
|
Scala
|
apache-2.0
| 22,134 |
object p {
// test parametric case classes, which synthesis `canEqual` and `equals`
enum Result[+T, +E] {
case OK [T](x: T) extends Result[T, Nothing]
case Err[E](e: E) extends Result[Nothing, E]
}
}
|
lampepfl/dotty
|
tests/pos-special/isInstanceOf/Result.scala
|
Scala
|
apache-2.0
| 215 |
package com.eltimn.scamongo.field
/*
* Copyright 2010 Tim Nelson
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
*/
import _root_.net.liftweb.common.{Box, Empty, Failure, Full}
import _root_.net.liftweb.http.js.JE.Str
import _root_.net.liftweb.json.JsonAST.{JNothing, JValue}
import _root_.net.liftweb.record.{Field, Record}
import _root_.net.liftweb.util.Log
import com.mongodb._
class MongoMapField[OwnerType <: MongoRecord[OwnerType], MapValueType](rec: OwnerType)
extends Field[Map[String, MapValueType], OwnerType]
with MongoFieldFlavor[Map[String, MapValueType]] {
def asJs = Str(toString) // not implemented
def asJValue = (JNothing: JValue) // not implemented
def setFromJValue(jvalue: JValue) = Empty // not implemented
def asXHtml = <div></div> // not implemented
def defaultValue = Map[String, MapValueType]()
def setFromAny(in: Any): Box[Map[String, MapValueType]] = {
in match {
case map: Map[String, MapValueType] => Full(set(map))
case Some(map: Map[String, MapValueType]) => Full(set(map))
case Full(map: Map[String, MapValueType]) => Full(set(map))
case dbo: DBObject => setFromDBObject(dbo)
case seq: Seq[Map[String, MapValueType]] if !seq.isEmpty => setFromAny(seq(0))
case null => Full(set(null))
case s: String => setFromString(s)
case None | Empty | Failure(_, _, _) => Full(set(null))
case o => {
println("setFromString: "+o.toString)
setFromString(o.toString)
}
}
}
def toForm = <div></div> // not implemented
def owner = rec
/*
* Convert this field's value into a DBObject so it can be stored in Mongo.
  * Compatible with most object types, including Pattern, ObjectId, JObject,
  * and JsonObject case classes.
* Override this method for custom logic.
*/
def asDBObject: DBObject = {
val dbo = new BasicDBObject
for (k <- value.keys)
dbo.put(k.toString, value.getOrElse(k, ""))
dbo
}
// set this field's value using a DBObject returned from Mongo.
def setFromDBObject(dbo: DBObject): Box[Map[String, MapValueType]] = {
import scala.collection.jcl.Conversions._
//import scala.collection.mutable.{Map => MMap}
var ret = Map[String, MapValueType]()
for (k <- dbo.keySet)
ret += (k.toString -> dbo.get(k).asInstanceOf[MapValueType])
Full(set(ret))
}
}
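// Illustrative usage sketch, not from the upstream scamongo source. The record and
// field names below are hypothetical; it only shows how asDBObject/setFromDBObject
// round-trip a Map through a BasicDBObject.
//
//   class Person private () extends MongoRecord[Person] {
//     object attributes extends MongoMapField[Person, String](this)
//     ...
//   }
//
//   person.attributes.set(Map("eyes" -> "blue", "hair" -> "brown"))
//   val dbo = person.attributes.asDBObject   // BasicDBObject {"eyes": "blue", "hair": "brown"}
//   person.attributes.setFromDBObject(dbo)   // Full(Map("eyes" -> "blue", "hair" -> "brown"))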
|
eltimn/scamongo
|
src/main/scala/com/eltimn/scamongo/field/MongoMapField.scala
|
Scala
|
apache-2.0
| 2,832 |
/*
* Copyright (c) 2014 the original author or authors.
*
* Licensed under the MIT License;
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://opensource.org/licenses/MIT
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package git
import java.io.{RandomAccessFile, File}
import git.util.Compressor
case class PackFile(file: File)
object PackFile {
// Pre-defined bit flags.
val ExtendedBitFlag = 0
val CommitBitFlag = 1
val TreeBitFlag = 2
val BlobBitFlag = 3
val TagBitFlag = 4
val ReservedBitFlag = 5
val OffsetDelta = 6
/** Returns the object for the given ID. */
def findById(repository: Repository, pack: PackFile, offset: Int, id: ObjectId): Object = {
val raf = new RandomAccessFile(pack.file, "r")
raf.seek(offset)
val bytes = new Array[Byte](1)
raf.read(bytes)
// The first byte includes type information, and also size info, which may continue in the next bytes.
val firstByte = bytes(0) & 0xff
// Figure out the type.
val typeFlag = (firstByte >> 4) & 7
// Figure out the length.
def parseLength(index: Int, length: Int, shift: Int): Int = {
val bytes = new Array[Byte](1)
raf.read(bytes)
val c = bytes(0) & 0xff
val l = ((c & 0x7f) << shift) + length
if ((c & 0x80) == 0) l // We are not done until the most significant bit is 0.
else parseLength(index = index + 1, length = l, shift = shift + 7)
}
val length = parseLength(shift = 4, length = firstByte & 0x0f, index = 1)
// Retrieve the object data.
val deflatedBytes = new Array[Byte](length)
raf.read(deflatedBytes)
val objectBytes = Compressor.decompressData(deflatedBytes.toList)
typeFlag match {
case PackFile.BlobBitFlag => Blob.decodeBody(objectBytes, id = Some(id))
case PackFile.CommitBitFlag => Commit.decodeBody(objectBytes, id = Some(id))
case PackFile.TagBitFlag => Tag.decodeBody(objectBytes, id = Some(id))
case PackFile.TreeBitFlag => Tree.decodeBody(objectBytes, id = Some(id))
case _ => throw new CorruptRepositoryException(s"Could not parse object type: $typeFlag") // TODO: Deltas
}
}
}
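// Illustrative worked example, not from the upstream source, tracing the header
// parsing in findById above. Suppose the two bytes at `offset` are 0xb5 0x0a:
//   firstByte = 0xb5 -> typeFlag = (0xb5 >> 4) & 7 = 3, i.e. BlobBitFlag,
//   initial length bits = 0xb5 & 0x0f = 5,
//   next byte 0x0a has its most significant bit clear, so parseLength returns
//   ((0x0a & 0x7f) << 4) + 5 = 165.
// The 165 deflated bytes that follow are inflated by Compressor.decompressData and
// handed to Blob.decodeBody.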
|
kaisellgren/ScalaGit
|
src/main/scala/git/PackFile.scala
|
Scala
|
mit
| 2,576 |
package com.arcusys.valamis.web.portlet
import javax.portlet.{RenderRequest, RenderResponse}
import com.arcusys.valamis.certificate.service.AssignmentService
import com.arcusys.valamis.lrs.serializer.AgentSerializer
import com.arcusys.valamis.lrs.service.util.TincanHelper._
import com.arcusys.valamis.util.serialization.JsonHelper
import com.arcusys.valamis.web.portlet.base._
class GradebookView extends OAuthPortlet with PortletBase {
lazy val assignmentService = inject[AssignmentService]
override def doView(request: RenderRequest, response: RenderResponse) {
implicit val out = response.getWriter
val securityScope = getSecurityData(request)
sendTextFile("/templates/2.0/gradebook_templates.html")
sendTextFile("/templates/2.0/common_templates.html")
val user = LiferayHelpers.getUser(request)
val tincanActor = JsonHelper.toJson(user.getAgentByUuid, new AgentSerializer)
val endpoint = JsonHelper.toJson(getLrsEndpointInfo)
val permission = new PermissionUtil(request, this)
val viewAllPermission = permission.hasPermission(ViewAllPermission.name)
val data = Map(
"tincanActor" -> tincanActor,
"endpointData" -> endpoint,
"viewAllPermission" -> viewAllPermission,
"assignmentDeployed" -> assignmentService.isAssignmentDeployed
) ++ securityScope.data
sendMustacheFile(data, "gradebook.html")
}
}
|
igor-borisov/valamis
|
valamis-portlets/src/main/scala/com/arcusys/valamis/web/portlet/GradebookView.scala
|
Scala
|
gpl-3.0
| 1,392 |
package binders
import play.api.mvc.QueryStringBindable
case class Bounds(min: Int, max: Int)
object Bounds {
implicit def binder(implicit intBinder: QueryStringBindable[Int]) = new QueryStringBindable[Bounds] {
def bind(key: String, params: Map[String, Seq[String]]): Option[Either[String, Bounds]] = {
for {
minE <- intBinder.bind(key + ".min", params)
maxE <- intBinder.bind(key + ".max", params)
} yield {
(minE, maxE) match {
case (Right(min), Right(max)) if min <= max => Right(Bounds(min, max))
case _ => Left("Unable to bind bounds")
}
}
}
    def unbind(key: String, bounds: Bounds) = {
      // Renders e.g. "range.min=1&range.max=10" for key "range".
      intBinder.unbind(key + ".min", bounds.min) + "&" + intBinder.unbind(key + ".max", bounds.max)
    }
}
}
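// Illustrative usage sketch, not from the upstream project; the key "range" and the
// values are examples only.
object BoundsBinderExample extends App {
  val binder: QueryStringBindable[Bounds] = Bounds.binder
  // Binding "?range.min=1&range.max=10" yields Some(Right(Bounds(1, 10))); a min
  // greater than max yields Some(Left("Unable to bind bounds")).
  println(binder.bind("range", Map("range.min" -> Seq("1"), "range.max" -> Seq("10"))))
  // Unbinding renders the bounds back into "range.min=...&range.max=..." form.
  println(binder.unbind("range", Bounds(1, 10)))
}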
|
softberries/play-bootstrap-generator
|
play-bootstrap/app/binders/Bounds.scala
|
Scala
|
mit
| 979 |
package skinny.engine.routing
import skinny.engine.MultiParams
import scala.util.matching.Regex
/**
* A path pattern optionally matches a request path and extracts path
* parameters.
*/
case class PathPattern(regex: Regex, captureGroupNames: List[String] = Nil) {
def apply(path: String): Option[MultiParams] = {
    // This is a performance hotspot. Hideous mutations ahead.
val m = regex.pattern.matcher(path)
var multiParams = Map[String, Seq[String]]()
if (m.matches) {
var i = 0
captureGroupNames foreach { name =>
i += 1
val value = m.group(i)
if (value != null) {
val values = multiParams.getOrElse(name, Vector()) :+ value
multiParams = multiParams.updated(name, values)
}
}
Some(multiParams)
} else None
}
def +(pathPattern: PathPattern): PathPattern = PathPattern(
new Regex(this.regex.toString + pathPattern.regex.toString),
this.captureGroupNames ::: pathPattern.captureGroupNames
)
}
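// Illustrative usage sketch, not from the upstream skinny-framework source; the
// route shapes below are examples only.
object PathPatternExample extends App {
  // Hand-compiled equivalent of a "/users/:id" route: one capture group named "id".
  val users = PathPattern("""/users/([^/]+)""".r, List("id"))
  println(users("/users/42"))    // Some(Map(id -> Vector(42)))
  println(users("/posts/42"))    // None: the regex must match the whole path
  // Patterns compose with `+`, concatenating both the regex and the capture names.
  val usersAction = users + PathPattern("""/([^/]+)""".r, List("action"))
  println(usersAction("/users/42/edit")) // Some(Map(id -> Vector(42), action -> Vector(edit)))
}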
|
holycattle/skinny-framework
|
engine/src/main/scala/skinny/engine/routing/PathPattern.scala
|
Scala
|
mit
| 1,017 |
/*
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
*/
package play.utils
/**
* provides conversion helpers
*/
object Conversions {
def newMap[A, B](data: (A, B)*) = Map(data: _*)
}
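// Illustrative usage note, not from the upstream source:
//   Conversions.newMap("a" -> 1, "b" -> 2) == Map("a" -> 1, "b" -> 2)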
|
jyotikamboj/container
|
pf-framework/src/play/src/main/scala/play/utils/Conversions.scala
|
Scala
|
mit
| 207 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy
import scala.collection.Map
private[spark] case class Command(
mainClass: String,
arguments: Seq[String],
environment: Map[String, String],
classPathEntries: Seq[String],
libraryPathEntries: Seq[String],
javaOpts: Seq[String]) {
}
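// Illustrative construction sketch, not from the upstream Spark source; every value
// below is a made-up example of what the fields might hold.
//
//   Command(
//     mainClass = "com.example.Main",
//     arguments = Seq("--verbose"),
//     environment = Map("SPARK_HOME" -> "/opt/spark"),
//     classPathEntries = Seq("/opt/app/app.jar"),
//     libraryPathEntries = Seq.empty,
//     javaOpts = Seq("-Xmx2g"))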
|
sh-cho/cshSpark
|
deploy/Command.scala
|
Scala
|
apache-2.0
| 1,091 |
package org.jetbrains.plugins.scala
package conversion.copy
import java.awt.datatransfer.Transferable
import java.util.Collections._
import java.{lang, util}
import com.intellij.codeInsight.editorActions.{CopyPastePostProcessor, TextBlockTransferableData}
import com.intellij.openapi.editor.{Editor, RangeMarker}
import com.intellij.openapi.project.Project
import com.intellij.openapi.util.Ref
import com.intellij.psi.PsiFile
import org.jetbrains.annotations.{NotNull, Nullable}
import scala.collection.JavaConverters._
/**
* @author Pavel Fatin
*
* Common adapter for legacy interface implementations.
*/
abstract class SingularCopyPastePostProcessor[T <: TextBlockTransferableData] extends CopyPastePostProcessor[T] {
@NotNull
override final def collectTransferableData(file: PsiFile, editor: Editor,
startOffsets: Array[Int], endOffsets: Array[Int]) = {
val result = collectTransferableData0(file, editor, startOffsets, endOffsets)
if (result == null) emptyList() else singletonList(result)
}
@Nullable
protected def collectTransferableData0(file: PsiFile, editor: Editor,
startOffsets: Array[Int], endOffsets: Array[Int]): T
@NotNull
override final def extractTransferableData(content: Transferable) = {
val result = extractTransferableData0(content)
if (result == null) emptyList() else singletonList(result)
}
@Nullable
protected def extractTransferableData0(content: Transferable): T
override final def processTransferableData(project: Project, editor: Editor, bounds: RangeMarker,
caretOffset: Int, ref: Ref[lang.Boolean], values: util.List[T]) {
values.asScala.foreach { value =>
processTransferableData0(project, editor, bounds, caretOffset, ref, value)
}
}
protected def processTransferableData0(project: Project, editor: Editor, bounds: RangeMarker,
caretOffset: Int, ref: Ref[lang.Boolean], value: T)
}
|
triggerNZ/intellij-scala
|
src/org/jetbrains/plugins/scala/conversion/copy/SingularCopyPastePostProcessor.scala
|
Scala
|
apache-2.0
| 2,062 |
package pl.touk.nussknacker.engine.util
object ThreadUtils {
def loadUsingContextLoader(className: String): Class[_] = Thread.currentThread().getContextClassLoader.loadClass(className)
def withThisAsContextClassLoader[T](classLoader: ClassLoader)(block: => T): T = {
val currentLoader = Thread.currentThread().getContextClassLoader
Thread.currentThread().setContextClassLoader(classLoader)
try {
block
} finally {
Thread.currentThread().setContextClassLoader(currentLoader)
}
}
}
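// Illustrative usage sketch, not from the upstream Nussknacker source; the "plugin"
// class loader below is a stand-in for a real isolated loader.
object ThreadUtilsExample extends App {
  val pluginLoader: ClassLoader = getClass.getClassLoader
  val loaded = ThreadUtils.withThisAsContextClassLoader(pluginLoader) {
    // Inside the block the context class loader is pluginLoader, so context-based
    // lookups (ServiceLoader, loadUsingContextLoader, ...) resolve against it; the
    // previous loader is always restored afterwards, even on exceptions.
    ThreadUtils.loadUsingContextLoader("java.lang.String")
  }
  println(loaded) // class java.lang.String
}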
|
TouK/nussknacker
|
utils/utils/src/main/scala/pl/touk/nussknacker/engine/util/ThreadUtils.scala
|
Scala
|
apache-2.0
| 522 |
// Jubatus: Online machine learning framework for distributed environment
// Copyright (C) 2014-2015 Preferred Networks and Nippon Telegraph and Telephone Corporation.
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License version 2.1 as published by the Free Software Foundation.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
package us.jubat.jubaql_server.processor
import org.apache.spark.rdd.RDD
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.types.StructType
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.{StreamingContext, Time}
case class SchemaDStream(sqlc: SQLContext,
dataStream: DStream[Row],
schemaStream: DStream[StructType]) {
def print() = foreachRDD(rdd => {
println(rdd.schema)
rdd.foreach(println)
})
def registerStreamAsTable(name: String): Unit = {
foreachRDD(_.registerTempTable(name))
}
def foreachRDD(func: SchemaRDD => Unit): Unit = {
// We want to simulate stream.foreachRDD on the dataStream,
// but if we say dataStream.foreachRDD(...), we don't have
    // access to the schema. The only way to merge the two
// streams is dataStream.transformWith(schemaStream ...).
// Therefore we use this transformWith() function, apply
// the function obtained as a parameter therein, and call
// count() to force execution.
def executeFunction(dataRDD: RDD[Row], schemaRDD: RDD[StructType]): RDD[Unit] = {
val schema: StructType = schemaRDD.collect.head
val dataWithSchema: SchemaRDD = sqlc.applySchema(dataRDD, schema)
val result = func(dataWithSchema)
schemaRDD.map(x => result)
}
dataStream.transformWith(schemaStream, executeFunction _).foreachRDD(_.count())
}
}
/**
* Helper object to construct a SchemaDStream from various input formats.
*/
object SchemaDStream {
private class RegisteredTableDStream(@transient ssc: StreamingContext,
@transient sqlc: SQLContext,
tableName: String) extends InputDStream[Row](ssc) {
override def start(): Unit = {}
override def stop(): Unit = {}
override def compute(validTime: Time): Option[RDD[Row]] = {
Some(sqlc.table(tableName))
}
}
private class SQLResultDStream(@transient ssc: StreamingContext,
@transient sqlc: SQLContext,
stmt: Either[String, LogicalPlan]) extends InputDStream[Row](ssc) {
override def start(): Unit = {}
override def stop(): Unit = {}
override def compute(validTime: Time): Option[RDD[Row]] = {
val rdd = stmt match {
case Left(s) =>
sqlc.sql(s)
case Right(p) =>
new SchemaRDD(sqlc, p)
}
Some(rdd)
}
}
/**
* Create a SchemaDStream from a DStream of JSON strings using schema inference.
*
* @param regName if Some(s), also register the created stream as a table with that name
*/
def fromStringStream(sqlc: SQLContext,
stream: DStream[String],
regName: Option[String]): SchemaDStream = {
val schemaStream: DStream[StructType] = stream.transform(rdd => {
try {
// the following call will compute the input RDD for schema
// inference even if it is never used afterwards
rdd.context.parallelize(sqlc.jsonRDD(rdd, 0.5).schema :: Nil)
} catch {
case e: UnsupportedOperationException if e.getMessage == "empty collection" =>
// if the collection is empty, we cannot infer the schema, so we
// return an empty schema.
// NB. Executing SQL on this (empty) SchemaRDD will fail because the
// columns are not known. It is the user's responsibility to
// do the "right thing" in that case.
val schema = StructType(Nil)
rdd.context.parallelize(schema :: Nil)
case e: Throwable =>
throw e
}
})
val rowStream: DStream[Row] = stream.transformWith(schemaStream,
(rows: RDD[String], schemas: RDD[StructType]) => {
val schema = schemas.collect().head
val rdd = sqlc.jsonRDD(rows, schema)
rdd
}).cache() // This `cache()` is required for Spark 1.2.2.
// register stream as a table
val resultStream = SchemaDStream(sqlc, rowStream, schemaStream)
regName.foreach(resultStream.registerStreamAsTable)
resultStream
}
/**
* Create a SchemaDStream from a DStream of JSON strings using given schema.
*
* @param regName if Some(s), also register the created stream as a table with that name
*/
def fromStringStreamWithSchema(sqlc: SQLContext,
stream: DStream[String],
schema: StructType,
regName: Option[String]): SchemaDStream = {
val schemaStream: DStream[StructType] = stream.transform(rdd => {
rdd.context.parallelize(schema :: Nil)
})
val rowStream: DStream[Row] = stream.transform((rows: RDD[String]) => {
sqlc.jsonRDD(rows, schema)
}).cache() // This `cache()` is required for Spark 1.2.2.
// register stream as a table
val resultStream = SchemaDStream(sqlc, rowStream, schemaStream)
regName.foreach(resultStream.registerStreamAsTable)
resultStream
}
/**
* Create a SchemaDStream as a transformation on a previously registered stream.
*
* @param lookupName name of the stream to operate on, as per `registerStreamAsTable()`
* @param transformation transformation of the stream
* @param regName if Some(s), also register the created stream as a table with that name
*/
def fromRDDTransformation(ssc: StreamingContext,
sqlc: SQLContext,
lookupName: String,
transformation: SchemaRDD => SchemaRDD,
regName: Option[String]): SchemaDStream = {
val baseStream = new RegisteredTableDStream(ssc, sqlc,
lookupName).cache() // This `cache()` is required for Spark 1.2.2.
val schemaStream = baseStream.transform(rdd => rdd match {
case s: SchemaRDD =>
rdd.context.parallelize(transformation(s).schema :: Nil)
})
// NB. Just evolving schema and row stream independent of each other
// does not seem to be enough any more in Spark 1.2. We also need
// to call `sqlc.applySchema()` with the new schema or we will run
// into a mysterious "is not cached" exception.
val rowStream = baseStream.transformWith(schemaStream,
(rdd: RDD[Row], schemaRDD: RDD[StructType]) => {
val schema = schemaRDD.collect()(0)
val outRdd = rdd match {
case s: SchemaRDD =>
transformation(s)
}
sqlc.applySchema(outRdd, schema)
}).cache() // This `cache()` is required for Spark 1.2.2.
// register stream as a table
val resultStream = SchemaDStream(sqlc, rowStream, schemaStream)
regName.foreach(resultStream.registerStreamAsTable)
resultStream
}
/**
* Create a SchemaDStream from a previously registered stream.
*
* @param lookupName name of the stream, as per `registerStreamAsTable()`
* @return
*/
def fromTableName(ssc: StreamingContext,
sqlc: SQLContext,
lookupName: String):
SchemaDStream = {
fromRDDTransformation(ssc, sqlc, lookupName, x => x, None)
}
/**
* Create a SchemaDStream as result of an SQL query in each interval.
*
* @param statement SQL statement (used tables must have been registered before)
* @param regName if Some(s), also register the created stream as a table with that name
*/
def fromSQL(ssc: StreamingContext,
sqlc: SQLContext,
statement: String,
regName: Option[String]): SchemaDStream = {
val baseStream = new SQLResultDStream(ssc, sqlc,
Left(statement)).cache() // This `cache()` is required for Spark 1.2.2.
val schemaStream = baseStream.transform(rdd => rdd match {
case s: SchemaRDD =>
rdd.context.parallelize(s.schema :: Nil)
})
// register stream as a table
val resultStream = SchemaDStream(sqlc, baseStream, schemaStream)
regName.foreach(resultStream.registerStreamAsTable)
resultStream
}
/**
* Create a SchemaDStream as result of an SQL query in each interval.
*
* @param selectPlan SQL plan (used tables must have been registered before)
* @param regName if Some(s), also register the created stream as a table with that name
*/
def fromSQL(ssc: StreamingContext,
sqlc: SQLContext,
selectPlan: LogicalPlan,
regName: Option[String]): SchemaDStream = {
val baseStream = new SQLResultDStream(ssc, sqlc,
Right(selectPlan)).cache() // This `cache()` is required for Spark 1.2.2.
val schemaStream = baseStream.transform(rdd => rdd match {
case s: SchemaRDD =>
rdd.context.parallelize(s.schema :: Nil)
})
// register stream as a table
val resultStream = SchemaDStream(sqlc, baseStream, schemaStream)
regName.foreach(resultStream.registerStreamAsTable)
resultStream
}
}
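// Illustrative wiring sketch, not from the upstream JubaQL source. The stream and
// table names are hypothetical; it only shows how the constructors above are meant
// to be combined.
object SchemaDStreamUsageSketch {
  def wire(ssc: StreamingContext, sqlc: SQLContext, jsonStream: DStream[String]): Unit = {
    // Infer a schema per batch from raw JSON strings and register the result as "events".
    SchemaDStream.fromStringStream(sqlc, jsonStream, Some("events"))
    // Derive a second stream by running SQL over the registered table in every interval.
    val filtered = SchemaDStream.fromSQL(ssc, sqlc, "SELECT * FROM events", Some("filtered"))
    filtered.print()
  }
}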
|
jubatus/jubaql-server
|
processor/src/main/scala/us/jubat/jubaql_server/processor/SchemaDStream.scala
|
Scala
|
lgpl-2.1
| 9,873 |
import java.net.URI
import java.time.ZonedDateTime
import com.github.j5ik2o.forseti.adaptor.generator.IdGenerator
import com.github.j5ik2o.forseti.adaptor.repository._
import com.github.j5ik2o.forseti.domain._
import com.github.j5ik2o.forseti.domain.client._
import com.github.j5ik2o.forseti.domain.user.{User, UserId, UserWriter}
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scalaz.std.scalaFuture._
import scalaz.{Maybe, NonEmptyList}
object DBInitializer {
import scala.concurrent.ExecutionContext.Implicits.global
val clientSecret1 =
"iQcmd8NPizM2rW3bCiBwm1YDNaOo4oSrnd3cvuI4aQQW98O6uJSq9hBcoJcxPzBcXDTTgzLWgy8nMSbDMUIhJU"
val clientSecret2 =
"iQcmd8NPizM2rW3bCiBwm1YDNaOo4oSrnd3cvuI4aQQW98O6uJSq9hBcoJcxPzBcXDTTgzLWgy8nMSbDMUIhJU"
val idConfig = IdConfig(41, 5, 5, 4, 1486324266177L)
val idGenerator: IdGenerator = new IdGeneratorImpl(idConfig)
val userWriter: UserWriter = new UserWriterImpl()
val clientWriter: ClientWriter = new ClientWriterImpl()
val clientReader: ClientReader = new ClientReaderImpl()
def run() = {
val now = ZonedDateTime.now()
val m = for {
userId <- idGenerator.generateId[UserId]
clientId1 <- idGenerator.generateId[ClientId]
user1 = User(
userId,
name = "test",
givenName = Maybe.empty,
familyName = Maybe.empty,
middleName = Maybe.empty,
nickName = Maybe.empty,
preferredUserName = Maybe.empty,
profileUri = Maybe.empty,
pictureUri = Maybe.empty,
websiteUri = Maybe.empty,
email = "[email protected]",
emailVerified = false,
password = "test",
genderType = Maybe.empty,
birthDate = Maybe.empty,
zoneInfo = Maybe.empty,
locale = Maybe.empty,
now,
now
)
client1 = Client(
clientId1,
userId = user1.id,
name = Maybe.empty,
clientType = ClientType.Confidential,
applicationType = Maybe.just(ApplicationType.WebApplication),
responseType = ResponseTypes(NonEmptyList(ResponseType.Code, ResponseType.Implicit)),
grantTypes = GrantTypes(NonEmptyList(GrantType.Code, GrantType.Implicit)),
clientKey = "client-1",
Maybe.just(ClientSecret(clientSecret1)),
RedirectUris(Set(URI.create("http://localhost:9001/code/callback"))),
// RedirectUris(Set(URI.create("http://localhost:9001/code/callback"), URI.create("http://localhost:9001/code/callback1"))),
Scope(Seq("openid")),
now,
now
)
_ <- userWriter.store(user1)
_ <- clientWriter.store(client1)
client1 <- clientReader.resolveById(client1.id)
} yield client1
Await.result(m.run, Duration.Inf).fold(_.printStackTrace(), println)
}
}
|
j5ik2o/forseti
|
app/open-id-provider/app/DBInitializer.scala
|
Scala
|
mit
| 2,853 |
package common
import xyz.nabijaczleweli.scala_game_of_life.cell.{Cell, Material}
import xyz.nabijaczleweli.scala_game_of_life.engine.registries.CellRegistry
import xyz.nabijaczleweli.scala_game_of_life.world.ICellAccess
/** @author Jędrzej
* @since 07.05.14
*/
object ThingsForTests {
final val testCellID = Short.MaxValue
private val m = CellRegistry.getClass.getMethod("add", classOf[CellRegistry.key_type], classOf[CellRegistry.stored_type])
m setAccessible true
	m.invoke(
		CellRegistry,
		testCellID.asInstanceOf[Object],
		new Cell(Material.notAir) {
			override def draw(onScreenX: Int, onScreenY: Int, worldX: Long, worldY: Long, world: ICellAccess) {}
		}.asInstanceOf[Object])
}
|
nabijaczleweli/Scala-Game-of-Life
|
src/test/scala/common/ThingsForTests.scala
|
Scala
|
mit
| 689 |
package net.itadinanta.rnkr.backend.blackhole
import akka.actor.Props
import net.itadinanta.rnkr.backend.Metadata
import net.itadinanta.rnkr.backend.Storage.Reader
import net.itadinanta.rnkr.backend.Storage.Writer
import scala.concurrent.Future
import net.itadinanta.rnkr.backend.Watermark
import net.itadinanta.rnkr.engine.Leaderboard
import net.itadinanta.rnkr.backend.ReplayMode
object BlackHole {
class BlackHoleReader(
val datastore: Datastore,
val id: String) extends Reader {
implicit override lazy val executionContext = context.dispatcher
def append(l: List[Future[Page]], pageCount: Int): Future[Int] =
Future.successful(0)
def loadPage(watermark: Long, page: Int): Future[Page] =
Future.successful(Page(0, Seq()))
def loadMetadata(): Future[Metadata] =
Future.successful(Metadata())
def loadWatermark(): Future[Watermark] =
Future.successful(Watermark(0, 0))
def replayWal(watermark: Long): Future[Int] =
Future.successful(0)
}
class BlackHoleWriter(
val datastore: Datastore,
val id: String,
override val initialWatermark: Long,
override val metadata: Metadata) extends Writer {
implicit override lazy val executionContext = context.dispatcher
    def compact(watermark: Watermark): Unit =
      ()
def storeRows(page: Int, rows: Seq[Leaderboard.Entry]): Future[Int] =
Future.successful(rows.size)
def storeWal(mode: ReplayMode.Value, timestamp: Long, watermark: Long, w: Leaderboard.Post): Future[Leaderboard.Post] =
Future.successful(w)
def storeWatermark(watermark: Long, pages: Int): Future[Watermark] =
Future.successful(Watermark(watermark, pages))
}
class Datastore extends net.itadinanta.rnkr.backend.Datastore {
override def readerProps(id: String) =
Props(new BlackHoleReader(this, id))
override def writerProps(id: String, watermark: Long, metadata: Metadata) =
Props(new BlackHoleWriter(this, id, watermark, metadata))
}
}
|
itadinanta/rnkr
|
rnkr-engine/src/main/scala/net/itadinanta/rnkr/backend/blackhole/BlackHole.scala
|
Scala
|
gpl-2.0
| 1,934 |
package org.scalafmt
import scala.language.postfixOps
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.meta.Tree
import scala.meta.parsers.Parse
import java.io.File
import org.scalafmt.Error.Incomplete
import org.scalafmt.Error.SearchStateExploded
import org.scalafmt.stats.TestStats
import org.scalafmt.util.DiffAssertions
import org.scalafmt.util.DiffTest
import org.scalafmt.util.FileOps
import org.scalafmt.util.FormatAssertions
import org.scalafmt.util.HasTests
import org.scalafmt.util.LoggerOps
import org.scalafmt.util.Report
import org.scalatest.BeforeAndAfterAllConfigMap
import org.scalatest.ConfigMap
import org.scalatest.FunSuite
import org.scalatest.concurrent.Timeouts
import org.scalatest.time.SpanSugar._
// TODO(olafur) property test: same solution without optimization or timeout.
class FormatTests
extends FunSuite
with Timeouts
with BeforeAndAfterAllConfigMap
with HasTests
with FormatAssertions
with DiffAssertions {
import LoggerOps._
lazy val onlyUnit = UnitTests.tests.exists(_.only)
lazy val onlyManual = !onlyUnit && ManualTests.tests.exists(_.only)
lazy val onlyOne = tests.exists(_.only)
override def ignore(t: DiffTest): Boolean = false
override val tests = {
if (onlyManual) ManualTests.tests
else UnitTests.tests
}
tests
.sortWith(bySpecThenName)
.withFilter(testShouldRun)
.foreach(runTest(run))
def run(t: DiffTest, parse: Parse[_ <: Tree]): Unit = {
val runner = scalafmtRunner(t.style.runner).copy(parser = parse)
val obtained =
Scalafmt.format(t.original, t.style.copy(runner = runner)) match {
        case Formatted.Failure(e)
            if t.style.onTestFailure.nonEmpty && e.getMessage.contains(
              t.style.onTestFailure) =>
t.expected
case Formatted.Failure(e: Incomplete) => e.formattedCode
case Formatted.Failure(e: SearchStateExploded) =>
logger.elem(e)
e.partialOutput
case x => x.get
}
debugResults += saveResult(t, obtained, onlyOne)
if (t.style.rewrite.rules.isEmpty &&
!t.style.assumeStandardLibraryStripMargin &&
t.style.onTestFailure.isEmpty) {
assertFormatPreservesAst(t.original, obtained)(parse,
t.style.runner.dialect)
}
val formattedAgain =
Scalafmt.format(obtained, t.style.copy(runner = runner)).get
// getFormatOutput(t.style, true) // uncomment to debug
assertNoDiff(formattedAgain, obtained, "Idempotency violated")
if (!onlyManual) {
assertNoDiff(obtained, t.expected)
Debug.newTest()
}
}
def testShouldRun(t: DiffTest): Boolean = !onlyOne || t.only
def bySpecThenName(left: DiffTest, right: DiffTest): Boolean = {
import scala.math.Ordered.orderingToOrdered
(left.spec, left.name).compare(right.spec -> right.name) < 0
}
override def afterAll(configMap: ConfigMap): Unit = {
val splits = Debug.enqueuedSplits
.groupBy(_.line.value)
.toVector
.sortBy(-_._2.size)
.map(x => s"Split(line=${x._1}, count=${x._2.size})")
.take(3)
logger.debug(splits.mkString(", "))
logger.debug(s"Total explored: ${Debug.explored}")
val results = debugResults.result()
val stats = TestStats(results)
// TODO(olafur) don't block printing out test results.
// I don't want to deal with scalaz's Tasks :'(
val k = for {
_ <- Future(
FileOps.writeFile(s"target${File.separator}index.html",
Report.heatmap(results)))
} yield ()
// Travis exits right after running tests.
if (sys.env.contains("TRAVIS")) Await.ready(k, 20 seconds)
}
}
|
Daxten/scalafmt
|
core/src/test/scala/org/scalafmt/FormatTests.scala
|
Scala
|
apache-2.0
| 3,771 |
package com.maximus.imr.rest
import java.text.SimpleDateFormat
import java.util.Date
import com.maximus.imr.rest.util.RETURN
import org.junit.Assert._
import org.junit.{Before, Ignore, Test}
/**
* Created by tstockton on 8/24/16.
*
*/
class NoarfiFetchServiceTest extends NoarfiService {
val sdf = new SimpleDateFormat("yyyy-MM-dd")
@Before
def initialize() {
}
@Test
def testList() {
val jsonString: String = list()
System.out.println("noarfiFetch()")
assertNotNull("Looking for a non-null return", jsonString)
}
@Test
def testDetails() {
val csvSring: String = detail(sdf.parse("2016-11-30"))
assertNotNull("Looking for a non-null return", csvSring)
System.out.println("rfiSearch(RETURN.CSV)")
System.out.println(csvSring)
System.out.println("===============================================")
}
@Test
def testAcknowledge() {
val csvSring: String = acknowledge(sdf.parse("2016-11-30"))
assertNotNull("Looking for a non-null return", csvSring)
System.out.println("rfiSearch(RETURN.CSV)")
System.out.println(csvSring)
System.out.println("===============================================")
}
}
|
maxird/imr-ca-sdk
|
scala/src/test/java/com/maximus/imr/rest/NoarfiFetchServiceTest.scala
|
Scala
|
mit
| 1,222 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.producer
import java.util.Properties
import java.util.concurrent.LinkedBlockingQueue
import junit.framework.Assert._
import org.easymock.EasyMock
import org.junit.Test
import kafka.api._
import kafka.cluster.Broker
import kafka.common._
import kafka.message._
import kafka.producer.async._
import kafka.serializer._
import kafka.server.KafkaConfig
import kafka.utils.TestUtils._
import org.scalatest.junit.JUnit3Suite
import scala.collection.Map
import scala.collection.mutable.ArrayBuffer
import kafka.utils._
class AsyncProducerTest extends JUnit3Suite {
val props = createBrokerConfigs(1)
val configs = props.map(p => new KafkaConfig(p))
override def setUp() {
super.setUp()
}
override def tearDown() {
super.tearDown()
}
@Test
def testProducerQueueSize() {
// a mock event handler that blocks
val mockEventHandler = new EventHandler[String,String] {
def handle(events: Seq[KeyedMessage[String,String]]) {
Thread.sleep(500)
}
def close {}
}
val props = new Properties()
props.put("serializer.class", "kafka.serializer.StringEncoder")
props.put("metadata.broker.list", TestUtils.getBrokerListStrFromConfigs(configs))
props.put("producer.type", "async")
props.put("queue.buffering.max.messages", "10")
props.put("batch.num.messages", "1")
props.put("queue.enqueue.timeout.ms", "0")
val config = new ProducerConfig(props)
val produceData = getProduceData(12)
val producer = new Producer[String, String](config, mockEventHandler)
try {
// send all 10 messages, should hit the batch size and then reach broker
producer.send(produceData: _*)
fail("Queue should be full")
}
catch {
case e: QueueFullException => //expected
}finally {
producer.close()
}
}
@Test
def testProduceAfterClosed() {
val props = new Properties()
props.put("serializer.class", "kafka.serializer.StringEncoder")
props.put("metadata.broker.list", TestUtils.getBrokerListStrFromConfigs(configs))
props.put("producer.type", "async")
props.put("batch.num.messages", "1")
val config = new ProducerConfig(props)
val produceData = getProduceData(10)
val producer = new Producer[String, String](config)
producer.close
try {
producer.send(produceData: _*)
fail("should complain that producer is already closed")
}
catch {
case e: ProducerClosedException => //expected
}
}
@Test
def testBatchSize() {
/**
* Send a total of 10 messages with batch size of 5. Expect 2 calls to the handler, one for each batch.
*/
val producerDataList = getProduceData(10)
val mockHandler = EasyMock.createStrictMock(classOf[DefaultEventHandler[String,String]])
mockHandler.handle(producerDataList.take(5))
EasyMock.expectLastCall
mockHandler.handle(producerDataList.takeRight(5))
EasyMock.expectLastCall
EasyMock.replay(mockHandler)
val queue = new LinkedBlockingQueue[KeyedMessage[String,String]](10)
val producerSendThread =
new ProducerSendThread[String,String]("thread1", queue, mockHandler, Integer.MAX_VALUE, 5, "")
producerSendThread.start()
for (producerData <- producerDataList)
queue.put(producerData)
producerSendThread.shutdown
EasyMock.verify(mockHandler)
}
@Test
def testQueueTimeExpired() {
/**
* Send a total of 2 messages with batch size of 5 and queue time of 200ms.
* Expect 1 calls to the handler after 200ms.
*/
val producerDataList = getProduceData(2)
val mockHandler = EasyMock.createStrictMock(classOf[DefaultEventHandler[String,String]])
mockHandler.handle(producerDataList)
EasyMock.expectLastCall
EasyMock.replay(mockHandler)
val queueExpirationTime = 200
val queue = new LinkedBlockingQueue[KeyedMessage[String,String]](10)
val producerSendThread =
new ProducerSendThread[String,String]("thread1", queue, mockHandler, queueExpirationTime, 5, "")
producerSendThread.start()
for (producerData <- producerDataList)
queue.put(producerData)
Thread.sleep(queueExpirationTime + 100)
EasyMock.verify(mockHandler)
producerSendThread.shutdown
}
@Test
def testPartitionAndCollateEvents() {
val producerDataList = new ArrayBuffer[KeyedMessage[Int,Message]]
// use bogus key and partition key override for some messages
producerDataList.append(new KeyedMessage[Int,Message]("topic1", key = 0, message = new Message("msg1".getBytes)))
producerDataList.append(new KeyedMessage[Int,Message]("topic2", key = -99, partKey = 1, message = new Message("msg2".getBytes)))
producerDataList.append(new KeyedMessage[Int,Message]("topic1", key = 2, message = new Message("msg3".getBytes)))
producerDataList.append(new KeyedMessage[Int,Message]("topic1", key = -101, partKey = 3, message = new Message("msg4".getBytes)))
producerDataList.append(new KeyedMessage[Int,Message]("topic2", key = 4, message = new Message("msg5".getBytes)))
val props = new Properties()
props.put("metadata.broker.list", TestUtils.getBrokerListStrFromConfigs(configs))
val broker1 = new Broker(0, "localhost", 9092)
val broker2 = new Broker(1, "localhost", 9093)
broker1
// form expected partitions metadata
val partition1Metadata = new PartitionMetadata(0, Some(broker1), List(broker1, broker2))
val partition2Metadata = new PartitionMetadata(1, Some(broker2), List(broker1, broker2))
val topic1Metadata = new TopicMetadata("topic1", List(partition1Metadata, partition2Metadata))
val topic2Metadata = new TopicMetadata("topic2", List(partition1Metadata, partition2Metadata))
val topicPartitionInfos = new collection.mutable.HashMap[String, TopicMetadata]
topicPartitionInfos.put("topic1", topic1Metadata)
topicPartitionInfos.put("topic2", topic2Metadata)
val intPartitioner = new Partitioner {
def partition(key: Any, numPartitions: Int): Int = key.asInstanceOf[Int] % numPartitions
}
val config = new ProducerConfig(props)
val producerPool = new ProducerPool(config)
val handler = new DefaultEventHandler[Int,String](config,
partitioner = intPartitioner,
encoder = null.asInstanceOf[Encoder[String]],
keyEncoder = new IntEncoder(),
producerPool = producerPool,
topicPartitionInfos = topicPartitionInfos)
val topic1Broker1Data =
ArrayBuffer[KeyedMessage[Int,Message]](new KeyedMessage[Int,Message]("topic1", 0, new Message("msg1".getBytes)),
new KeyedMessage[Int,Message]("topic1", 2, new Message("msg3".getBytes)))
val topic1Broker2Data = ArrayBuffer[KeyedMessage[Int,Message]](new KeyedMessage[Int,Message]("topic1", -101, 3, new Message("msg4".getBytes)))
val topic2Broker1Data = ArrayBuffer[KeyedMessage[Int,Message]](new KeyedMessage[Int,Message]("topic2", 4, new Message("msg5".getBytes)))
val topic2Broker2Data = ArrayBuffer[KeyedMessage[Int,Message]](new KeyedMessage[Int,Message]("topic2", -99, 1, new Message("msg2".getBytes)))
val expectedResult = Some(Map(
0 -> Map(
TopicAndPartition("topic1", 0) -> topic1Broker1Data,
TopicAndPartition("topic2", 0) -> topic2Broker1Data),
1 -> Map(
TopicAndPartition("topic1", 1) -> topic1Broker2Data,
TopicAndPartition("topic2", 1) -> topic2Broker2Data)
))
val actualResult = handler.partitionAndCollate(producerDataList)
assertEquals(expectedResult, actualResult)
}
@Test
def testSerializeEvents() {
val produceData = TestUtils.getMsgStrings(5).map(m => new KeyedMessage[String,String]("topic1",m))
val props = new Properties()
props.put("metadata.broker.list", TestUtils.getBrokerListStrFromConfigs(configs))
val config = new ProducerConfig(props)
// form expected partitions metadata
val topic1Metadata = getTopicMetadata("topic1", 0, 0, "localhost", 9092)
val topicPartitionInfos = new collection.mutable.HashMap[String, TopicMetadata]
topicPartitionInfos.put("topic1", topic1Metadata)
val producerPool = new ProducerPool(config)
val handler = new DefaultEventHandler[String,String](config,
partitioner = null.asInstanceOf[Partitioner],
encoder = new StringEncoder,
keyEncoder = new StringEncoder,
producerPool = producerPool,
topicPartitionInfos = topicPartitionInfos)
val serializedData = handler.serialize(produceData)
val deserializedData = serializedData.map(d => new KeyedMessage[String,String](d.topic, Utils.readString(d.message.payload)))
TestUtils.checkEquals(produceData.iterator, deserializedData.iterator)
}
@Test
def testInvalidPartition() {
val producerDataList = new ArrayBuffer[KeyedMessage[String,Message]]
producerDataList.append(new KeyedMessage[String,Message]("topic1", "key1", new Message("msg1".getBytes)))
val props = new Properties()
props.put("metadata.broker.list", TestUtils.getBrokerListStrFromConfigs(configs))
val config = new ProducerConfig(props)
// form expected partitions metadata
val topic1Metadata = getTopicMetadata("topic1", 0, 0, "localhost", 9092)
val topicPartitionInfos = new collection.mutable.HashMap[String, TopicMetadata]
topicPartitionInfos.put("topic1", topic1Metadata)
val producerPool = new ProducerPool(config)
val handler = new DefaultEventHandler[String,String](config,
partitioner = new NegativePartitioner,
encoder = null.asInstanceOf[Encoder[String]],
keyEncoder = null.asInstanceOf[Encoder[String]],
producerPool = producerPool,
topicPartitionInfos = topicPartitionInfos)
try {
handler.partitionAndCollate(producerDataList)
}
catch {
// should not throw any exception
case e: Throwable => fail("Should not throw any exception")
}
}
@Test
def testNoBroker() {
val props = new Properties()
props.put("metadata.broker.list", TestUtils.getBrokerListStrFromConfigs(configs))
val config = new ProducerConfig(props)
// create topic metadata with 0 partitions
val topic1Metadata = new TopicMetadata("topic1", Seq.empty)
val topicPartitionInfos = new collection.mutable.HashMap[String, TopicMetadata]
topicPartitionInfos.put("topic1", topic1Metadata)
val producerPool = new ProducerPool(config)
val producerDataList = new ArrayBuffer[KeyedMessage[String,String]]
producerDataList.append(new KeyedMessage[String,String]("topic1", "msg1"))
val handler = new DefaultEventHandler[String,String](config,
partitioner = null.asInstanceOf[Partitioner],
encoder = new StringEncoder,
keyEncoder = new StringEncoder,
producerPool = producerPool,
topicPartitionInfos = topicPartitionInfos)
try {
handler.handle(producerDataList)
fail("Should fail with FailedToSendMessageException")
}
catch {
case e: FailedToSendMessageException => // we retry on any exception now
}
}
@Test
def testIncompatibleEncoder() {
val props = new Properties()
props.put("metadata.broker.list", TestUtils.getBrokerListStrFromConfigs(configs))
val config = new ProducerConfig(props)
val producer=new Producer[String, String](config)
try {
producer.send(getProduceData(1): _*)
fail("Should fail with ClassCastException due to incompatible Encoder")
} catch {
case e: ClassCastException =>
}finally {
producer.close()
}
}
@Test
def testRandomPartitioner() {
val props = new Properties()
props.put("metadata.broker.list", TestUtils.getBrokerListStrFromConfigs(configs))
val config = new ProducerConfig(props)
// create topic metadata with 0 partitions
val topic1Metadata = getTopicMetadata("topic1", 0, 0, "localhost", 9092)
val topic2Metadata = getTopicMetadata("topic2", 0, 0, "localhost", 9092)
val topicPartitionInfos = new collection.mutable.HashMap[String, TopicMetadata]
topicPartitionInfos.put("topic1", topic1Metadata)
topicPartitionInfos.put("topic2", topic2Metadata)
val producerPool = new ProducerPool(config)
val handler = new DefaultEventHandler[String,String](config,
partitioner = null.asInstanceOf[Partitioner],
encoder = null.asInstanceOf[Encoder[String]],
keyEncoder = null.asInstanceOf[Encoder[String]],
producerPool = producerPool,
topicPartitionInfos = topicPartitionInfos)
val producerDataList = new ArrayBuffer[KeyedMessage[String,Message]]
producerDataList.append(new KeyedMessage[String,Message]("topic1", new Message("msg1".getBytes)))
producerDataList.append(new KeyedMessage[String,Message]("topic2", new Message("msg2".getBytes)))
producerDataList.append(new KeyedMessage[String,Message]("topic1", new Message("msg3".getBytes)))
val partitionedDataOpt = handler.partitionAndCollate(producerDataList)
partitionedDataOpt match {
case Some(partitionedData) =>
for ((brokerId, dataPerBroker) <- partitionedData) {
for ( (TopicAndPartition(topic, partitionId), dataPerTopic) <- dataPerBroker)
assertTrue(partitionId == 0)
}
case None =>
fail("Failed to collate requests by topic, partition")
}
}
@Test
def testBrokerListAndAsync() {
return
val props = TestUtils.getProducerConfig(TestUtils.getBrokerListStrFromConfigs(configs))
props.put("producer.type", "async")
props.put("batch.num.messages", "5")
val config = new ProducerConfig(props)
val topic = "topic1"
val topic1Metadata = getTopicMetadata(topic, 0, 0, "localhost", 9092)
val topicPartitionInfos = new collection.mutable.HashMap[String, TopicMetadata]
topicPartitionInfos.put("topic1", topic1Metadata)
val producerPool = new ProducerPool(config)
val msgs = TestUtils.getMsgStrings(10)
val handler = new DefaultEventHandler[String,String](config,
partitioner = null.asInstanceOf[Partitioner],
encoder = new StringEncoder,
keyEncoder = new StringEncoder,
producerPool = producerPool,
topicPartitionInfos = topicPartitionInfos)
val producer = new Producer[String, String](config, handler)
try {
// send all 10 messages, should create 2 batches and 2 syncproducer calls
producer.send(msgs.map(m => new KeyedMessage[String,String](topic, m)): _*)
producer.close
} catch {
case e: Exception => fail("Not expected", e)
}
}
@Test
def testFailedSendRetryLogic() {
val props = new Properties()
props.put("metadata.broker.list", TestUtils.getBrokerListStrFromConfigs(configs))
props.put("request.required.acks", "1")
props.put("serializer.class", classOf[StringEncoder].getName.toString)
props.put("key.serializer.class", classOf[NullEncoder[Int]].getName.toString)
props.put("producer.num.retries", 3.toString)
val config = new ProducerConfig(props)
val topic1 = "topic1"
val topic1Metadata = getTopicMetadata(topic1, Array(0, 1), 0, "localhost", 9092)
val topicPartitionInfos = new collection.mutable.HashMap[String, TopicMetadata]
topicPartitionInfos.put("topic1", topic1Metadata)
val msgs = TestUtils.getMsgStrings(2)
// produce request for topic1 and partitions 0 and 1. Let the first request fail
// entirely. The second request will succeed for partition 1 but fail for partition 0.
// On the third try for partition 0, let it succeed.
val request1 = TestUtils.produceRequestWithAcks(List(topic1), List(0, 1), messagesToSet(msgs), acks = 1, correlationId = 11)
val request2 = TestUtils.produceRequestWithAcks(List(topic1), List(0, 1), messagesToSet(msgs), acks = 1, correlationId = 17)
val response1 = ProducerResponse(0,
Map((TopicAndPartition("topic1", 0), ProducerResponseStatus(ErrorMapping.NotLeaderForPartitionCode.toShort, 0L)),
(TopicAndPartition("topic1", 1), ProducerResponseStatus(ErrorMapping.NoError, 0L))))
val request3 = TestUtils.produceRequest(topic1, 0, messagesToSet(msgs), acks = 1, correlationId = 21)
val response2 = ProducerResponse(0,
Map((TopicAndPartition("topic1", 0), ProducerResponseStatus(ErrorMapping.NoError, 0L))))
val mockSyncProducer = EasyMock.createMock(classOf[SyncProducer])
// don't care about config mock
EasyMock.expect(mockSyncProducer.config).andReturn(EasyMock.anyObject()).anyTimes()
EasyMock.expect(mockSyncProducer.send(request1)).andThrow(new RuntimeException) // simulate SocketTimeoutException
EasyMock.expect(mockSyncProducer.send(request2)).andReturn(response1)
EasyMock.expect(mockSyncProducer.send(request3)).andReturn(response2)
EasyMock.replay(mockSyncProducer)
val producerPool = EasyMock.createMock(classOf[ProducerPool])
EasyMock.expect(producerPool.getProducer(0)).andReturn(mockSyncProducer).times(4)
EasyMock.expect(producerPool.close())
EasyMock.replay(producerPool)
val handler = new DefaultEventHandler[Int,String](config,
partitioner = new FixedValuePartitioner(),
encoder = new StringEncoder(),
keyEncoder = new NullEncoder[Int](),
producerPool = producerPool,
topicPartitionInfos = topicPartitionInfos)
val data = msgs.map(m => new KeyedMessage[Int,String](topic1, 0, m)) ++ msgs.map(m => new KeyedMessage[Int,String](topic1, 1, m))
handler.handle(data)
handler.close()
EasyMock.verify(mockSyncProducer)
EasyMock.verify(producerPool)
}
@Test
def testJavaProducer() {
val topic = "topic1"
val msgs = TestUtils.getMsgStrings(5)
val scalaProducerData = msgs.map(m => new KeyedMessage[String, String](topic, m))
val javaProducerData: java.util.List[KeyedMessage[String, String]] = {
import scala.collection.JavaConversions._
scalaProducerData
}
val mockScalaProducer = EasyMock.createMock(classOf[kafka.producer.Producer[String, String]])
mockScalaProducer.send(scalaProducerData.head)
EasyMock.expectLastCall()
mockScalaProducer.send(scalaProducerData: _*)
EasyMock.expectLastCall()
EasyMock.replay(mockScalaProducer)
val javaProducer = new kafka.javaapi.producer.Producer[String, String](mockScalaProducer)
javaProducer.send(javaProducerData.get(0))
javaProducer.send(javaProducerData)
EasyMock.verify(mockScalaProducer)
}
@Test
def testInvalidConfiguration() {
val props = new Properties()
props.put("serializer.class", "kafka.serializer.StringEncoder")
props.put("producer.type", "async")
try {
new ProducerConfig(props)
fail("should complain about wrong config")
}
catch {
case e: IllegalArgumentException => //expected
}
}
def getProduceData(nEvents: Int): Seq[KeyedMessage[String,String]] = {
val producerDataList = new ArrayBuffer[KeyedMessage[String,String]]
for (i <- 0 until nEvents)
producerDataList.append(new KeyedMessage[String,String]("topic1", null, "msg" + i))
producerDataList
}
private def getTopicMetadata(topic: String, partition: Int, brokerId: Int, brokerHost: String, brokerPort: Int): TopicMetadata = {
getTopicMetadata(topic, List(partition), brokerId, brokerHost, brokerPort)
}
private def getTopicMetadata(topic: String, partition: Seq[Int], brokerId: Int, brokerHost: String, brokerPort: Int): TopicMetadata = {
val broker1 = new Broker(brokerId, brokerHost, brokerPort)
new TopicMetadata(topic, partition.map(new PartitionMetadata(_, Some(broker1), List(broker1))))
}
def messagesToSet(messages: Seq[String]): ByteBufferMessageSet = {
new ByteBufferMessageSet(NoCompressionCodec, messages.map(m => new Message(m.getBytes)): _*)
}
def messagesToSet(key: Array[Byte], messages: Seq[Array[Byte]]): ByteBufferMessageSet = {
new ByteBufferMessageSet(NoCompressionCodec, messages.map(m => new Message(key = key, bytes = m)): _*)
}
}
class NegativePartitioner(props: VerifiableProperties = null) extends Partitioner {
def partition(data: Any, numPartitions: Int): Int = -1
}
|
unix1986/universe
|
tool/kafka-0.8.1.1-src/core/src/test/scala/unit/kafka/producer/AsyncProducerTest.scala
|
Scala
|
bsd-2-clause
| 22,648 |
package com.peterpotts.sample
import org.scalatest.{Matchers, WordSpec}
import scala.collection.immutable.IndexedSeq
class SamplePickTest extends WordSpec with Matchers {
"A sample pick instance" should {
"pick values" in {
val animals = IndexedSeq("cat", "dog")
val animal = SamplePick(animals).next()
animals.contains(animal) shouldEqual true
}
}
}
|
peterpotts/sample
|
src/test/scala/com/peterpotts/sample/SamplePickTest.scala
|
Scala
|
mit
| 384 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ui.jobs
import java.util.Date
import javax.servlet.http.HttpServletRequest
import scala.collection.mutable.{HashMap, ListBuffer}
import scala.xml._
import org.apache.commons.lang3.StringEscapeUtils
import org.apache.spark.JobExecutionStatus
import org.apache.spark.ui.jobs.UIData.{ExecutorUIData, JobUIData}
import org.apache.spark.ui.{ToolTips, UIUtils, WebUIPage}
/** Page showing list of all ongoing and recently finished jobs */
private[ui] class AllJobsPage(parent: JobsTab) extends WebUIPage("") {
private val JOBS_LEGEND =
<div class="legend-area"><svg width="150px" height="85px">
<rect class="succeeded-job-legend"
x="5px" y="5px" width="20px" height="15px" rx="2px" ry="2px"></rect>
<text x="35px" y="17px">Succeeded</text>
<rect class="failed-job-legend"
x="5px" y="30px" width="20px" height="15px" rx="2px" ry="2px"></rect>
<text x="35px" y="42px">Failed</text>
<rect class="running-job-legend"
x="5px" y="55px" width="20px" height="15px" rx="2px" ry="2px"></rect>
<text x="35px" y="67px">Running</text>
    </svg></div>.toString.filter(_ != '\n')
private val EXECUTORS_LEGEND =
<div class="legend-area"><svg width="150px" height="55px">
<rect class="executor-added-legend"
x="5px" y="5px" width="20px" height="15px" rx="2px" ry="2px"></rect>
<text x="35px" y="17px">Added</text>
<rect class="executor-removed-legend"
x="5px" y="30px" width="20px" height="15px" rx="2px" ry="2px"></rect>
<text x="35px" y="42px">Removed</text>
    </svg></div>.toString.filter(_ != '\n')
private def getLastStageNameAndDescription(job: JobUIData): (String, String) = {
val lastStageInfo = Option(job.stageIds)
.filter(_.nonEmpty)
.flatMap { ids => parent.jobProgresslistener.stageIdToInfo.get(ids.max)}
val lastStageData = lastStageInfo.flatMap { s =>
parent.jobProgresslistener.stageIdToData.get((s.stageId, s.attemptId))
}
val name = lastStageInfo.map(_.name).getOrElse("(Unknown Stage Name)")
val description = lastStageData.flatMap(_.description).getOrElse("")
(name, description)
}
private def makeJobEvent(jobUIDatas: Seq[JobUIData]): Seq[String] = {
jobUIDatas.filter { jobUIData =>
jobUIData.status != JobExecutionStatus.UNKNOWN && jobUIData.submissionTime.isDefined
}.map { jobUIData =>
val jobId = jobUIData.jobId
val status = jobUIData.status
val (jobName, jobDescription) = getLastStageNameAndDescription(jobUIData)
val displayJobDescription = if (jobDescription.isEmpty) jobName else jobDescription
val submissionTime = jobUIData.submissionTime.get
val completionTimeOpt = jobUIData.completionTime
val completionTime = completionTimeOpt.getOrElse(System.currentTimeMillis())
val classNameByStatus = status match {
case JobExecutionStatus.SUCCEEDED => "succeeded"
case JobExecutionStatus.FAILED => "failed"
case JobExecutionStatus.RUNNING => "running"
case JobExecutionStatus.UNKNOWN => "unknown"
}
// The timeline library treats contents as HTML, so we have to escape them. We need to add
// extra layers of escaping in order to embed this in a Javascript string literal.
val escapedDesc = Utility.escape(displayJobDescription)
val jsEscapedDesc = StringEscapeUtils.escapeEcmaScript(escapedDesc)
val jobEventJsonAsStr =
s"""
|{
| 'className': 'job application-timeline-object ${classNameByStatus}',
| 'group': 'jobs',
| 'start': new Date(${submissionTime}),
| 'end': new Date(${completionTime}),
| 'content': '<div class="application-timeline-content"' +
| 'data-html="true" data-placement="top" data-toggle="tooltip"' +
| 'data-title="${jsEscapedDesc} (Job ${jobId})<br>' +
| 'Status: ${status}<br>' +
| 'Submitted: ${UIUtils.formatDate(new Date(submissionTime))}' +
| '${
if (status != JobExecutionStatus.RUNNING) {
s"""<br>Completed: ${UIUtils.formatDate(new Date(completionTime))}"""
} else {
""
}
}">' +
| '${jsEscapedDesc} (Job ${jobId})</div>'
|}
""".stripMargin
jobEventJsonAsStr
}
}
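  // Each string produced above is a JavaScript object literal consumed by the timeline widget,
  // shaped roughly like this (illustrative values only):
  //   { 'className': 'job application-timeline-object succeeded', 'group': 'jobs',
  //     'start': new Date(1430917380000), 'end': new Date(1430917389000),
  //     'content': '<div class="application-timeline-content" ...>count at Foo.scala:12 (Job 3)</div>' }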
private def makeExecutorEvent(executorUIDatas: HashMap[String, ExecutorUIData]): Seq[String] = {
val events = ListBuffer[String]()
executorUIDatas.foreach {
case (executorId, event) =>
val addedEvent =
s"""
|{
| 'className': 'executor added',
| 'group': 'executors',
| 'start': new Date(${event.startTime}),
| 'content': '<div class="executor-event-content"' +
| 'data-toggle="tooltip" data-placement="bottom"' +
| 'data-title="Executor ${executorId}<br>' +
| 'Added at ${UIUtils.formatDate(new Date(event.startTime))}"' +
| 'data-html="true">Executor ${executorId} added</div>'
|}
""".stripMargin
events += addedEvent
if (event.finishTime.isDefined) {
val removedEvent =
s"""
|{
| 'className': 'executor removed',
| 'group': 'executors',
| 'start': new Date(${event.finishTime.get}),
| 'content': '<div class="executor-event-content"' +
| 'data-toggle="tooltip" data-placement="bottom"' +
| 'data-title="Executor ${executorId}<br>' +
| 'Removed at ${UIUtils.formatDate(new Date(event.finishTime.get))}' +
| '${
if (event.finishReason.isDefined) {
s"""<br>Reason: ${event.finishReason.get.replace("\\n", " ")}"""
} else {
""
}
}"' +
| 'data-html="true">Executor ${executorId} removed</div>'
|}
""".stripMargin
events += removedEvent
}
}
events.toSeq
}
private def makeTimeline(
jobs: Seq[JobUIData],
executors: HashMap[String, ExecutorUIData],
startTime: Long): Seq[Node] = {
val jobEventJsonAsStrSeq = makeJobEvent(jobs)
val executorEventJsonAsStrSeq = makeExecutorEvent(executors)
val groupJsonArrayAsStr =
s"""
|[
| {
| 'id': 'executors',
| 'content': '<div>Executors</div>${EXECUTORS_LEGEND}',
| },
| {
| 'id': 'jobs',
| 'content': '<div>Jobs</div>${JOBS_LEGEND}',
| }
|]
""".stripMargin
val eventArrayAsStr =
(jobEventJsonAsStrSeq ++ executorEventJsonAsStrSeq).mkString("[", ",", "]")
<span class="expand-application-timeline">
<span class="expand-application-timeline-arrow arrow-closed"></span>
<a data-toggle="tooltip" title={ToolTips.JOB_TIMELINE} data-placement="right">
Event Timeline
</a>
</span> ++
<div id="application-timeline" class="collapsed">
<div class="control-panel">
<div id="application-timeline-zoom-lock">
<input type="checkbox"></input>
<span>Enable zooming</span>
</div>
</div>
</div> ++
<script type="text/javascript">
{Unparsed(s"drawApplicationTimeline(${groupJsonArrayAsStr}," +
s"${eventArrayAsStr}, ${startTime});")}
</script>
}
private def jobsTable(jobs: Seq[JobUIData]): Seq[Node] = {
val someJobHasJobGroup = jobs.exists(_.jobGroup.isDefined)
val columns: Seq[Node] = {
<th>{if (someJobHasJobGroup) "Job Id (Job Group)" else "Job Id"}</th>
<th>Description</th>
<th>Submitted</th>
<th>Duration</th>
<th class="sorttable_nosort">Stages: Succeeded/Total</th>
<th class="sorttable_nosort">Tasks (for all stages): Succeeded/Total</th>
}
def makeRow(job: JobUIData): Seq[Node] = {
val (lastStageName, lastStageDescription) = getLastStageNameAndDescription(job)
val duration: Option[Long] = {
job.submissionTime.map { start =>
val end = job.completionTime.getOrElse(System.currentTimeMillis())
end - start
}
}
val formattedDuration = duration.map(d => UIUtils.formatDuration(d)).getOrElse("Unknown")
val formattedSubmissionTime = job.submissionTime.map(UIUtils.formatDate).getOrElse("Unknown")
val basePathUri = UIUtils.prependBaseUri(parent.basePath)
val jobDescription = UIUtils.makeDescription(lastStageDescription, basePathUri)
val detailUrl = "%s/jobs/job?id=%s".format(basePathUri, job.jobId)
<tr id={"job-" + job.jobId}>
<td sorttable_customkey={job.jobId.toString}>
{job.jobId} {job.jobGroup.map(id => s"($id)").getOrElse("")}
</td>
<td>
{jobDescription}
<a href={detailUrl} class="name-link">{lastStageName}</a>
</td>
<td sorttable_customkey={job.submissionTime.getOrElse(-1).toString}>
{formattedSubmissionTime}
</td>
<td sorttable_customkey={duration.getOrElse(-1).toString}>{formattedDuration}</td>
<td class="stage-progress-cell">
{job.completedStageIndices.size}/{job.stageIds.size - job.numSkippedStages}
{if (job.numFailedStages > 0) s"(${job.numFailedStages} failed)"}
{if (job.numSkippedStages > 0) s"(${job.numSkippedStages} skipped)"}
</td>
<td class="progress-cell">
{UIUtils.makeProgressBar(started = job.numActiveTasks, completed = job.numCompletedTasks,
failed = job.numFailedTasks, skipped = job.numSkippedTasks,
total = job.numTasks - job.numSkippedTasks)}
</td>
</tr>
}
<table class="table table-bordered table-striped table-condensed sortable">
<thead>{columns}</thead>
<tbody>
{jobs.map(makeRow)}
</tbody>
</table>
}
def render(request: HttpServletRequest): Seq[Node] = {
val listener = parent.jobProgresslistener
listener.synchronized {
val startTime = listener.startTime
val endTime = listener.endTime
val activeJobs = listener.activeJobs.values.toSeq
val completedJobs = listener.completedJobs.reverse.toSeq
val failedJobs = listener.failedJobs.reverse.toSeq
val activeJobsTable =
jobsTable(activeJobs.sortBy(_.submissionTime.getOrElse(-1L)).reverse)
val completedJobsTable =
jobsTable(completedJobs.sortBy(_.completionTime.getOrElse(-1L)).reverse)
val failedJobsTable =
jobsTable(failedJobs.sortBy(_.completionTime.getOrElse(-1L)).reverse)
val shouldShowActiveJobs = activeJobs.nonEmpty
val shouldShowCompletedJobs = completedJobs.nonEmpty
val shouldShowFailedJobs = failedJobs.nonEmpty
val completedJobNumStr = if (completedJobs.size == listener.numCompletedJobs) {
s"${completedJobs.size}"
} else {
s"${listener.numCompletedJobs}, only showing ${completedJobs.size}"
}
val summary: NodeSeq =
<div>
<ul class="unstyled">
<li>
<strong>Total Uptime:</strong>
{
if (endTime < 0 && parent.sc.isDefined) {
UIUtils.formatDuration(System.currentTimeMillis() - startTime)
} else if (endTime > 0) {
UIUtils.formatDuration(endTime - startTime)
}
}
</li>
<li>
<strong>Scheduling Mode: </strong>
{listener.schedulingMode.map(_.toString).getOrElse("Unknown")}
</li>
{
if (shouldShowActiveJobs) {
<li>
<a href="#active"><strong>Active Jobs:</strong></a>
{activeJobs.size}
</li>
}
}
{
if (shouldShowCompletedJobs) {
<li id="completed-summary">
<a href="#completed"><strong>Completed Jobs:</strong></a>
{completedJobNumStr}
</li>
}
}
{
if (shouldShowFailedJobs) {
<li>
<a href="#failed"><strong>Failed Jobs:</strong></a>
{listener.numFailedJobs}
</li>
}
}
</ul>
</div>
var content = summary
val executorListener = parent.executorListener
content ++= makeTimeline(activeJobs ++ completedJobs ++ failedJobs,
executorListener.executorIdToData, startTime)
if (shouldShowActiveJobs) {
content ++= <h4 id="active">Active Jobs ({activeJobs.size})</h4> ++
activeJobsTable
}
if (shouldShowCompletedJobs) {
content ++= <h4 id="completed">Completed Jobs ({completedJobNumStr})</h4> ++
completedJobsTable
}
if (shouldShowFailedJobs) {
content ++= <h4 id ="failed">Failed Jobs ({failedJobs.size})</h4> ++
failedJobsTable
}
val helpText = """A job is triggered by an action, like count() or saveAsTextFile().""" +
" Click on a job to see information about the stages of tasks inside it."
UIUtils.headerSparkPage("Spark Jobs", content, parent, helpText = Some(helpText))
}
}
}
|
haowu80s/spark
|
core/src/main/scala/org/apache/spark/ui/jobs/AllJobsPage.scala
|
Scala
|
apache-2.0
| 14,429 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.testsuite.javalib.util
import java.{util => ju, lang => jl}
import org.junit.Test
import scala.reflect.ClassTag
trait CollectionsOnSetsTest extends CollectionsOnCollectionsTest {
def factory: SetFactory
@Test def unmodifiableSet():Unit = {
def test[E: ClassTag](toElem: Int => E): Unit = {
val set = factory.empty[E]
testSetUnmodifiability(ju.Collections.unmodifiableSet(set), toElem(0))
set.addAll(rangeOfElems(toElem))
testSetUnmodifiability(ju.Collections.unmodifiableSet(set), toElem(0))
}
test[jl.Integer](_.toInt)
test[jl.Long](_.toLong)
test[jl.Double](_.toDouble)
test[String](_.toString)
}
}
trait CollectionsOnSortedSetsTest extends CollectionsOnSetsTest {
def factory: SortedSetFactory
@Test def unmodifiableSortedSet():Unit = {
def test[E: ClassTag](toElem: Int => E): Unit = {
val sortedSet = factory.empty[E]
testSortedSetUnmodifiability(ju.Collections.unmodifiableSortedSet(sortedSet),
toElem(0))
sortedSet.addAll(rangeOfElems(toElem))
testSortedSetUnmodifiability(ju.Collections.unmodifiableSortedSet(sortedSet),
toElem(0))
}
test[jl.Integer](_.toInt)
test[jl.Long](_.toLong)
test[jl.Double](_.toDouble)
test[String](_.toString)
}
}
class CollectionsOnHashSetFactoryTest extends CollectionsOnSetsTest {
def factory: SetFactory = new HashSetFactory
}
class CollectionsOnLinkedHashSetFactoryTest extends CollectionsOnSetsTest {
def factory: SetFactory = new LinkedHashSetFactory
}
class CollectionsOnConcurrentSkipListSetFactoryTest
extends CollectionsOnSetsTest {
def factory: SetFactory = new concurrent.ConcurrentSkipListSetFactory
}
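// Illustrative sketch (not part of the suite): the testSetUnmodifiability helper inherited from
// CollectionsOnCollectionsTest boils down to checks of roughly this shape, relying on the
// java.util.Collections contract that mutators of an unmodifiable view throw:
//   val ro = ju.Collections.unmodifiableSet(factory.empty[jl.Integer])
//   try { ro.add(0); fail("expected UnsupportedOperationException") }
//   catch { case _: UnsupportedOperationException => () }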
|
scala-js/scala-js
|
test-suite/shared/src/test/scala/org/scalajs/testsuite/javalib/util/CollectionsOnSetsTest.scala
|
Scala
|
apache-2.0
| 1,991 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.mxnet.infer
import org.apache.mxnet.io.NDArrayIter
import org.apache.mxnet.module.{BaseModule, Module}
import org.apache.mxnet._
import org.mockito.Matchers._
import org.mockito.Mockito
import org.scalatest.{BeforeAndAfterAll, FunSuite}
class PredictorSuite extends FunSuite with BeforeAndAfterAll {
class MyPredictor(val modelPathPrefix: String,
override val inputDescriptors: IndexedSeq[DataDesc])
extends Predictor(modelPathPrefix, inputDescriptors, epoch = Some(0)) {
override def loadModule(): Module = mockModule
val getIDescriptor: IndexedSeq[DataDesc] = iDescriptors
val getBatchSize: Int = batchSize
val getBatchIndex: Int = batchIndex
lazy val mockModule: Module = Mockito.mock(classOf[Module])
}
test("PredictorSuite-testPredictorConstruction") {
val inputDescriptor = IndexedSeq[DataDesc](new DataDesc("data", Shape(1, 3, 2, 2),
layout = Layout.NCHW))
val mockPredictor = new MyPredictor("xyz", inputDescriptor)
assert(mockPredictor.getBatchSize == 1)
assert(mockPredictor.getBatchIndex == inputDescriptor(0).layout.indexOf('N'))
val inputDescriptor2 = IndexedSeq[DataDesc](new DataDesc("data", Shape(1, 3, 2, 2),
layout = Layout.NCHW),
new DataDesc("data", Shape(2, 3, 2, 2), layout = Layout.NCHW))
assertThrows[IllegalArgumentException] {
val mockPredictor = new MyPredictor("xyz", inputDescriptor2)
}
    // batch size defaults to 1
val iDesc2 = IndexedSeq[DataDesc](new DataDesc("data", Shape(3, 2, 2), layout = "CHW"))
val p2 = new MyPredictor("xyz", inputDescriptor)
assert(p2.getBatchSize == 1, "should use a default batch size of 1")
}
test("PredictorSuite-testWithFlatArrays") {
val inputDescriptor = IndexedSeq[DataDesc](new DataDesc("data", Shape(2, 3, 2, 2),
layout = Layout.NCHW))
val inputData = Array.fill[Float](12)(1)
    // this will be disposed of at the end of the predict call on Predictor.
val predictResult = IndexedSeq(NDArray.ones(Shape(1, 3, 2, 2)))
val testPredictor = new MyPredictor("xyz", inputDescriptor)
Mockito.doReturn(predictResult).when(testPredictor.mockModule)
.predict(any(classOf[NDArrayIter]), any[Int], any[Boolean])
val testFun = testPredictor.predict(IndexedSeq(inputData))
assert(testFun.size == 1, "output size should be 1 ")
assert(Array.fill[Float](12)(1).mkString == testFun(0).mkString)
    // Verify that the module was bound with batch size 1 and rebound back to the original
    // input descriptor. The expected count is two here because loadModule overrides the
    // initial bind.
Mockito.verify(testPredictor.mockModule, Mockito.times(2)).bind(any[IndexedSeq[DataDesc]],
any[Option[IndexedSeq[DataDesc]]], any[Boolean], any[Boolean], any[Boolean]
, any[Option[BaseModule]], any[String])
}
test("PredictorSuite-testWithFlatFloat64Arrays") {
val inputDescriptor = IndexedSeq[DataDesc](new DataDesc("data", Shape(2, 3, 2, 2),
layout = Layout.NCHW, dtype = DType.Float64))
val inputData = Array.fill[Double](12)(1d)
    // this will be disposed of at the end of the predict call on Predictor.
val predictResult = IndexedSeq(NDArray.ones(Shape(1, 3, 2, 2), dtype = DType.Float64))
val testPredictor = new MyPredictor("xyz", inputDescriptor)
Mockito.doReturn(predictResult).when(testPredictor.mockModule)
.predict(any(classOf[NDArrayIter]), any[Int], any[Boolean])
val testFun = testPredictor.predict(IndexedSeq(inputData))
assert(testFun.size == 1, "output size should be 1 ")
assert(testFun(0)(0).getClass == 1d.getClass)
assert(Array.fill[Double](12)(1d).mkString == testFun(0).mkString)
    // Verify that the module was bound with batch size 1 and rebound back to the original
    // input descriptor. The expected count is two here because loadModule overrides the
    // initial bind.
Mockito.verify(testPredictor.mockModule, Mockito.times(2)).bind(any[IndexedSeq[DataDesc]],
any[Option[IndexedSeq[DataDesc]]], any[Boolean], any[Boolean], any[Boolean]
, any[Option[BaseModule]], any[String])
}
test("PredictorSuite-testWithNDArray") {
val inputDescriptor = IndexedSeq[DataDesc](new DataDesc("data", Shape(2, 3, 2, 2),
layout = Layout.NCHW))
val inputData = NDArray.ones(Shape(1, 3, 2, 2))
    // this will be disposed of at the end of the predict call on Predictor.
val predictResult = IndexedSeq(NDArray.ones(Shape(1, 3, 2, 2)))
val testPredictor = new MyPredictor("xyz", inputDescriptor)
Mockito.doReturn(predictResult).when(testPredictor.mockModule)
.predict(any(classOf[NDArrayIter]), any[Int], any[Boolean])
val testFun = testPredictor.predictWithNDArray(IndexedSeq(inputData))
assert(testFun.size == 1, "output size should be 1")
assert(Array.fill[Float](12)(1).mkString == testFun(0).toArray.mkString)
Mockito.verify(testPredictor.mockModule, Mockito.times(2)).bind(any[IndexedSeq[DataDesc]],
any[Option[IndexedSeq[DataDesc]]], any[Boolean], any[Boolean], any[Boolean]
, any[Option[BaseModule]], any[String])
}
}
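// Hedged usage sketch mirroring the constructor shape exercised above (the model path prefix,
// input shape and epoch are hypothetical values, not taken from this suite):
//   val desc = IndexedSeq(new DataDesc("data", Shape(1, 3, 224, 224), layout = Layout.NCHW))
//   val predictor = new Predictor("/models/resnet-18", desc, epoch = Some(0))
//   val output = predictor.predict(IndexedSeq(Array.fill[Float](3 * 224 * 224)(0f)))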
|
dmlc/mxnet
|
scala-package/infer/src/test/scala/org/apache/mxnet/infer/PredictorSuite.scala
|
Scala
|
apache-2.0
| 5,955 |
package com.datawizards.dqm.rules
import com.datawizards.dqm.rules.field.FieldRule
case class FieldRules(field: String, rules: Seq[FieldRule])
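// Minimal construction sketch (concrete FieldRule implementations such as a not-null rule are
// assumed to come from com.datawizards.dqm.rules.field, as imported above):
//   val idRules = FieldRules(field = "customer_id", rules = Seq(/* e.g. NotNullRule() */))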
|
piotr-kalanski/data-quality-monitoring
|
src/main/scala/com/datawizards/dqm/rules/FieldRules.scala
|
Scala
|
apache-2.0
| 145 |
/*
* Copyright 2011-2021 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.spark.extensions.iterativebatch.compiler
package graph
import org.junit.runner.RunWith
import org.scalatest.FlatSpec
import org.scalatest.junit.JUnitRunner
import java.io.{ DataInput, DataOutput }
import scala.collection.JavaConversions._
import scala.concurrent.{ Await, Future }
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.io.Writable
import org.apache.spark.{ HashPartitioner, Partitioner, SparkConf }
import org.apache.spark.rdd.RDD
import com.asakusafw.bridge.stage.StageInfo
import com.asakusafw.lang.compiler.model.description.{ ClassDescription, ImmediateDescription }
import com.asakusafw.lang.compiler.model.graph.{ Groups, MarkerOperator }
import com.asakusafw.lang.compiler.model.testing.OperatorExtractor
import com.asakusafw.lang.compiler.planning.{ PlanBuilder, PlanMarker }
import com.asakusafw.runtime.model.DataModel
import com.asakusafw.runtime.value.IntOption
import com.asakusafw.spark.compiler.{ ClassServerForAll, FlowIdForEach }
import com.asakusafw.spark.compiler.graph._
import com.asakusafw.spark.compiler.planning.{
IterativeInfo,
PartitionGroupInfo,
SubPlanInfo,
SubPlanOutputInfo
}
import com.asakusafw.spark.runtime._
import com.asakusafw.spark.runtime.graph.{
Broadcast,
BroadcastId,
Aggregate,
ParallelCollectionSource,
SortOrdering,
Source
}
import com.asakusafw.spark.runtime.io.WritableSerDe
import com.asakusafw.spark.runtime.rdd.{ BranchKey, ShuffleKey }
import com.asakusafw.vocabulary.flow.processor.PartialAggregation
import com.asakusafw.vocabulary.operator.Fold
import com.asakusafw.spark.extensions.iterativebatch.compiler.spi.RoundAwareNodeCompiler
import com.asakusafw.spark.extensions.iterativebatch.runtime.graph.RoundAwareParallelCollectionSource
@RunWith(classOf[JUnitRunner])
class AggregateClassBuilderSpecTest extends AggregateClassBuilderSpec
class AggregateClassBuilderSpec
extends FlatSpec
with ClassServerForAll
with SparkForAll
with FlowIdForEach
with UsingCompilerContext
with JobContextSugar
with RoundContextSugar {
import AggregateClassBuilderSpec._
behavior of classOf[AggregateClassBuilder].getSimpleName
override def configure(conf: SparkConf): SparkConf = {
conf.set(Props.Parallelism, 8.toString)
super.configure(conf)
}
for {
(dataSize, numPartitions) <- Seq(
(PartitionGroupInfo.DataSize.TINY, 1),
(PartitionGroupInfo.DataSize.SMALL, 4),
(PartitionGroupInfo.DataSize.REGULAR, 8),
(PartitionGroupInfo.DataSize.LARGE, 16),
(PartitionGroupInfo.DataSize.HUGE, 32))
iterativeInfo <- Seq(
IterativeInfo.always(),
IterativeInfo.never(),
IterativeInfo.parameter("round"))
} {
val conf = s"DataSize: ${dataSize}, IterativeInfo: ${iterativeInfo}"
it should s"build aggregate class: [${conf}]" in {
val foosMarker = MarkerOperator.builder(ClassDescription.of(classOf[Foo]))
.attribute(classOf[PlanMarker], PlanMarker.CHECKPOINT).build()
val operator = OperatorExtractor
.extract(classOf[Fold], classOf[FoldOperator], "fold")
.input("foos", ClassDescription.of(classOf[Foo]), foosMarker.getOutput)
.output("result", ClassDescription.of(classOf[Foo]))
.argument("n", ImmediateDescription.of(10))
.build()
val resultMarker = MarkerOperator.builder(ClassDescription.of(classOf[Foo]))
.attribute(classOf[PlanMarker], PlanMarker.CHECKPOINT).build()
operator.findOutput("result").connect(resultMarker.getInput)
val plan = PlanBuilder.from(Seq(operator))
.add(
Seq(foosMarker),
Seq(resultMarker)).build().getPlan()
assert(plan.getElements.size === 1)
val subplan = plan.getElements.head
subplan.putAttr(
new SubPlanInfo(_,
SubPlanInfo.DriverType.AGGREGATE,
Seq.empty[SubPlanInfo.DriverOption],
operator))
subplan.putAttr(_ => iterativeInfo)
val foosInput = subplan.findIn(foosMarker)
subplan.findOut(resultMarker)
.putAttr(
new SubPlanOutputInfo(_,
SubPlanOutputInfo.OutputType.AGGREGATED,
Seq.empty[SubPlanOutputInfo.OutputOption],
Groups.parse(Seq("i")),
operator))
.putAttr(_ => new PartitionGroupInfo(dataSize))
implicit val context = newNodeCompilerContext(flowId, classServer.root.toFile)
context.branchKeys.getField(foosInput.getOperator.getSerialNumber)
val compiler = RoundAwareNodeCompiler.get(subplan)
val thisType = compiler.compile(subplan)
context.addClass(context.branchKeys)
context.addClass(context.broadcastIds)
val cls = classServer.loadClass(thisType).asSubclass(classOf[Aggregate[Foo, Foo]])
val branchKeyCls = classServer.loadClass(context.branchKeys.thisType.getClassName)
def getBranchKey(marker: MarkerOperator): BranchKey = {
val sn = subplan.getOperators.toSet
.find(_.getOriginalSerialNumber == marker.getOriginalSerialNumber).get.getSerialNumber
branchKeyCls.getField(context.branchKeys.getField(sn)).get(null).asInstanceOf[BranchKey]
}
implicit val jobContext = newJobContext(sc)
val foos =
new RoundAwareParallelCollectionSource(getBranchKey(foosMarker), (0 until 10))("foos")
.mapWithRoundContext(getBranchKey(foosMarker))(Foo.intToFoo)
val aggregate = cls.getConstructor(
classOf[Seq[(Source, BranchKey)]],
classOf[Option[SortOrdering]],
classOf[Partitioner],
classOf[Map[BroadcastId, Broadcast[_]]],
classOf[JobContext])
.newInstance(
Seq((foos, getBranchKey(foosMarker))),
Option(new Foo.SortOrdering()),
new HashPartitioner(2),
Map.empty,
jobContext)
assert(aggregate.branchKeys === Set(resultMarker).map(getBranchKey))
assert(aggregate.partitioners(getBranchKey(resultMarker)).get.numPartitions === numPartitions)
for {
round <- 0 to 1
} {
val rc = newRoundContext(
stageId = s"round_${round}",
batchArguments = Map("round" -> round.toString))
val bias = if (iterativeInfo.isIterative) 100 * round else 0
val results = aggregate.compute(rc)
val result = Await.result(
results(getBranchKey(resultMarker))
.map { rddF =>
val rdd = rddF()
assert(rdd.partitions.size === numPartitions)
rdd.map {
case (_, foo: Foo) => (foo.i.get, foo.sum.get)
}.collect.toSeq.sortBy(_._1)
}, Duration.Inf)
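        // Expected sums: each parity group receives 5 of the 10 inputs, so combining them takes
        // 4 fold() applications, and every application also adds the operator argument n = 10,
        // which accounts for the trailing "+ 4 * 10" term below.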
assert(result === Seq(
(bias + 0, (0 until 10 by 2).map(i => bias + i * 100).sum + 4 * 10),
(bias + 1, (1 until 10 by 2).map(i => bias + i * 100).sum + 4 * 10)))
}
}
it should s"build aggregate class with grouping is empty: [${conf}]" in {
val foosMarker = MarkerOperator.builder(ClassDescription.of(classOf[Foo]))
.attribute(classOf[PlanMarker], PlanMarker.CHECKPOINT).build()
val operator = OperatorExtractor
.extract(classOf[Fold], classOf[FoldOperator], "fold")
.input("foos", ClassDescription.of(classOf[Foo]), foosMarker.getOutput)
.output("result", ClassDescription.of(classOf[Foo]))
.argument("n", ImmediateDescription.of(10))
.build()
val resultMarker = MarkerOperator.builder(ClassDescription.of(classOf[Foo]))
.attribute(classOf[PlanMarker], PlanMarker.CHECKPOINT).build()
operator.findOutput("result").connect(resultMarker.getInput)
val plan = PlanBuilder.from(Seq(operator))
.add(
Seq(foosMarker),
Seq(resultMarker)).build().getPlan()
assert(plan.getElements.size === 1)
val subplan = plan.getElements.head
subplan.putAttr(
new SubPlanInfo(_,
SubPlanInfo.DriverType.AGGREGATE,
Seq.empty[SubPlanInfo.DriverOption],
operator))
subplan.putAttr(_ => iterativeInfo)
val foosInput = subplan.findIn(foosMarker)
subplan.findOut(resultMarker)
.putAttr(
new SubPlanOutputInfo(_,
SubPlanOutputInfo.OutputType.AGGREGATED,
Seq.empty[SubPlanOutputInfo.OutputOption],
Groups.parse(Seq.empty[String]),
operator))
.putAttr(_ => new PartitionGroupInfo(dataSize))
implicit val context = newNodeCompilerContext(flowId, classServer.root.toFile)
context.branchKeys.getField(foosInput.getOperator.getSerialNumber)
val compiler = RoundAwareNodeCompiler.get(subplan)
val thisType = compiler.compile(subplan)
context.addClass(context.branchKeys)
context.addClass(context.broadcastIds)
val cls = classServer.loadClass(thisType).asSubclass(classOf[Aggregate[Foo, Foo]])
val branchKeyCls = classServer.loadClass(context.branchKeys.thisType.getClassName)
def getBranchKey(marker: MarkerOperator): BranchKey = {
val sn = subplan.getOperators.toSet
.find(_.getOriginalSerialNumber == marker.getOriginalSerialNumber).get.getSerialNumber
branchKeyCls.getField(context.branchKeys.getField(sn)).get(null).asInstanceOf[BranchKey]
}
implicit val jobContext = newJobContext(sc)
val foos =
new RoundAwareParallelCollectionSource(getBranchKey(foosMarker), (0 until 10))("foos")
.mapWithRoundContext(getBranchKey(foosMarker))(Foo.intToFoo)
.map(getBranchKey(foosMarker)) {
foo: (ShuffleKey, Foo) =>
(new ShuffleKey(Array.emptyByteArray, Array.emptyByteArray), foo._2)
}
val aggregate = cls.getConstructor(
classOf[Seq[(Source, BranchKey)]],
classOf[Option[SortOrdering]],
classOf[Partitioner],
classOf[Map[BroadcastId, Broadcast[_]]],
classOf[JobContext])
.newInstance(
Seq((foos, getBranchKey(foosMarker))),
None,
new HashPartitioner(2),
Map.empty,
jobContext)
assert(aggregate.branchKeys === Set(resultMarker).map(getBranchKey))
assert(aggregate.partitioners(getBranchKey(resultMarker)).get.numPartitions === 1)
for {
round <- 0 to 1
} {
val rc = newRoundContext(
stageId = s"round_${round}",
batchArguments = Map("round" -> round.toString))
val bias = if (iterativeInfo.isIterative) 100 * round else 0
val results = aggregate.compute(rc)
val result = Await.result(
results(getBranchKey(resultMarker))
.map { rddF =>
val rdd = rddF()
assert(rdd.partitions.size === 1)
rdd.map {
case (_, foo: Foo) => (foo.i.get, foo.sum.get)
}.collect.toSeq.sortBy(_._1)
}, Duration.Inf)
assert(result.size === 1)
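        // With an empty grouping key all 10 inputs collapse into one group: 9 fold() applications,
        // each adding n = 10, hence the trailing "+ 9 * 10" term below.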
assert(result(0)._2 === (0 until 10).map(i => bias + i * 100).sum + 9 * 10)
}
}
}
}
object AggregateClassBuilderSpec {
class Foo extends DataModel[Foo] with Writable {
val i = new IntOption()
val sum = new IntOption()
override def reset(): Unit = {
i.setNull()
sum.setNull()
}
override def copyFrom(other: Foo): Unit = {
i.copyFrom(other.i)
sum.copyFrom(other.sum)
}
override def readFields(in: DataInput): Unit = {
i.readFields(in)
sum.readFields(in)
}
override def write(out: DataOutput): Unit = {
i.write(out)
sum.write(out)
}
def getIOption: IntOption = i
def getSumOption: IntOption = sum
}
object Foo {
def intToFoo(rc: RoundContext): Int => (_, Foo) = {
val stageInfo = StageInfo.deserialize(rc.hadoopConf.value.get(StageInfo.KEY_NAME))
val round = stageInfo.getBatchArguments()("round").toInt
lazy val foo = new Foo()
{ i =>
foo.i.modify(100 * round + (i % 2))
foo.sum.modify(100 * round + i * 100)
val shuffleKey = new ShuffleKey(
WritableSerDe.serialize(foo.i),
Array.emptyByteArray)
(shuffleKey, foo)
}
}
class SortOrdering extends Ordering[ShuffleKey] {
override def compare(x: ShuffleKey, y: ShuffleKey): Int = {
IntOption.compareBytes(x.grouping, 0, x.grouping.length, y.grouping, 0, y.grouping.length)
}
}
}
class FoldOperator {
@Fold(partialAggregation = PartialAggregation.PARTIAL)
def fold(acc: Foo, value: Foo, n: Int): Unit = {
acc.sum.add(value.sum)
acc.sum.add(n)
}
}
}
|
asakusafw/asakusafw-spark
|
extensions/iterativebatch/compiler/core/src/test/scala/com/asakusafw/spark/extensions/iterativebatch/compiler/graph/AggregateClassBuilderSpec.scala
|
Scala
|
apache-2.0
| 13,285 |
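// Positive test (tests/pos): a secondary constructor may combine a default argument with a
// second parameter list; `new C()("1")` below should compile, filling in x = 1 and y = "1".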
object Test {
class C(x: Int, y: Int) {
def this(x: Int = 1)(y: String) =
this(x, y.toInt)
}
def test: Unit = {
new C()("1")
}
}
|
som-snytt/dotty
|
tests/pos/i3171.scala
|
Scala
|
apache-2.0
| 152 |
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU Affero GPL v3, the copyright holders add the following
* Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3
* licence, when you create a Related Module, this Related Module is
* not considered as a part of the work and may be distributed under the
* license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>.
*
*************************************************************************************
*/
package com.normation.rudder.repository.jdbc
import scala.collection.JavaConverters.asScalaBufferConverter
import com.normation.inventory.domain.NodeId
import com.normation.rudder.domain.policies.DirectiveId
import com.normation.rudder.domain.policies.RuleId
import com.normation.rudder.repository.ReportsRepository
import org.joda.time._
import org.slf4j.{Logger,LoggerFactory}
import com.normation.rudder.domain.reports._
import com.normation.cfclerk.domain.{Cf3PolicyDraftId}
import org.springframework.jdbc.core._
import java.sql.ResultSet
import java.sql.Timestamp
import net.liftweb.common._
import net.liftweb.common.Box._
import java.sql.Types
import org.springframework.dao.DataAccessException
import com.normation.rudder.reports.execution.AgentRun
import com.normation.rudder.domain.reports.Reports
import com.normation.rudder.reports.execution.AgentRunId
class ReportsJdbcRepository(jdbcTemplate : JdbcTemplate) extends ReportsRepository with Loggable {
val reportsTable = "ruddersysevents"
val archiveTable = "archivedruddersysevents"
private[this] val baseQuery = "select executiondate, nodeid, ruleid, directiveid, serial, component, keyValue, executionTimeStamp, eventtype, policy, msg from RudderSysEvents where 1=1 ";
private[this] val baseArchivedQuery = "select executiondate, nodeid, ruleid, directiveid, serial, component, keyValue, executionTimeStamp, eventtype, policy, msg from archivedruddersysevents where 1=1 ";
private[this] val reportsExecutionTable = "reportsexecution"
private[this] val idQuery = "select id, executiondate, nodeid, ruleid, directiveid, serial, component, keyValue, executionTimeStamp, eventtype, policy, msg from ruddersysevents where 1=1 ";
// find the last full run per node
  // we are not looking for requests older than `interval` minutes
private[this] def lastQuery(interval: Int) = s"select nodeid as Node, max(date) as Time from reportsexecution where date > (now() - interval '${interval} minutes') and complete = true group by nodeid"
private[this] def lastQueryByNode(interval: Int) = s"select nodeid as Node, max(date) as Time from reportsexecution where date > (now() - interval '${interval} minutes') and nodeid = ? and complete = true group by nodeid"
private[this] def joinQuery(interval: Int) = "select executiondate, nodeid, ruleId, directiveid, serial, component, keyValue, executionTimeStamp, eventtype, policy, msg from RudderSysEvents join (" + lastQuery(interval) +" ) as Ordering on Ordering.Node = nodeid and executionTimeStamp = Ordering.Time where 1=1"
private[this] def joinQueryByNode(interval: Int) = "select executiondate, nodeid, ruleId, directiveid, serial, component, keyValue, executionTimeStamp, eventtype, policy, msg from RudderSysEvents join (" + lastQueryByNode(interval) +" ) as Ordering on Ordering.Node = nodeid and executionTimeStamp = Ordering.Time where 1=1";
private[this] def boxed[A](name: String)(body: => A): Box[A] = {
try {
Full(body)
} catch {
case ex: Exception =>
val msg = "Error when trying to " + name
logger.error(msg, ex)
Failure(msg, Full(ex), Empty)
}
}
override def getExecutionReports(runs: Set[AgentRunId], filterByRules: Set[RuleId]): Box[Map[NodeId, Seq[Reports]]] = {
if(runs.isEmpty) Full(Map())
else {
val nodeParam = runs.map(x => s"('${x.nodeId.value}','${new Timestamp(x.date.getMillis)}'::timestamp)" ).mkString(",")
val ruleClause = if(filterByRules.isEmpty) ""
else s"and ruleid in ${filterByRules.map(_.value).mkString("('", "','" , "')")}"
/*
       * be careful with the number of parentheses for "in values"; it is:
* ... in (VALUES ('a', 'b') );
* ... in (VALUES ('a', 'b'), ('c', 'd') );
* etc. No more, no less.
*/
val query =
s"""select
executiondate, nodeid, ruleId, serial, directiveid, component, keyValue, executionTimeStamp, eventtype, policy, msg
from
RudderSysEvents
where
(nodeid, executiontimestamp) in (VALUES ${nodeParam})
""" + ruleClause
boxed(s"get last run reports for ${runs.size} nodes")(jdbcTemplate.query(query, ReportsMapper).asScala.groupBy( _.nodeId))
}
}
override def findReportsByNode(nodeId : NodeId) : Seq[Reports] = {
jdbcTemplate.query(
baseQuery + " and nodeId = ? ORDER BY id desc LIMIT 1000"
, Array[AnyRef](nodeId.value)
, ReportsMapper
).asScala
}
override def findReportsByNode(
nodeId : NodeId
, ruleId : RuleId
, serial : Int
, beginDate: DateTime
, endDate : Option[DateTime]
): Seq[Reports] = {
import scala.collection.mutable.Buffer
var query = baseQuery + " and nodeId = ? and ruleId = ? and serial = ? and executionTimeStamp >= ?"
var array = Buffer[AnyRef](nodeId.value,
ruleId.value,
new java.lang.Integer(serial),
new Timestamp(beginDate.getMillis))
endDate match {
case None =>
case Some(date) => query = query + " and executionTimeStamp < ?"; array += new Timestamp(date.getMillis)
}
query = query + " ORDER BY executionTimeStamp asc"
jdbcTemplate.query(query,
array.toArray[AnyRef],
ReportsMapper).asScala
}
/**
* Return the last (really the last, serial wise, with full execution) reports for a rule
*/
override def findLastReportByRule(
ruleId : RuleId
, serial : Int
, node : Option[NodeId]
, runInterval: Int
) : Seq[Reports] = {
import scala.collection.mutable.Buffer
var query = ""
var array = Buffer[AnyRef]()
val interval = 3*runInterval
node match {
case None =>
query += joinQuery(interval) + s" and ruleId = ? and serial = ? and executionTimeStamp > (now() - interval '${interval} minutes')"
array ++= Buffer[AnyRef](ruleId.value, new java.lang.Integer(serial))
case Some(nodeId) =>
query += joinQueryByNode(interval) + s" and ruleId = ? and serial = ? and executionTimeStamp > (now() - interval '${interval} minutes') and nodeId = ?"
array ++= Buffer[AnyRef](nodeId.value, ruleId.value, new java.lang.Integer(serial), nodeId.value)
}
jdbcTemplate.query(query,
array.toArray[AnyRef],
ReportsMapper).asScala
}
/**
* Return the last (really the last, serial wise, with full execution) reports for a rule
*/
override def findLastReportsByRules(
rulesAndSerials: Set[(RuleId, Int)]
, runInterval : Int
) : Seq[Reports] = {
import scala.collection.mutable.Buffer
val interval = 3*runInterval
var query = joinQuery(interval) + " and ( 1 != 1 "
var array = Buffer[AnyRef]()
rulesAndSerials.foreach { case (ruleId, serial) =>
query += " or (ruleId = ? and serial = ?)"
array ++= Buffer[AnyRef](ruleId.value, new java.lang.Integer(serial))
}
query += s" ) and executionTimeStamp > (now() - interval '${interval} minutes')"
jdbcTemplate.query(query,
array.toArray[AnyRef],
ReportsMapper).asScala
}
override def findExecutionTimeByNode(
nodeId : NodeId
, beginDate: DateTime
, endDate : Option[DateTime]
) : Seq[DateTime] = {
val array : List[AnyRef] = nodeId.value :: new Timestamp(beginDate.getMillis) :: endDate.map( endDate => new Timestamp(endDate.getMillis) :: Nil).getOrElse(Nil)
val endQuery : String = endDate.map{_ => "and date < ?" }.getOrElse("")
val query = s"select distinct date from reportsexecution where nodeId = ? and date >= ? ${endQuery} order by date"
jdbcTemplate.query(
query
, array.toArray[AnyRef]
, ExecutionTimeMapper
).asScala
}
override def getOldestReports() : Box[Option[Reports]] = {
jdbcTemplate.query(baseQuery + " order by executionTimeStamp asc limit 1",
ReportsMapper).asScala match {
case seq if seq.size > 1 => Failure("Too many answer for the latest report in the database")
case seq => Full(seq.headOption)
}
}
override def getOldestArchivedReports() : Box[Option[Reports]] = {
jdbcTemplate.query(baseArchivedQuery + " order by executionTimeStamp asc limit 1",
ReportsMapper).asScala match {
case seq if seq.size > 1 => Failure("Too many answer for the latest report in the database")
case seq => Full(seq.headOption)
}
}
override def getNewestReports() : Box[Option[Reports]] = {
jdbcTemplate.query(baseQuery + " order by executionTimeStamp desc limit 1",
ReportsMapper).asScala match {
case seq if seq.size > 1 => Failure("Too many answer for the latest report in the database")
case seq => Full(seq.headOption)
}
}
override def getNewestArchivedReports() : Box[Option[Reports]] = {
jdbcTemplate.query(baseArchivedQuery + " order by executionTimeStamp desc limit 1",
ReportsMapper).asScala match {
case seq if seq.size > 1 => Failure("Too many answer for the latest report in the database")
case seq => Full(seq.headOption)
}
}
override def getDatabaseSize(databaseName:String) : Box[Long] = {
try {
jdbcTemplate.query(
s"""SELECT pg_total_relation_size('${databaseName}') as "size" """
, DatabaseSizeMapper
).asScala match {
case seq if seq.size > 1 => Failure(s"Too many answer for the latest report in the database '${databaseName}'")
case seq => seq.headOption ?~! s"The query used to find database '${databaseName}' size did not return any tuple"
}
} catch {
case e: DataAccessException =>
val msg ="Could not compute the size of the database, cause is " + e.getMessage()
logger.error(msg)
Failure(msg,Full(e),Empty)
}
}
override def archiveEntries(date : DateTime) : Box[Int] = {
try{
val migrate = jdbcTemplate.execute("""
insert into %s
(id, executionDate, nodeId, directiveId, ruleId, serial, component, keyValue, executionTimeStamp, eventType, policy, msg)
(select id, executionDate, nodeId, directiveId, ruleId, serial, component, keyValue, executionTimeStamp, eventType, policy, msg from %s
where executionTimeStamp < '%s')
""".format(archiveTable,reportsTable,date.toString("yyyy-MM-dd") )
)
logger.debug("""Archiving report with SQL query: [[
| insert into %s (id, executionDate, nodeId, directiveId, ruleId, serial, component, keyValue, executionTimeStamp, eventType, policy, msg)
| (select id, executionDate, nodeId, directiveId, ruleId, serial, component, keyValue, executionTimeStamp, eventType, policy, msg from %s
| where executionTimeStamp < '%s')
|]]""".stripMargin.format(archiveTable,reportsTable,date.toString("yyyy-MM-dd")))
val delete = jdbcTemplate.update("""
delete from %s where executionTimeStamp < '%s'
""".format(reportsTable,date.toString("yyyy-MM-dd") )
)
jdbcTemplate.execute("vacuum %s".format(reportsTable))
Full(delete)
} catch {
case e: DataAccessException =>
val msg ="Could not archive entries in the database, cause is " + e.getMessage()
logger.error(msg)
Failure(msg,Full(e),Empty)
}
}
override def deleteEntries(date : DateTime) : Box[Int] = {
logger.debug("""Deleting report with SQL query: [[
| delete from %s where executionTimeStamp < '%s'
|]] and: [[
| delete from %s where executionTimeStamp < '%s'
|]] and: [[
| delete from %s where date < '%s'
|]]""".stripMargin.format(reportsTable, date.toString("yyyy-MM-dd")
, archiveTable, date.toString("yyyy-MM-dd")
, reportsExecutionTable, date.toString("yyyy-MM-dd")))
try{
val delete = jdbcTemplate.update("""
delete from %s where executionTimeStamp < '%s'
""".format(reportsTable,date.toString("yyyy-MM-dd") )
) + jdbcTemplate.update("""
delete from %s where executionTimeStamp < '%s'
""".format(archiveTable,date.toString("yyyy-MM-dd") )
)+ jdbcTemplate.update("""
delete from %s where date < '%s'
""".format(reportsExecutionTable,date.toString("yyyy-MM-dd") )
)
jdbcTemplate.execute("vacuum %s".format(reportsTable))
jdbcTemplate.execute("vacuum full %s".format(archiveTable))
jdbcTemplate.execute("vacuum %s".format(reportsExecutionTable))
Full(delete)
} catch {
case e: DataAccessException =>
val msg ="Could not delete entries in the database, cause is " + e.getMessage()
logger.error(msg)
Failure(msg,Full(e),Empty)
}
}
override def getHighestId : Box[Long] = {
val query = "select id from RudderSysEvents order by id desc limit 1"
try {
jdbcTemplate.query(query,IdMapper).asScala match {
case seq if seq.size > 1 => Failure("Too many answer for the highest id in the database")
case seq => seq.headOption ?~! "No report where found in database (and so, we can not get highest id)"
}
} catch {
case e:DataAccessException =>
logger.error("Could not fetch highest id in the database. Reason is : %s".format(e.getMessage()))
Failure(e.getMessage())
}
}
override def getLastHundredErrorReports(kinds:List[String]) : Box[Seq[(Reports,Long)]] = {
val query = "%s and (%s) order by executiondate desc limit 100".format(idQuery,kinds.map("eventtype='%s'".format(_)).mkString(" or "))
try {
Full(jdbcTemplate.query(query,ReportsWithIdMapper).asScala)
} catch {
case e:DataAccessException =>
logger.error("Could not fetch last hundred reports in the database. Reason is : %s".format(e.getMessage()))
Failure("Could not fetch last hundred reports in the database. Reason is : %s".format(e.getMessage()))
}
}
override def getReportsWithLowestId : Box[Option[(Reports,Long)]] = {
jdbcTemplate.query(s"${idQuery} order by id asc limit 1",
ReportsWithIdMapper).asScala match {
case seq if seq.size > 1 => Failure("Too many answer for the latest report in the database")
case seq => Full(seq.headOption)
}
}
/**
* From an id and an end date, return a list of AgentRun, and the max ID that has been considered
*/
override def getReportsfromId(lastProcessedId: Long, endDate: DateTime) : Box[(Seq[AgentRun], Long)] = {
def getMaxId(fromId: Long, before: DateTime): Box[Long] = {
val queryForMaxId = "select max(id) as id from RudderSysEvents where id > ? and executionTimeStamp < ?"
val params = Array[AnyRef](new java.lang.Long(fromId), new Timestamp(endDate.getMillis))
try {
jdbcTemplate.query(queryForMaxId, params, IdMapper).asScala match {
case seq if seq.size > 1 => Failure("Too many answer for the highest id in the database")
case seq =>
            // sometimes, max on postgres returns 0
val newId = scala.math.max(fromId, seq.headOption.getOrElse(0L))
Full(newId)
}
} catch {
case e:DataAccessException =>
val msg = s"Could not fetch max id for execution in the database. Reason is : ${e.getMessage}"
logger.error(msg)
Failure(msg, Full(e), Empty)
}
}
def getRuns(fromId: Long, toId: Long): Box[Seq[AgentRun]] = {
import java.lang.{Long => jLong}
val getRunsQuery = """select distinct
| T.nodeid, T.executiontimestamp, coalesce(C.iscomplete, false) as complete, coalesce(C.msg, '') as nodeconfigid, T.insertionid
|from
| (select nodeid, executiontimestamp, min(id) as insertionid from ruddersysevents where id > ? and id <= ? group by nodeid, executiontimestamp) as T
|left join
| (select
| true as isComplete, nodeid, executiontimestamp, msg
| from
| ruddersysevents where id > ? and id <= ? and
| ruleId like 'hasPolicyServer%' and
| component = 'common' and keyValue = 'EndRun'
| ) as C
|on T.nodeid = C.nodeid and T.executiontimestamp = C.executiontimestamp""".stripMargin
if(fromId >= toId) {
Full(Seq())
} else {
val params = Array[AnyRef](new jLong(fromId), new jLong(toId), new jLong(fromId), new jLong(toId))
try {
Full(jdbcTemplate.query(getRunsQuery, params, ReportsExecutionMapper).asScala)
} catch {
case e:DataAccessException =>
val msg = s"Could not fetch agent executions in the database. Reason is : ${e.getMessage}"
logger.error(msg)
Failure(msg, Full(e), Empty)
}
}
}
//actual logic for getReportsfromId
for {
toId <- getMaxId(lastProcessedId, endDate)
reports <- getRuns(lastProcessedId, toId)
} yield {
(reports, toId)
}
}
override def getChangeReports(notBefore: DateTime): Box[Seq[ResultRepairedReport]] = {
val query = s"${baseQuery} and eventtype='${Reports.RESULT_REPAIRED}' and executionTimeStamp > '${new Timestamp(notBefore.getMillis)}'::timestamp order by executionTimeStamp asc"
try {
Full(jdbcTemplate.query(query,ReportsMapper).asScala.collect{case r:ResultRepairedReport => r})
} catch {
case ex: Exception =>
val error = Failure("Error when trying to retrieve change reports", Some(ex), Empty)
logger.error(error)
error
}
}
override def getErrorReportsBeetween(lower : Long, upper:Long,kinds:List[String]) : Box[Seq[Reports]] = {
if (lower>=upper)
Empty
else{
val query = "%s and id between '%d' and '%d' and (%s) order by executiondate asc".format(baseQuery,lower,upper,kinds.map("eventtype='%s'".format(_)).mkString(" or "))
try {
Full(jdbcTemplate.query(query,ReportsMapper).asScala)
} catch {
case e:DataAccessException =>
logger.error("Could not fetch reports between ids %d and %d in the database. Reason is : %s".format(lower,upper,e.getMessage()))
Failure("Could not fetch reports between ids %d and %d in the database. Reason is : %s".format(lower,upper,e.getMessage()))
}
}
}
}
object ReportsMapper extends RowMapper[Reports] {
def mapRow(rs : ResultSet, rowNum: Int) : Reports = {
Reports(
new DateTime(rs.getTimestamp("executionDate"))
, RuleId(rs.getString("ruleId"))
, DirectiveId(rs.getString("directiveId"))
, NodeId(rs.getString("nodeId"))
, rs.getInt("serial")
, rs.getString("component")
, rs.getString("keyValue")
, new DateTime(rs.getTimestamp("executionTimeStamp"))
, rs.getString("eventType")
, rs.getString("msg")
//what about policy ? => contains the technique name, not used directly by Rudder
)
}
}
object ExecutionTimeMapper extends RowMapper[DateTime] {
def mapRow(rs : ResultSet, rowNum: Int) : DateTime = {
new DateTime(rs.getTimestamp("date"))
}
}
object DatabaseSizeMapper extends RowMapper[Long] {
def mapRow(rs : ResultSet, rowNum: Int) : Long = {
rs.getLong("size")
}
}
object IdMapper extends RowMapper[Long] {
def mapRow(rs : ResultSet, rowNum: Int) : Long = {
rs.getLong("id")
}
}
object ReportsWithIdMapper extends RowMapper[(Reports,Long)] {
def mapRow(rs : ResultSet, rowNum: Int) : (Reports,Long) = {
(ReportsMapper.mapRow(rs, rowNum),IdMapper.mapRow(rs, rowNum))
}
}
object ReportsExecutionMapper extends RowMapper[AgentRun] {
  // we want to match: """End execution with config [75rz605art18a05]"""
  // the (?s) allows . to match any character, even non-displayable ones
val nodeConfigVersionRegex = """(?s).+\[([^\]]+)\].*""".r
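  // e.g. for "End execution with config [75rz605art18a05]" the captured group is "75rz605art18a05"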
def mapRow(rs : ResultSet, rowNum: Int) : AgentRun = {
AgentRun(
AgentRunId(NodeId(rs.getString("nodeid")), new DateTime(rs.getTimestamp("executiontimestamp")))
, {
val s = rs.getString("nodeconfigid")
if(s == null) {
None
} else {
s match {
//check if we have the version and modify the report accordingly
case nodeConfigVersionRegex(v) =>
Some(NodeConfigId(v))
case _ =>
None
}
}
}
, rs.getBoolean("complete")
, rs.getLong("insertionid")
)
}
}
|
Kegeruneku/rudder
|
rudder-core/src/main/scala/com/normation/rudder/repository/jdbc/ReportsJdbcRepository.scala
|
Scala
|
agpl-3.0
| 22,805 |
/**
* Copyright (C) 2009-2011 the original author or authors.
* See the notice.md file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.fusesource.scalate.support
import org.fusesource.scalate.TemplateEngine
import org.fusesource.scalate.util.Files
/**
* A helper object to find a template from a URI using a number of possible extensions and directories
*/
class TemplateFinder(engine: TemplateEngine) {
var hiddenPatterns = List("""^_|/_""".r, """^\.|/\.""".r)
var replacedExtensions = List(".html", ".htm")
lazy val extensions = engine.extensions
def findTemplate(path: String): Option[String] = {
var rc = Option(engine.finderCache.get(path))
if (rc.isEmpty) {
rc = search(path)
if (rc.isDefined && !engine.isDevelopmentMode) {
engine.finderCache.put(path, rc.get)
}
}
rc
}
def search(rootOrPath: String): Option[String] = {
val path = if (rootOrPath == "/") "/index" else rootOrPath
if (hiddenPatterns.exists(_.findFirstIn(path).isDefined)) {
return None
}
    // Is the URI a direct path to a template?
// i.e: /path/page.jade -> /path/page.jade
def findDirect(uri: String): Option[String] = {
engine.templateDirectories.flatMap { base =>
extensions.flatMap { ext =>
val path = base + uri
if (path.endsWith(ext) && engine.resourceLoader.exists(path)) {
Some(path)
} else {
None
}
}
}.headOption
}
    // Let's try to find the template by appending a template extension to the path
// i.e: /path/page.html -> /path/page.html.jade
def findAppended(uri: String): Option[String] = {
engine.templateDirectories.flatMap { base =>
extensions.flatMap { ext =>
val path = base + uri + "." + ext
if (engine.resourceLoader.exists(path)) {
Some(path)
} else {
None
}
}
}.headOption
}
    // Let's try to find the template by replacing the extension
// i.e: /path/page.html -> /path/page.jade
def findReplaced(): Option[String] = {
replacedExtensions.flatMap {
ext =>
if (path.endsWith(ext)) {
findAppended(path.stripSuffix(ext))
} else {
None
}
}.headOption
}
    // Let's try to find the template for well-known template extensions
// i.e:
// /path/page.css -> List(/path/page.sass, /path/page.scss)
// /path/page.js -> List(/path/page.coffee)
def findTemplateAlias(uri: String): Option[String] = {
val ext = Files.extension(uri)
lazy val remaining = path.stripSuffix(ext)
if (ext.size > 0) {
engine.extensionToTemplateExtension.get(ext).flatMap {
set =>
engine.templateDirectories.flatMap { base =>
set.flatMap { ext =>
val path = base + remaining + ext
if (engine.resourceLoader.exists(path)) {
Some(path)
} else {
None
}
}
}.headOption
}
} else {
None
}
}
findDirect(path).orElse(findAppended(path).orElse(findTemplateAlias(path).orElse(findReplaced())))
}
}
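// Hedged usage sketch: resolving a request URI against the rules implemented above
// (the TemplateEngine instance and the template files on disk are assumed):
//   val finder = new TemplateFinder(engine)
//   finder.findTemplate("/foo/bar.jade")  // direct hit if /foo/bar.jade exists
//   finder.findTemplate("/foo/bar")       // appended: may resolve to e.g. /foo/bar.jade
//   finder.findTemplate("/foo/bar.html")  // replaced: may resolve to /foo/bar.jade via replacedExtensions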
|
scalate/scalate
|
scalate-core/src/main/scala/org/fusesource/scalate/support/TemplateFinder.scala
|
Scala
|
apache-2.0
| 3,891 |
package uk.co.bocuma.fourchan
case class Board(board: String, title: Option[String] = None, threads: Option[List[Thread]] = None) extends FourChanEntity
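// Minimal construction sketch (Thread here is the companion entity defined elsewhere in this package):
//   val board = Board("g", title = Some("Technology"))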
|
cammellos/fourchan-scala
|
src/main/scala/uk/co/bocuma/fourchan/Board.scala
|
Scala
|
apache-2.0
| 153 |
/*
* Copyright 2015 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mongodb.scala
import java.util.concurrent.TimeUnit
import scala.concurrent.duration.Duration
import com.mongodb.CursorType
import com.mongodb.async.SingleResultCallback
import com.mongodb.async.client.FindIterable
import org.mongodb.scala.bson.conversions.Bson
import org.mongodb.scala.internal.ObservableHelper._
import org.mongodb.scala.model.Collation
/**
* Observable interface for Find.
*
* @param wrapped the underlying java FindObservable
* @tparam TResult The type of the result.
* @since 1.0
*/
case class FindObservable[TResult](private val wrapped: FindIterable[TResult]) extends Observable[TResult] {
/**
* Helper to return a Observable limited to just the first result the query.
*
   * **Note:** Sets the limit to 1 in the background, so only the first result is returned.
   *
   * @return an Observable which will return the first item
*/
def first(): SingleObservable[TResult] = observe(wrapped.first(_: SingleResultCallback[TResult]))
/**
* Sets the query filter to apply to the query.
*
* [[http://docs.mongodb.org/manual/reference/method/db.collection.find/ Filter]]
* @param filter the filter, which may be null.
* @return this
*/
def filter(filter: Bson): FindObservable[TResult] = {
wrapped.filter(filter)
this
}
/**
* Sets the limit to apply.
*
* [[http://docs.mongodb.org/manual/reference/method/cursor.limit/#cursor.limit Limit]]
   * @param limit the limit to apply
* @return this
*/
def limit(limit: Int): FindObservable[TResult] = {
wrapped.limit(limit)
this
}
/**
* Sets the number of documents to skip.
*
* [[http://docs.mongodb.org/manual/reference/method/cursor.skip/#cursor.skip Skip]]
* @param skip the number of documents to skip
* @return this
*/
def skip(skip: Int): FindObservable[TResult] = {
wrapped.skip(skip)
this
}
/**
* Sets the maximum execution time on the server for this operation.
*
* [[http://docs.mongodb.org/manual/reference/operator/meta/maxTimeMS/ Max Time]]
* @param duration the duration
* @return this
*/
def maxTime(duration: Duration): FindObservable[TResult] = {
wrapped.maxTime(duration.toMillis, TimeUnit.MILLISECONDS)
this
}
/**
* The maximum amount of time for the server to wait on new documents to satisfy a tailable cursor
* query. This only applies to a TAILABLE_AWAIT cursor. When the cursor is not a TAILABLE_AWAIT cursor,
* this option is ignored.
*
* On servers >= 3.2, this option will be specified on the getMore command as "maxTimeMS". The default
* is no value: no "maxTimeMS" is sent to the server with the getMore command.
*
* On servers < 3.2, this option is ignored, and indicates that the driver should respect the server's default value
*
* A zero value will be ignored.
*
* [[http://docs.mongodb.org/manual/reference/operator/meta/maxTimeMS/ Max Time]]
* @param duration the duration
   * @return this
* @since 1.1
*/
def maxAwaitTime(duration: Duration): FindObservable[TResult] = {
wrapped.maxAwaitTime(duration.toMillis, TimeUnit.MILLISECONDS)
this
}
/**
* Sets the query modifiers to apply to this operation.
*
* [[http://docs.mongodb.org/manual/reference/operator/query-modifier/ Query Modifiers]]
* @param modifiers the query modifiers to apply, which may be null.
* @return this
*/
def modifiers(modifiers: Bson): FindObservable[TResult] = {
wrapped.modifiers(modifiers)
this
}
/**
* Sets a document describing the fields to return for all matching documents.
*
* [[http://docs.mongodb.org/manual/reference/method/db.collection.find/ Projection]]
* @param projection the project document, which may be null.
* @return this
*/
def projection(projection: Bson): FindObservable[TResult] = {
wrapped.projection(projection)
this
}
/**
* Sets the sort criteria to apply to the query.
*
* [[http://docs.mongodb.org/manual/reference/method/cursor.sort/ Sort]]
* @param sort the sort criteria, which may be null.
* @return this
*/
def sort(sort: Bson): FindObservable[TResult] = {
wrapped.sort(sort)
this
}
/**
* The server normally times out idle cursors after an inactivity period (10 minutes)
* to prevent excess memory use. Set this option to prevent that.
*
* @param noCursorTimeout true if cursor timeout is disabled
* @return this
*/
def noCursorTimeout(noCursorTimeout: Boolean): FindObservable[TResult] = {
wrapped.noCursorTimeout(noCursorTimeout)
this
}
/**
* Users should not set this under normal circumstances.
*
* @param oplogReplay if oplog replay is enabled
* @return this
*/
def oplogReplay(oplogReplay: Boolean): FindObservable[TResult] = {
wrapped.oplogReplay(oplogReplay)
this
}
/**
* Get partial results from a sharded cluster if one or more shards are unreachable (instead of throwing an error).
*
* @param partial if partial results for sharded clusters is enabled
* @return this
*/
def partial(partial: Boolean): FindObservable[TResult] = {
wrapped.partial(partial)
this
}
/**
* Sets the cursor type.
*
* @param cursorType the cursor type
* @return this
*/
def cursorType(cursorType: CursorType): FindObservable[TResult] = {
wrapped.cursorType(cursorType)
this
}
/**
* Sets the collation options
*
* @param collation the collation options to use
* @return this
* @since 1.2
* @note A null value represents the server default.
* @note Requires MongoDB 3.4 or greater
*/
def collation(collation: Collation): FindObservable[TResult] = {
wrapped.collation(collation)
this
}
/**
* Sets the batch size.
* @param batchSize the batch size.
* @since 2.1.0
* @return this
* @note Specifying 1 or a negative number is analogous to using the limit() method.
*/
def batchSize(batchSize: Int): FindObservable[TResult] = {
wrapped.batchSize(batchSize)
this
}
override def subscribe(observer: Observer[_ >: TResult]): Unit = observe(wrapped).subscribe(observer)
}
|
jCalamari/mongo-scala-driver
|
driver/src/main/scala/org/mongodb/scala/FindObservable.scala
|
Scala
|
apache-2.0
| 6,809 |
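/*
 * Illustrative sketch, not part of the driver source above: chaining the fluent
 * setters of FindObservable on a query and printing the results via subscribe,
 * the entry point shown at the bottom of the file. The "people" database,
 * "users" collection and field names are assumptions; MongoClient() connects
 * to mongodb://localhost by default.
 */
import java.util.concurrent.CountDownLatch

import scala.concurrent.duration._

import org.mongodb.scala._
import org.mongodb.scala.model.Filters.equal
import org.mongodb.scala.model.Projections.include
import org.mongodb.scala.model.Sorts.descending

object FindObservableExample extends App {
  val client = MongoClient()
  val users: MongoCollection[Document] = client.getDatabase("people").getCollection("users")

  // Each setter mutates the wrapped FindIterable and returns `this`, so calls chain.
  val query: FindObservable[Document] =
    users.find(equal("active", true))
      .sort(descending("createdAt"))
      .projection(include("name"))
      .limit(10)
      .maxTime(5.seconds)

  // Keep the demo App alive until the asynchronous query completes.
  val done = new CountDownLatch(1)
  query.subscribe(new Observer[Document] {
    override def onNext(doc: Document): Unit = println(doc.toJson())
    override def onError(e: Throwable): Unit = { e.printStackTrace(); done.countDown() }
    override def onComplete(): Unit = done.countDown()
  })
  done.await()
  client.close()
}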
package com.github.jeanadrien.gatling.mqtt
import java.nio.charset.StandardCharsets
import com.github.jeanadrien.gatling.mqtt.actions._
import com.github.jeanadrien.gatling.mqtt.protocol.MqttProtocolBuilder
import io.gatling.core.config.GatlingConfiguration
import io.gatling.core.session._
/**
 * Gatling DSL entry points for the MQTT protocol: protocol builder, action builders and payload helpers.
 */
object Predef {
def mqtt(implicit configuration : GatlingConfiguration) = MqttProtocolBuilder(configuration)
def connect = ConnectActionBuilder()
def subscribe(topic : Expression[String]) = SubscribeActionBuilder(topic)
def publish[T <% MqttPayload](
topic : Expression[String], payload : Expression[T]
) = PublishActionBuilder(topic, payload.map(_.toByteArray))
def publishAndWait[T <% MqttPayload](
topic : Expression[String], payload : Expression[T]
) = PublishAndWaitActionBuilder(topic, payload.map(_.toByteArray))
def waitForMessages = WaitForMessagesActionBuilder
def payload(in : Expression[String]) : Expression[Array[Byte]] =
in.map(_.getBytes(StandardCharsets.UTF_8))
trait MqttPayload {
def toByteArray : Array[Byte]
}
implicit class StringMqttPayload(s : String) extends MqttPayload {
override def toByteArray = s.getBytes
}
implicit def byteArrayPayload(b : Array[Byte]) : MqttPayload = new MqttPayload {
override def toByteArray = b
}
}
|
jeanadrien/gatling-mqtt-protocol
|
src/main/scala/com/github/jeanadrien/gatling/mqtt/Predef.scala
|
Scala
|
apache-2.0
| 1,377 |
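/*
 * Illustrative sketch, not part of the library source above: a minimal Gatling
 * simulation wired through the DSL entry points defined in Predef. The broker
 * URL, the `.host(...)` builder call on MqttProtocolBuilder, the topic name and
 * the injection profile are assumptions for the example, not verified API.
 */
import io.gatling.core.Predef._
import com.github.jeanadrien.gatling.mqtt.Predef._

class MqttSmokeSimulation extends Simulation {
  // Assumed builder option; `mqtt` itself comes from the Predef object above.
  val mqttConf = mqtt.host("tcp://localhost:1883")

  val scn = scenario("connect, subscribe, publish")
    .exec(connect)
    .exec(subscribe("devices/inbox"))
    .exec(publishAndWait("devices/inbox", "ping")) // String payload via StringMqttPayload

  setUp(scn.inject(atOnceUsers(1))).protocols(mqttConf)
}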
package xyz.jmullin.drifter.entity
import com.badlogic.gdx.graphics.g3d.{Environment, ModelBatch}
import xyz.jmullin.drifter.DrifterInput
import xyz.jmullin.drifter.enrich.RichGeometry._
import xyz.jmullin.drifter.GdxAlias._
/**
* General purpose container for Entity3Ds, allows attachment and management of children entities and passes
* through render and update events to attached children appropriately.
*/
trait EntityContainer3D extends DrifterInput {
// Implicit layer for local context
implicit def layer: Option[Layer3D]
/**
* List of all attached children.
*/
var children = List[Entity3D]()
/**
* If defined, the position the cursor should be locked to for picking purposes.
*/
var mouseLocked: Option[V2] = None
/**
* Remove an entity from this container and any of its children.
*
* @param e Entity to remove.
*/
def remove(e: Entity3D): Unit = {
children = children.filterNot(_.equals(e))
children.foreach(_.remove(e))
e.parent = None
}
/**
* Adds an entity to the container and sets parent/child relationships as necessary.
*
* @param e Entity to add.
* @return The added entity.
*/
def add(e: Entity3D) = {
children ::= e
e.parent = Some(this)
e.create(this)
e
}
/**
* Draws all attached children entities.
*
* @param batch Active SpriteBatch to use in drawing.
*/
  def renderChildren(implicit batch: ModelBatch, environment: Environment): Unit = {
children.foreach(_.render(batch, environment))
}
/**
* Updates all attached children.
*
* @param delta Time elapsed since the last update tick.
*/
  def updateChildren(delta: Float): Unit = {
children.foreach(_.update(delta))
}
// Input events are aggregated through this container's children and coalesced to a single hit result.
def mouseEvent(v: V2, event: (Entity3D, V3) => Boolean): Boolean = {
(for(camera <- layer.map(_.camera)) yield {
val pickOrigin = mouseLocked.getOrElse(V2(mouseX, gameH-mouseY))
val hits = children.flatMap(e => e.hitPoint(camera.getPickRay(pickOrigin.x, pickOrigin.y)).map(e -> _))
val closest = hits.sortBy { case (entity, hit) => (camera.position - hit).len() }
closest.exists(event.tupled)
}).getOrElse(false)
}
override def touchDown(v: V2, pointer: Int, button: Int): Boolean = mouseEvent(v, _.touchDown(_, button))
override def touchUp(v: V2, pointer: Int, button: Int): Boolean = mouseEvent(v, _.touchUp(_, button))
override def touchDragged(v: V2, pointer: Int): Boolean = mouseEvent(v, _.touchDragged(_))
override def mouseMoved(v: V2): Boolean = mouseEvent(v, _.mouseMoved(_))
override def keyDown(keycode: Int): Boolean = {
val hit = children.find(_.keyDown(keycode))
hit.isDefined
}
override def keyUp(keycode: Int): Boolean = {
val hit = children.find(_.keyUp(keycode))
hit.isDefined
}
override def keyTyped(character: Char): Boolean = {
val hit = children.find(_.keyTyped(character))
hit.isDefined
}
override def scrolled(amount: Int): Boolean = {
val hit = children.find(_.scrolled(amount))
hit.isDefined
}
}
|
JustinMullin/drifter
|
src/main/scala/xyz/jmullin/drifter/entity/EntityContainer3D.scala
|
Scala
|
mit
| 3,148 |
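/*
 * Illustrative sketch, not part of the file above: the hit-coalescing idea in
 * EntityContainer3D.mouseEvent, reduced to plain data. The Entity stand-in and
 * the hardcoded distances are assumptions; the real code sorts pick-ray
 * intersections by their distance from the camera.
 */
object HitCoalescingSketch extends App {
  case class Entity(name: String) {
    // Stand-in for Entity3D.touchDown and friends: true means the event was consumed.
    def touchDown(distance: Float): Boolean = name == "button"
  }

  // (entity, distance-from-camera) pairs produced by some picking step.
  val hits = List(Entity("ground") -> 12.0f, Entity("button") -> 3.5f)

  // Nearest hit first, then stop at the first entity that handles the event,
  // mirroring `closest.exists(event.tupled)` in the original.
  val handled = hits.sortBy { case (_, distance) => distance }
    .exists { case (entity, distance) => entity.touchDown(distance) }

  println(handled) // true: the nearest entity ("button") consumed the event
}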
package org.jetbrains.plugins.scala
package lang
package completion
import com.intellij.codeInsight.completion.{CompletionParameters, PrefixMatcher}
import com.intellij.openapi.util.Key
import com.intellij.psi._
import org.jetbrains.plugins.scala.lang.lexer._
import org.jetbrains.plugins.scala.lang.parser._
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.base.ScReferenceElement
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.ScCaseClause
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScTypeElement
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.packaging._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScTypeDefinition
import org.jetbrains.plugins.scala.lang.psi.types.ScType
import org.jetbrains.plugins.scala.lang.refactoring.namesSuggester.NameSuggester
import scala.collection.mutable.ArrayBuffer
/**
* User: Alexander Podkhalyuzin
* Date: 21.05.2008.
*/
object ScalaCompletionUtil {
val PREFIX_COMPLETION_KEY: Key[Boolean] = Key.create("prefix.completion.key")
def completeThis(ref: ScReferenceExpression): Boolean = {
ref.qualifier match {
case Some(_) => false
case None =>
ref.getParent match {
case inf: ScInfixExpr if inf.operation == ref => false
case postf: ScPostfixExpr if postf.operation == ref => false
case pref: ScPrefixExpr if pref.operation == ref => false
case _ => true
}
}
}
def shouldRunClassNameCompletion(dummyPosition: PsiElement, parameters: CompletionParameters, prefixMatcher: PrefixMatcher,
checkInvocationCount: Boolean = true, lookingForAnnotations: Boolean = false): Boolean = {
if (checkInvocationCount && parameters.getInvocationCount < 2) return false
if (dummyPosition.getNode.getElementType == ScalaTokenTypes.tIDENTIFIER) {
dummyPosition.getParent match {
case ref: ScReferenceElement if ref.qualifier.isDefined => return false
case _ =>
}
}
if (checkInvocationCount && parameters.getInvocationCount >= 2) return true
val prefix = prefixMatcher.getPrefix
val capitalized = prefix.length() > 0 && prefix.substring(0, 1).capitalize == prefix.substring(0, 1)
capitalized || lookingForAnnotations
}
def generateAnonymousFunctionText(braceArgs: Boolean, params: scala.Seq[ScType], canonical: Boolean,
withoutEnd: Boolean = false, arrowText: String = "=>"): String = {
val text = new StringBuilder()
if (braceArgs) text.append("case ")
val paramNamesWithTypes = new ArrayBuffer[(String, ScType)]
def contains(name: String): Boolean = {
paramNamesWithTypes.exists{
case (s, _) => s == name
}
}
for (param <- params) {
val names = NameSuggester.suggestNamesByType(param)
var name = if (names.length == 0) "x" else names(0)
if (contains(name)) {
var count = 0
var newName = name + count
while (contains(newName)) {
count += 1
newName = name + count
}
name = newName
}
paramNamesWithTypes.+=(name -> param)
}
val iter = paramNamesWithTypes.map {
case (s, tp) => s + ": " + (if (canonical) {
ScType.canonicalText(tp)
} else ScType.presentableText(tp))
}
val paramsString =
if (paramNamesWithTypes.size != 1 || !braceArgs) iter.mkString("(", ", ", ")")
else iter.head
text.append(paramsString)
if (!withoutEnd) text.append(" ").append(arrowText)
text.toString()
}
def getLeafByOffset(offset: Int, element: PsiElement): PsiElement = {
if (offset < 0) {
return null
}
var candidate: PsiElement = element.getContainingFile
if (candidate == null || candidate.getNode == null) return null
while (candidate.getNode.getChildren(null).nonEmpty) {
candidate = candidate.findElementAt(offset)
if (candidate == null || candidate.getNode == null) return null
}
candidate
}
/**
   * The first value of the returned pair indicates whether processing should stop here.
   * The second value is the result to return when the first is true.
*/
def getForAll(parent: PsiElement, leaf: PsiElement): (Boolean, Boolean) = {
parent match {
case _: ScalaFile =>
if (leaf.getNextSibling != null && leaf.getNextSibling.getNextSibling.isInstanceOf[ScPackaging] &&
leaf.getNextSibling.getNextSibling.getText.indexOf('{') == -1)
return (true, false)
case _ =>
}
parent match {
case _: ScalaFile | _: ScPackaging =>
var node = leaf.getPrevSibling
if (node.isInstanceOf[PsiWhiteSpace]) node = node.getPrevSibling
node match {
case x: PsiErrorElement =>
val s = ErrMsg("wrong.top.statment.declaration")
x.getErrorDescription match {
case `s` => return (true, true)
case _ => return (true, false)
}
case _ => return (true, true)
}
case expr: ScReferenceExpression =>
parent.getParent match {
case _: ScBlockExpr | _: ScTemplateBody | _: ScBlock | _: ScCaseClause =>
if (awful(parent, leaf))
return (true, true)
case _ =>
}
case _ =>
}
(false, true)
}
def awful(parent: PsiElement, leaf: PsiElement): Boolean = {
(leaf.getPrevSibling == null || leaf.getPrevSibling.getPrevSibling == null ||
leaf.getPrevSibling.getPrevSibling.getNode.getElementType != ScalaTokenTypes.kDEF) &&
(parent.getPrevSibling == null || parent.getPrevSibling.getPrevSibling == null ||
(parent.getPrevSibling.getPrevSibling.getNode.getElementType != ScalaElementTypes.MATCH_STMT ||
!parent.getPrevSibling.getPrevSibling.getLastChild.isInstanceOf[PsiErrorElement]))
}
val DUMMY_IDENTIFIER = "IntellijIdeaRulezzz"
def checkClassWith(clazz: ScTypeDefinition, additionText: String, manager: PsiManager): Boolean = {
val classText: String = clazz.getText
val text = removeDummy(classText + " " + additionText)
val DUMMY = "dummy."
val dummyFile = PsiFileFactory.getInstance(manager.getProject).
createFileFromText(DUMMY + ScalaFileType.SCALA_FILE_TYPE.getDefaultExtension,
ScalaFileType.SCALA_FILE_TYPE, text).asInstanceOf[ScalaFile]
!checkErrors(dummyFile)
}
def checkElseWith(text: String, manager: PsiManager): Boolean = {
val DUMMY = "dummy."
val dummyFile = PsiFileFactory.getInstance(manager.getProject).
createFileFromText(DUMMY + ScalaFileType.SCALA_FILE_TYPE.getDefaultExtension,
ScalaFileType.SCALA_FILE_TYPE, "class a {\\n" + text + "\\n}").asInstanceOf[ScalaFile]
!checkErrors(dummyFile)
}
def checkDoWith(text: String, manager: PsiManager): Boolean = {
val DUMMY = "dummy."
val dummyFile = PsiFileFactory.getInstance(manager.getProject).
createFileFromText(DUMMY + ScalaFileType.SCALA_FILE_TYPE.getDefaultExtension,
ScalaFileType.SCALA_FILE_TYPE, "class a {\\n" + text + "\\n}").asInstanceOf[ScalaFile]
!checkErrors(dummyFile)
}
def checkTypeWith(typez: ScTypeElement, additionText: String, manager: PsiManager): Boolean = {
val typeText = typez.getText
val text = removeDummy("class a { x:" + typeText + " " + additionText + "}")
val DUMMY = "dummy."
val dummyFile = PsiFileFactory.getInstance(manager.getProject).
createFileFromText(DUMMY + ScalaFileType.SCALA_FILE_TYPE.getDefaultExtension,
ScalaFileType.SCALA_FILE_TYPE, text).asInstanceOf[ScalaFile]
val value = !checkErrors(dummyFile)
value
}
def checkAnyTypeWith(typez: ScTypeElement, additionText: String, manager: PsiManager): Boolean = {
val typeText = typez.getText
val text = removeDummy("class a { val x:" + typeText + " " + additionText + "}")
val DUMMY = "dummy."
val dummyFile = PsiFileFactory.getInstance(manager.getProject).
createFileFromText(DUMMY + ScalaFileType.SCALA_FILE_TYPE.getDefaultExtension,
ScalaFileType.SCALA_FILE_TYPE, text).asInstanceOf[ScalaFile]
val value = !checkErrors(dummyFile)
value
}
def checkAnyWith(typez: PsiElement, additionText: String, manager: PsiManager): Boolean = {
val typeText = typez.getText
val text = removeDummy("class a { " + typeText + " " + additionText + "}")
val DUMMY = "dummy."
val dummyFile = PsiFileFactory.getInstance(manager.getProject).
createFileFromText(DUMMY + ScalaFileType.SCALA_FILE_TYPE.getDefaultExtension,
ScalaFileType.SCALA_FILE_TYPE, text).asInstanceOf[ScalaFile]
!checkErrors(dummyFile)
}
def removeDummy(text: String): String = {
replaceDummy(text, "")
}
def replaceDummy(text: String, to: String): String = {
if (text.indexOf(DUMMY_IDENTIFIER) != -1) {
text.replaceAll("\\\\w*" + DUMMY_IDENTIFIER,to)
} else text
}
def checkNewWith(news: ScNewTemplateDefinition, additionText: String, manager: PsiManager): Boolean = {
val newsText = news.getText
val text = removeDummy("class a { " + newsText + " " + additionText + "}")
val DUMMY = "dummy."
val dummyFile = PsiFileFactory.getInstance(manager.getProject).
createFileFromText(DUMMY + ScalaFileType.SCALA_FILE_TYPE.getDefaultExtension,
ScalaFileType.SCALA_FILE_TYPE, text).asInstanceOf[ScalaFile]
!checkErrors(dummyFile)
}
def checkReplace(elem: PsiElement, additionText: String, manager: PsiManager): Boolean = {
val typeText = elem.getText
var text = "class a { " + typeText + "}"
if (text.indexOf(DUMMY_IDENTIFIER) == -1) return false
text = replaceDummy(text, " "+ additionText+ " ")
val DUMMY = "dummy."
val dummyFile = PsiFileFactory.getInstance(manager.getProject).
createFileFromText(DUMMY + ScalaFileType.SCALA_FILE_TYPE.getDefaultExtension,
ScalaFileType.SCALA_FILE_TYPE, text).asInstanceOf[ScalaFile]
!checkErrors(dummyFile)
}
private def checkErrors(elem: PsiElement): Boolean = {
elem match {
case _: PsiErrorElement => return true
case _ =>
}
val iterator = elem.getChildren.iterator
while (iterator.hasNext) {
val child = iterator.next()
if (checkErrors(child)) return true
}
false
}
/**
* @param leaf Start PsiElement
* @return (End PsiElement, ContainingFile.isScriptFile)
*/
def processPsiLeafForFilter(leaf: PsiElement): (PsiElement, Boolean) = Option(leaf) map {
l => l.getContainingFile match {
case scriptFile: ScalaFile if scriptFile.isScriptFile() => (leaf.getParent, true)
case scalaFile: ScalaFile => (leaf, false)
case _ => (null, false)
}
} getOrElse (null, false)
}
|
advancedxy/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/completion/ScalaCompletionUtil.scala
|
Scala
|
apache-2.0
| 10,849 |
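/*
 * Illustrative sketch, not part of the plugin source above: the parameter-name
 * de-duplication loop from generateAnonymousFunctionText, applied to plain
 * strings instead of ScType-based name suggestions.
 */
object NameDedupSketch extends App {
  def dedup(suggested: Seq[String]): Seq[String] = {
    val taken = scala.collection.mutable.ArrayBuffer[String]()
    suggested.foreach { name =>
      var candidate = name
      if (taken.contains(candidate)) {
        var count = 0
        candidate = name + count
        while (taken.contains(candidate)) {
          count += 1
          candidate = name + count
        }
      }
      taken += candidate
    }
    taken.toList
  }

  // Repeated suggestions get numeric suffixes, just like the original loop.
  println(dedup(Seq("x", "x", "x"))) // List(x, x0, x1)
}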
package alexsmirnov.pbconsole.octoprint
import org.scalatra.FutureSupport
import org.scalatra.NoContent
import org.scalatra.NotFound
import org.scalatra.ScalatraServlet
import alexsmirnov.pbconsole.PrinterModel
import alexsmirnov.pbconsole.print.JobModel
import scalafx.application.Platform
import spray.json.DefaultJsonProtocol
import spray.json.pimpString
import alexsmirnov.pbconsole.PrinterModel.Heater
object VersionRoute extends DefaultJsonProtocol {
case class VersionResponse(api: String, server: String)
implicit val vrFormat = jsonFormat2(VersionResponse)
}
class VersionRoute extends ScalatraServlet with SprayJsonSupport {
import VersionRoute._
get("/") {
VersionResponse("0.1", "0.01").toJson
}
}
|
alexsmirnov/printrbot-g2-console
|
src/main/scala/alexsmirnov/pbconsole/octoprint/VersionRoute.scala
|
Scala
|
bsd-3-clause
| 727 |
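/*
 * Illustrative sketch, not part of the servlet source above: the JSON shape the
 * GET / route produces, rebuilt standalone with the same spray-json format.
 */
import spray.json._
import DefaultJsonProtocol._

object VersionJsonSketch extends App {
  case class VersionResponse(api: String, server: String) // mirrors VersionRoute.VersionResponse
  implicit val vrFormat = jsonFormat2(VersionResponse)

  println(VersionResponse("0.1", "0.01").toJson.compactPrint) // {"api":"0.1","server":"0.01"}
}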
package org.oxygen.redio.items
import net.minecraft.item.Item
import org.oxygen.redio.CreativeTab
object ItemComputeCore extends Item
{
setCreativeTab(CreativeTab)
setMaxStackSize(1)
setUnlocalizedName("compute_core")
}
|
chenzhuoyu/RedIO
|
src/main/scala/org/oxygen/redio/items/ItemComputeCore.scala
|
Scala
|
lgpl-2.1
| 225 |