code | repo_name | path | language | license | size
---|---|---|---|---|---
package com.avsystem.scex
package compiler
import java.{lang => jl, util => ju}
import com.avsystem.scex.parsing.{ShiftInfo, ShiftInfoPositionMapping}
import scala.collection.immutable.SortedMap
/**
* Created: 24-10-2013
* Author: ghik
*/
class ShiftInfoPositionMappingTest extends ScexFunSuite {
test("empty mapping test") {
val mapping = new ShiftInfoPositionMapping(SortedMap.empty, SortedMap.empty)
val reverse = mapping.reverse
for (i <- -5 to 5) {
assert(mapping(i) === i)
}
for (i <- -5 to 5) {
assert(reverse(i) === i)
}
}
test("something was added at the beginning") {
val added = 5
val mapping = new ShiftInfoPositionMapping(SortedMap(
0 -> ShiftInfo(0, added)
), null)
for (i <- -5 until 0) {
assert(mapping(i) === i)
}
for (i <- 0 to 10) {
assert(mapping(i) === i + added)
}
}
test("something was removed at the beginning") {
val removed = 5
val mapping = new ShiftInfoPositionMapping(SortedMap(
0 -> ShiftInfo(0, -removed)
), null)
for (i <- -5 until 0) {
assert(mapping(i) === i)
}
for (i <- 0 to removed) {
assert(mapping(i) === 0)
}
for (i <- removed to 10) {
assert(mapping(i) == i - removed)
}
}
test("something was added and removed at the beginning") {
val added = 3
val removed = 5
val mapping = new ShiftInfoPositionMapping(SortedMap(
0 -> ShiftInfo(0, added, removed)
), null)
for (i <- -5 until 0) {
assert(mapping(i) === i)
}
for (i <- 0 until removed) {
assert(mapping(i) === 0)
}
for (i <- removed to 10) {
assert(mapping(i) == i - removed + added)
}
}
test("more complex test") {
/*
0123 45678 901234567
oooraaaaaoorrraorroooooo
012334567890000122234567
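(reading: top row = offsets in the original text, middle row = o/a/r for kept/added/removed characters, bottom row = offsets in the resulting text)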
*/
val mapping = new ShiftInfoPositionMapping(SortedMap(
3 -> ShiftInfo(0, 5, 1),
6 -> ShiftInfo(4, 1, 3),
10 -> ShiftInfo(2, 0, 2)
), null)
val results = Array(0, 1, 2, 3, 8, 9, 10, 10, 10, 11, 12, 12, 12, 13, 14, 15, 16, 17, 18)
for (i <- -5 until 0) {
assert(mapping(i) === i)
}
for (i <- 0 until results.length) {
assert(mapping(i) === results(i))
}
}
}
| pnf/scex | scex-derived/src/test/scala/com/avsystem/scex/compiler/ShiftInfoPositionMappingTest.scala | Scala | apache-2.0 | 2,265 |
/*******************************************************************************
* This file is part of tiscaf.
*
* tiscaf is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* tiscaf is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with tiscaf. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/
package tiscaf
import java.nio.ByteBuffer
import java.nio.channels.{ Selector, SelectionKey, SocketChannel }
import javax.net.ssl._
import scala.concurrent.ExecutionContext
private trait HPeer extends HLoggable {
protected[this] val plexerBarrier = new java.util.concurrent.CyclicBarrier(1)
//------------------- to implement ------------------------------
def plexer: HPlexer
def key: SelectionKey
def bufferSize: Int
val acceptor: HAcceptor
def submit(toRun: =>Unit): Unit
implicit def talksExe: ExecutionContext
//-------------------------------------------------------------------
final def connClose = plexer.needToClose(key)
final def connWrite = plexer.needToWrite(key)
final def connRead = plexer.needToRead(key)
final def channel: SocketChannel = key.channel.asInstanceOf[SocketChannel]
final def remoteIp = channel.socket.getInetAddress.getHostAddress
def dispose: Unit
// core place - main iteration
def readChannel: Unit
final def proceedToWrite: Unit = plexerBarrier.reset // called by plexer
// ByteBuffer.wrap(ar, offset, length) was slower in my tests than putting into the array directly
def writeToChannel(ar: Array[Byte], offset: Int, length: Int): Unit
// in my experience this only happens with big (say, >= 64KB) buffers
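// registers the channel with a temporary selector and keeps writing until the buffer is drained or plexer.timeoutMillis elapses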
final def writeAside(buf: ByteBuffer, sc: SocketChannel): Boolean = {
val tmpSelector = Selector.open
val theKey = sc.register(tmpSelector, SelectionKey.OP_WRITE)
theKey.attach(System.currentTimeMillis)
@scala.annotation.tailrec
def nextSelect: Boolean = if (buf.hasRemaining) {
if (theKey.attachment.asInstanceOf[Long] + plexer.timeoutMillis < System.currentTimeMillis) {
warning("Connection timeout")
tmpSelector.close
false
} else {
tmpSelector.select(200)
val it = tmpSelector.selectedKeys.iterator
if (it.hasNext) {
val aKey = it.next
it.remove
sc.write(buf)
aKey.attach(System.currentTimeMillis)
}
nextSelect
}
} else { tmpSelector.close; true }
nextSelect
}
}
private trait HSimplePeer extends HPeer {
private val theBuf = ByteBuffer.allocate(bufferSize)
final def dispose = {
plexerBarrier.reset
connClose
}
final def readChannel: Unit = try {
def doTalkItself =
acceptor.talk onSuccess {
case PeerWant.Read => acceptor.in.reset; connRead // new alive request/response
case PeerWant.Close => connClose
case x => sys.error("unexpected PeerWant value " + x)
}
theBuf.clear
val wasRead = channel.read(theBuf)
if (wasRead == -1) dispose // counterpart peer wants to write but writes nothing
else
submit {
acceptor.accept(theBuf.array.take(wasRead))
acceptor.in.reqState match {
case HReqState.IsReady => acceptor.resolveAppLet; doTalkItself
case HReqState.IsInvalid => dispose
case _ /* WaitsForXxx */ => connRead
}
}
} catch {
case e: Exception =>
error("A problem occurred while reading request data", e)
dispose
}
// ByteBuffer.wrap(ar, offset, length) was slower in my tests than putting into the array directly
final def writeToChannel(ar: Array[Byte], offset: Int, length: Int) = {
if (length > 0) {
theBuf.clear
theBuf.put(ar, offset, length)
theBuf.flip
plexerBarrier.await
channel.write(theBuf)
if (theBuf.hasRemaining && !writeAside(theBuf, channel)) dispose
}
// it is valid to use connWrite here even for case when HOut will switch to
// read later:
// - counterpart peer will not be ready to read, and selector will not
// select a key,
// - if by some reason selector still selects a key for writing, it will just
// reset not awaited barrier, as far as HOut doesn't write anything.
connWrite
}
}
private trait HSslPeer extends HPeer {
//------------------- to implement ------------------------------
def engine: SSLEngine
//-------------------------------------------------------------------
val session = engine.getSession
private val appBuffer = ByteBuffer.allocate(session.getApplicationBufferSize)
private val netBuffer = ByteBuffer.allocate(session.getPacketBufferSize)
final def dispose = {
plexerBarrier.reset
engine.closeOutbound
appBuffer.clear
// flush the net buffer
channel.write(netBuffer)
netBuffer.clear
// send the close message
val res = engine.wrap(appBuffer, netBuffer)
res.getStatus match {
case SSLEngineResult.Status.CLOSED =>
netBuffer.flip
channel.write(netBuffer)
case st => sys.error("Invalid closing state: " + st)
}
connClose
}
final def readChannel: Unit = try {
def doTalkItself =
acceptor.talk match {
case PeerWant.Read => acceptor.in.reset; connRead // new alive request/response
case PeerWant.Close => connClose
case x => sys.error("unexpected PeerWant value " + x)
}
// clear the buffers before processing
appBuffer.clear
netBuffer.clear
val wasRead = channel.read(netBuffer)
if (wasRead == -1) dispose // counterpart peer wants to write but writes nothing
else {
submit {
netBuffer.flip
var read = 0
var continue = true
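// decrypt the network buffer through the SSL engine, reading more bytes from the channel on BUFFER_UNDERFLOW and stopping once the engine reports CLOSED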
while (continue && netBuffer.hasRemaining) {
val res = engine.unwrap(netBuffer, appBuffer)
read += res.bytesProduced
import SSLEngineResult.Status
if (res.getStatus == Status.BUFFER_UNDERFLOW) {
netBuffer.position(netBuffer.limit)
netBuffer.limit(netBuffer.capacity)
channel.read(netBuffer)
netBuffer.flip
} else if (res.getStatus == Status.CLOSED) {
continue = false
}
}
acceptor.accept(appBuffer.array.take(read))
acceptor.in.reqState match {
case HReqState.IsReady => acceptor.resolveAppLet; doTalkItself
case HReqState.IsInvalid => dispose
case _ /* WaitsForXxx */ => connRead
}
}
}
} catch {
case e: Exception =>
error("A problem occurred while readin ssl request data", e)
dispose
}
// ByteBuffer.wrap(ar, offset, length) was slower in my tests than putting into the array directly
final def writeToChannel(ar: Array[Byte], offset: Int, length: Int) = {
// flush the net buffer
if (netBuffer.hasRemaining) {
plexerBarrier.await
channel.write(netBuffer)
}
if (length > 0) {
// clear the buffers before processing
appBuffer.clear
netBuffer.clear
appBuffer.put(ar, offset, length)
appBuffer.flip
var continue = true
while (continue && appBuffer.hasRemaining) {
val res = engine.wrap(appBuffer, netBuffer)
import SSLEngineResult.Status
if (res.getStatus == Status.BUFFER_UNDERFLOW) {
appBuffer.position(appBuffer.limit)
appBuffer.limit(appBuffer.capacity)
appBuffer.flip
} else if (res.getStatus == Status.CLOSED) {
continue = false
}
}
netBuffer.flip
plexerBarrier.await
channel.write(netBuffer)
if (netBuffer.hasRemaining && !writeAside(netBuffer, channel)) dispose
}
// it is valid to use connWrite here even for case when HOut will switch to
// read later:
// - counterpart peer will not be ready to read, and selector will not
// select a key,
// - if by some reason selector still selects a key for writing, it will just
// reset not awaited barrier, as far as HOut doesn't write anything.
connWrite
}
}
| gnieh/tiscaf | core/src/main/scala/tiscaf/HPeer.scala | Scala | lgpl-3.0 | 8,592 |
package org.ffmmx.example.musicplayer
import org.scaloid.common._
import android.widget._
import android.content.{Intent, Context, BroadcastReceiver}
import android.widget.SeekBar.OnSeekBarChangeListener
import android.view._
import scala.collection.mutable.{ListBuffer, Stack}
import android.os.Environment
import java.io.{FileFilter, File}
import scala.collection.mutable
import scala.annotation.tailrec
import scala.collection.JavaConverters._
class MainActivity extends SActivity {
override def basis: SActivity = this
implicit override val ctx: SActivity = basis
val receiver: BroadcastReceiver = new MusicPlayerBroadcastReceiver
var nowPlay: Song = new Song(R.raw.test_music, "test", "semon", 290000)
val playList: ListBuffer[Song] = ListBuffer(nowPlay)
var previousPlay: Song = nowPlay
var nextPlay: Song = nowPlay
var songTitleView: TextView = _
var songAuthorView: TextView = _
var songTimeLengthView: TextView = _
var playPauseButton: ImageButton = _
var previousButton: ImageButton = _
var nextButton: ImageButton = _
var seekBar: SeekBar = _
var playListView: ListView = _
onCreate({
setContentView(R.layout.main)
songTitleView = find[TextView](R.id.songTitleView)
songAuthorView = find[TextView](R.id.songAuthorView)
songTimeLengthView = find[TextView](R.id.songTimeLengthView)
// play/pause button
playPauseButton = find[ImageButton](R.id.playPauseButton)
.onClick({
playPause()
})
// previous-track button
previousButton = find[ImageButton](R.id.previousButton)
.onClick({
previous()
})
// next-track button
nextButton = find[ImageButton](R.id.nextButton)
.onClick({
next()
})
// seek bar
seekBar = find[SeekBar](R.id.seekBar).onSeekBarChangeListener(new OnSeekBarChangeListener {
def onProgressChanged(seekBar: SeekBar, progress: Int, fromUser: Boolean) {
// called while the progress changes
}
def onStopTrackingTouch(seekBar: SeekBar) {
// called when the user stops dragging
nowPlay.curTime = (seekBar.progress / 100.0 * nowPlay.length).toInt
sendBroadcast(
new Intent(Constants.MUSIC_SERVICE_ACTION)
.putExtra("action", Constants.PLAY_ACTION_SEEK)
.putExtra("song", nowPlay)
)
sendBroadcast(new Intent(Constants.MUSIC_SERVICE_ACTION)
.putExtra("action", Constants.PLAY_ACTION_RESUME_UPDATE_SEEKBAR))
}
def onStartTrackingTouch(seekBar: SeekBar) {
// called when the user starts dragging
sendBroadcast(new Intent(Constants.MUSIC_SERVICE_ACTION)
.putExtra("action", Constants.PLAY_ACTION_SUSPEND_UPDATE_SEEKBAR))
}
})
// todo: play list
playListView = find[ListView](R.id.playListView)
playListView.adapter(new PlayListAdapter(playList))
// register the player broadcast receiver
// receiver = new MusicPlayerBroadcastReceiver()
registerReceiver(receiver, Constants.MUSIC_PLAYER_ACTION)
// start the playback service
startService(SIntent[MusicPlayService])
})
onDestroy({
// unregister the broadcast receiver
unregisterReceiver(receiver)
})
override def onCreateOptionsMenu(menu: Menu): Boolean = {
getMenuInflater.inflate(R.menu.main_menu, menu)
super.onCreateOptionsMenu(menu)
}
def updateSeekBar(song: Song) {
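// progress is expressed as a percentage (0-100) of the song's current position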
seekBar.progress = song.curTime * 100 / song.length
seekBar.secondaryProgress = seekBar.progress
}
def prepare(song: Song) {
nowPlay = song
nowPlay.curTime = 0
}
/**
* Previous track
*/
def previous() {
prepare(previousPlay)
playPause()
}
/**
* Next track
*/
def next() {
prepare(nextPlay)
playPause()
}
override def onOptionsItemSelected(item: MenuItem): Boolean = {
item.getItemId match {
case R.id.mainmenu_add =>
// open a dialog to add music files
startActivity(SIntent[FileDialog])
case R.id.mainmenu_about =>
alert("关于", "一个用于测试的简单播放器")
case R.id.mainmenu_setting =>
// todo: open the settings screen to configure the media library or the folders to scan for music files
case R.id.mainmenu_quit =>
stopService(SIntent[MusicPlayService])
finish()
}
super.onOptionsItemSelected(item)
}
def playPause() {
if (nowPlay == null) {
try {
prepare(playList(0))
} catch {
case _: IndexOutOfBoundsException => // empty play list, nothing to prepare
}
}
if (nowPlay != null)
// send a play-or-pause request to the playback service
sendBroadcast(
new Intent(Constants.MUSIC_SERVICE_ACTION)
.putExtra("action", Constants.PLAY_ACTION_PLAYPAUSE)
.putExtra("song", nowPlay)
)
}
class PlayListItem(val seq: TextView, val author: TextView, val title: TextView, val length: TextView)
/**
* Play list adapter
*/
class PlayListAdapter(val data: ListBuffer[Song])(implicit val context: Context) extends BaseAdapter {
def getCount: Int = data.size
def getItem(position: Int): Song = data(position)
def getItemId(position: Int): Long = position
def getView(position: Int, convertView: View, parent: ViewGroup): View = {
var playListItem: PlayListItem = null
var resultView: View = convertView
if (resultView == null) {
resultView = LayoutInflater.from(context).inflate(R.layout.playlist, null)
playListItem = new PlayListItem(
resultView.find[TextView](R.id.seq),
resultView.find[TextView](R.id.author),
resultView.find[TextView](R.id.title),
resultView.find[TextView](R.id.length)
)
resultView.tag(playListItem)
}
else {
playListItem = resultView.tag.asInstanceOf[PlayListItem]
}
if (playListItem != null) {
playListItem.seq.text(position.toString)
playListItem.author.text = data(position).author
playListItem.title.text = data(position).title
playListItem.length.text = data(position).length / 1000 / 60 + ":" + data(position).length / 1000 % 60
}
resultView
}
}
}
class MusicPlayerBroadcastReceiver extends BroadcastReceiver {
def onReceive(context: Context, intent: Intent) {
// update the play/pause icon
val container = context.asInstanceOf[MainActivity]
val songTitleView: TextView = container.songTitleView
val songAuthorView: TextView = container.songAuthorView
val songTimeLengthView: TextView = container.songTimeLengthView
val playPauseButton = container.playPauseButton
if (intent.getExtras.containsKey("status"))
intent.getIntExtra("status", Constants.PLAY_STATUS_STOP) match {
case Constants.PLAY_STATUS_PLAY =>
playPauseButton.imageResource(R.drawable.pause_sel)
case _ =>
playPauseButton.imageResource(R.drawable.play_sel)
}
// update the song info in the UI
val song = intent.getSerializableExtra("song").asInstanceOf[Song]
if (song.author != null)
songAuthorView.text = song.author
if (song.title != null)
songTitleView.text = song.title
// update the elapsed/total time in the UI
if (song.curTime != 0) {
if (container.nowPlay != null)
container.nowPlay.curTime = song.curTime
songTimeLengthView.text = song.curTime / 1000 / 60 + ":" + song.curTime / 1000 % 60 + " / " + song.length / 1000 / 60 + ":" + song.length / 1000 % 60
}
// update the seek bar
if (!container.seekBar.isPressed)
container.updateSeekBar(song)
// update the play list
if(intent.getSerializableExtra("selectFiles") != null){
val files=intent.getSerializableExtra("selectFiles").asInstanceOf[List[File]]
// files.foreach(f => container.playList += new Song(container.get))
// todo: update the play list
}
}
}
class Song(val title: String, val author: String) extends Serializable {
var length: Int = 0
var bitrate: Int = 0
var star: Int = 0
var playTimes: Int = 0
var curTime: Int = 0
var filepath:String=_
var songId:Int=_
def this(songId: Int, title: String, author: String, length: Int, bitrate: Int, star: Int, playTimes: Int, curTime: Int) {
this( title, author)
this.length = length
this.bitrate = bitrate
this.star = star
this.playTimes = playTimes
this.curTime = curTime
this.songId = songId
}
def this(songId: Int, title: String, author: String, length: Int) {
this( title, author)
this.length = length
this.songId=songId
}
def this(title:String , author:String,filepath:String){
this(title,author)
this.filepath=filepath
}
}
object Constants {
// player broadcast action
val MUSIC_PLAYER_ACTION = "org.ffmmx.example.musicplayer.MusicPlayerActivity"
// playback service broadcast action
val MUSIC_SERVICE_ACTION = "org.ffmmx.example.musicplayer.MusicPlayerService"
// play status: stopped
val PLAY_STATUS_STOP = 0
// play status: playing
val PLAY_STATUS_PLAY = 1
// play status: paused
val PLAY_STATUS_PAUSE = 2
// play actions
val PLAY_ACTION_PLAYPAUSE = 0
val PLAY_ACTION_PAUSE = 1
val PLAY_ACTION_STOP = 2
val PLAY_ACTION_PREVIOUS = 3
val PLAY_ACTION_NEXT = 4
val PLAY_ACTION_SEEK = 5
val PLAY_ACTION_SUSPEND_UPDATE_SEEKBAR = 6
val PLAY_ACTION_RESUME_UPDATE_SEEKBAR = 7
}
class FileDialog extends SActivity {
val HOME_PATH = Environment.getExternalStorageDirectory.getPath
val LOCATION_LABEL = "位置: "
val locationList: ListBuffer[File] = ListBuffer[File]()
var current: File = new File(HOME_PATH)
override def basis = this
implicit override val ctx: SActivity = this
var location: TextView = _
var enterButton: Button = _
var fileListView: ListView = _
var allSelectCheckbox: CheckBox = _
val fileFilter = new FileFilter {
def accept(file: File): Boolean = {
file.getName match {
case ".android_secure" => false
case _ => true
}
}
}
onCreate {
setContentView(R.layout.file_dialog)
location = find[TextView](R.id.location)
enterButton = find[Button](R.id.filedialog_enter)
.onClick {
val adapter = fileListView.adapter.asInstanceOf[FileListAdapter]
sendBroadcast(new Intent(Constants.MUSIC_PLAYER_ACTION)
.putExtra("selectFiles", selectFiles(adapter.selecteds.filter(_._2).map(m => adapter.data(m._1)).toList // 发送选择的文件列表
)))
finish()
}
fileListView = find[ListView](R.id.fileListView)
allSelectCheckbox = find[CheckBox](R.id.filedialog_checkbox).onCheckedChanged {
// select-all checkbox implementation
val adapter = fileListView.adapter.asInstanceOf[FileListAdapter]
if (allSelectCheckbox.isChecked)
(0 until adapter.getCount).foreach(x => adapter.selecteds += (x -> true))
else (0 until adapter.getCount).foreach(x => adapter.selecteds += (x -> false))
}
location.text(LOCATION_LABEL + HOME_PATH)
if (Environment.getExternalStorageDirectory.canRead) {
fileListView.adapter(new FileListAdapter(Environment.getExternalStorageDirectory.listFiles(fileFilter).toList))
}
}
override def onOptionsItemSelected(item: MenuItem): Boolean = {
item.getItemId match {
case R.id.filedialogmenu_back =>
back()
case R.id.filedialogmenu_home =>
openDir(new File(HOME_PATH))
}
super.onOptionsItemSelected(item)
}
override def onCreateOptionsMenu(menu: Menu): Boolean = {
getMenuInflater.inflate(R.menu.file_dialog_menu, menu)
super.onCreateOptionsMenu(menu)
}
/**
* Open a directory
* @param dir the directory to open
*/
def openDir(dir: File) {
if (!dir.isDirectory)
throw new RuntimeException("dir必须为文件夹")
locationList += current
jump(dir)
}
/**
* Go back
*/
def back() {
if (!locationList.isEmpty) {
jump(locationList.remove(locationList.size - 1))
}
}
/**
* Jump to the given directory
* @param dir
*/
private def jump(dir: File) {
current = dir
location.text(LOCATION_LABEL + dir.getPath)
fileListView.adapter.asInstanceOf[FileListAdapter].data = dir.listFiles().toList
fileListView.adapter.asInstanceOf[FileListAdapter].notifyDataSetChanged()
}
/**
* Get the selected files, recursively expanding directories
*/
def selectFiles(files: List[File]): Array[File] = {
def findFiles(files: List[File]): List[File] = {
files.flatMap {
file => file.isDirectory match {
case false => List(file)
case true => findFiles(file.listFiles(fileFilter).toList)
}
}.toList
}
findFiles(files).toArray
}
class FileList(val checkbox: CheckBox, val img: ImageView, val filename: TextView)
class FileListAdapter(var data: List[File])(implicit context: Context) extends BaseAdapter {
var selecteds = Map[Int, Boolean]()
def getCount: Int = data.size
def getItem(position: Int): File = data(position)
def getItemId(position: Int): Long = position
def getView(position: Int, convertView: View, parent: ViewGroup): View = {
var fileList: FileList = null
var filelistView: View = convertView
filelistView match {
case null =>
filelistView = LayoutInflater.from(context).inflate(R.layout.filelist, null)
fileList = new FileList(filelistView.find[CheckBox](R.id.filelist_checkbox),
filelistView.find[ImageView](R.id.filelist_img),
filelistView.find[TextView](R.id.filelist_filename)
)
filelistView.tag(fileList)
case _ =>
fileList = filelistView.tag.asInstanceOf[FileList]
}
fileList.filename.text(data(position).getName)
fileList.img.imageResource(data(position).isDirectory match {
case true => R.drawable.gtk_directory
case false => R.drawable.gtk_file
})
fileList.checkbox.onCheckedChanged {
if (fileList.checkbox.isChecked)
selecteds += (position -> true)
else
selecteds -= position
}.setChecked(selecteds.getOrElse(position, false))
filelistView.onClick {
v =>
if (v.id != R.id.filedialog_checkbox) {
if (data(position).isDirectory)
openDir(data(position))
else if (fileList.checkbox.isChecked)
fileList.checkbox.setChecked(true)
else
fileList.checkbox.setChecked(false)
}
}
}
}
}
| firefoxmmx2/Android_MusicPlayer | src/org/ffmmx/example/musicplayer/activity.scala | Scala | apache-2.0 | 14,246 |
package scalera.moonrover
import scalera.moonrover.RoverProgram._
import scalera.moonrover.interpreter.Program
class SimulatorTest extends BaseTest("Simulator") {
it should "run the simulation and find out if the program is useless" in {
val sim = Simulator(
Program(
1 -> NOP,
2 -> LEFT,
3 -> GOTO(1)))
val (offSim,result) = sim.run
result shouldEqual None
}
}
| Scalera/moonrover | core/src/test/scala/scalera/moonrover/SimulatorTest.scala | Scala | apache-2.0 | 412 |
package cpup.mc.oldenMagic.content.runes
import cpup.mc.oldenMagic.api.oldenLanguage.runes.{TRuneType, TRune}
import cpup.mc.oldenMagic.api.oldenLanguage.runeParsing.TVerbRune
import cpup.mc.oldenMagic.api.oldenLanguage.casting.CastingContext
import net.minecraft.entity.Entity
import cpup.mc.lib.util.pos.BlockPos
import cpw.mods.fml.relauncher.{Side, SideOnly}
import net.minecraft.util.IIcon
import cpup.mc.oldenMagic.OldenMagicMod
import net.minecraft.nbt.NBTTagCompound
import cpup.mc.oldenMagic.api.oldenLanguage.PassiveSpellsContext
import cpup.mc.oldenMagic.api.oldenLanguage.textParsing.{TextRune, TParsingContext, TTransform}
import net.minecraft.client.renderer.texture.IIconRegister
import cpup.mc.lib.targeting.TTarget
class ProtectRune extends TRune with TVerbRune {
def runeType = ProtectRune
def writeToNBT(nbt: NBTTagCompound) {}
override def act(context: CastingContext, targets: List[TTarget]) {
context match {
case PassiveSpellsContext(player, caster, spell, action: DamageAction) =>
// println(action.amt, caster.power)
action.amt -= caster.usePower(caster.power / 10)
case _ =>
}
}
def act(context: CastingContext, entity: Entity) {}
def act(context: CastingContext, pos: BlockPos) {}
@SideOnly(Side.CLIENT)
def icons = List(ProtectRune.icon)
}
object ProtectRune extends TRuneType {
def mod = OldenMagicMod
def name = s"${mod.ref.modID}:protect"
def runeClass = classOf[ProtectRune]
def readFromNBT(nbt: NBTTagCompound) = new ProtectRune
@SideOnly(Side.CLIENT)
var icon: IIcon = null
@SideOnly(Side.CLIENT)
def registerIcons(register: IIconRegister) {
icon = register.registerIcon(s"${mod.ref.modID}:runes/protect")
}
}
object ProtectTransform extends TTransform {
def transform(context: TParsingContext, content: String) = new ProtectRune
}
| CoderPuppy/oldenmagic-mc | src/main/scala/cpup/mc/oldenMagic/content/runes/ProtectRune.scala | Scala | mit | 1,811 |
package com.datawizards.splot.examples.charts.bar
import com.datawizards.splot.api.implicits._
object BarChartWithSimpleAPI extends App {
SPlot.plotBar(Seq(1.0, 4.0, 9.0))
SPlot.plotBar(Seq("a","b","c"), Seq(1.0, 4.0, 9.0))
}
| piotr-kalanski/SPlot | src/main/scala/com/datawizards/splot/examples/charts/bar/BarChartWithSimpleAPI.scala | Scala | apache-2.0 | 233 |
package redmine4s.api.resource
import org.scalatest.{DiagrammedAssertions, FlatSpec}
class EnumerationResourceSpec extends FlatSpec with DiagrammedAssertions {
}
| tomingtoming/redmine4s | src/test/scala/redmine4s/api/resource/EnumerationResourceSpec.scala | Scala | apache-2.0 | 164 |
package cc.factorie.app.nlp.hcoref
import java.io._
import scala.io.Source
import cc.factorie._
import cc.factorie.util.{VectorUtils, EvaluatableClustering, NonValidatingXML}
import cc.factorie.app.nlp._
import cc.factorie.app.nlp.pos.OntonotesForwardPosTagger
import cc.factorie.app.nlp.ner.NoEmbeddingsConllStackedChainNer
import cc.factorie.app.nlp.coref.ParseForwardCoref
import cc.factorie.app.nlp.parse.OntonotesTransitionBasedParser
import scala.util.Random
import cc.factorie.app.nlp.segment.{DeterministicSentenceSegmenter, DeterministicTokenizer}
import cc.factorie.app.nlp.phrase.Phrase
import cc.factorie.variable.{DenseDoubleBagVariable, CategoricalDomain, BagOfWordsVariable}
import java.util.zip.GZIPInputStream
import scala.collection.mutable.{ArrayBuffer, HashMap}
/**
* @author John Sullivan
*/
object TACCorefWithFactorie {
def main(args:Array[String]) {
val tacRoot = args(0)
val evalPath = args(1)
val map = new Tac2009FlatDocumentMap(tacRoot)
val refMentions = ProcessQueries.loadQueries(evalPath + ".xml", evalPath + ".tab")
println("loaded %d mentions/queries in %d entities.".format(refMentions.size, refMentions.map(_.entId).toSet.size))
val pipelineElements = Seq(
DeterministicTokenizer,
DeterministicSentenceSegmenter,
OntonotesForwardPosTagger,
NoEmbeddingsConllStackedChainNer,
OntonotesTransitionBasedParser,
ParseForwardCoref
)
val pipeline = DocumentAnnotatorPipeline(DocumentAnnotatorPipeline.defaultDocumentAnnotationMap.toMap, Nil, pipelineElements.flatMap(_.postAttrs))
println("Processing ref mentions and documents: ")
refMentions.par.foreach{ rMention =>
val doc = new Document(map.getDoc(rMention.docId).toIterator.mkString("\n")).setName(rMention.docId)
rMention.doc = Some(doc)
rMention.getTokenSpan.map(ts => doc.getCoref.addMention(new Phrase(ts))) // we add our gold mentions before coref and processing
pipeline.process(doc)
print(".")
}
val converter = new RefMentionConverter(pipeline)
val mentions = refMentions.flatMap(converter.toDocEntNode).toSeq
println("Found %d mentions in documents out of %d total mention (%.4f \\\\%)".format(mentions.size, refMentions.size, mentions.size.toDouble/refMentions.size))
val splitPoint = (mentions.size * 0.75).toInt
val (train, test) = mentions.splitAt(splitPoint)
println("Split into %d training and %d testing".format(train.size, test.size))
implicit val rand = new Random()
val tacCoref = new DocEntityCoref {implicit val random: Random = rand
def estimateIterations(mentionCount: Int) = mentionCount * 100
val model = new DocEntityCorefModel(4.0, 0.25, 1.0, 2.0, 0.25, 1.0, 0.25, 3.0, 0.25, 1.0, 0.25)
val autoStopThreshold = 10000
}
val sampler = tacCoref.getSampler(test)
sampler.infer
}
}
object TACCoref {
//val tagger = new OntonotesForwardPosTagger()
def main(args:Array[String]) {
val tacRoot = args(0)
val evalPath = args(1)
val embeddingFile = args(2)
val embeddings = EmbeddingSpace.fromFile(embeddingFile)
val map = new Tac2009FlatDocumentMap(tacRoot)
val refMentions = ProcessQueries.loadQueries(evalPath + ".xml", evalPath + ".tab")
val mentions = refMentions.flatMap{ rMention =>
val doc = new Document(map.getDoc(rMention.docId).toIterator.mkString("\n")).setName(rMention.docId)
DeterministicTokenizer.process(doc)
DeterministicSentenceSegmenter.process(doc)
rMention.doc = Some(doc)
val tokenSpanOpt = doc.getSectionByOffsets(rMention.getOffsets._1, rMention.getOffsets._2).getOrElse(doc.asSection).offsetSnapToTokens(rMention.getOffsets._1, rMention.getOffsets._2)
if(tokenSpanOpt.isEmpty) {
println("for doc %s didn't find token span from name %s and offsets: %s".format(rMention.docId, rMention.name, rMention.getOffsets))
}
tokenSpanOpt.map{ tokenSpan =>
val nameBag = new BagOfWordsVariable()
val contextBag = new BagOfWordsVariable()
val nerBag = new BagOfWordsVariable()
val mentionBag = new BagOfWordsVariable()
val numberBag = new BagOfWordsVariable()
val truth = new BagOfWordsVariable()
val contextVec = new DenseDoubleBagVariable(50)
nameBag ++= tokenSpan.tokens.map(_.string)
contextBag ++= tokenSpan.contextWindow(10).groupBy(_.string).mapValues(_.size.toDouble)
contextVec.set(embeddings.embedPhrase(contextBag.value.asHashMap.keySet.toSeq))(null)
nerBag += rMention.entType
truth += rMention.entId
new Mention[DenseDocEntityVars](new DenseDocEntityVars(nameBag, contextBag, nerBag, contextVec, numberBag, truth), rMention.id)(null)
}
}
println("done finding token spans and building mentions")
val splitPoint = (mentions.size * 0.75).toInt
val (train, test) = mentions.splitAt(splitPoint)
println("Split into %d training and %d testing".format(train.size, test.size))
implicit val rand = new Random()
class DocEntityModel(namesWeights:Double, namesShift:Double, nameEntropy:Double, contextsWeight:Double, contextsShift:Double, matchScore:Double, matchPenalty:Double, denseContextWeight:Double, denseContextShift:Double) extends CorefModel[DenseDocEntityVars] {
this += new ChildParentCosineDistance(namesWeights, namesShift, {v:DenseDocEntityVars => v.names})
this += new ChildParentCosineDistance(contextsWeight, contextsShift, {v:DenseDocEntityVars => v.context})
this += new MatchConstraint(matchScore, matchPenalty, {v:DenseDocEntityVars => v.nerType})
this += new DenseCosineDistance(denseContextWeight, denseContextShift, {v:DenseDocEntityVars => v.contextVec})
this += new BagOfWordsEntropy(nameEntropy, {v:DenseDocEntityVars => v.names})
}
val model = new DocEntityModel(1.0, -0.25, 0.5, 1.0, -0.25, 1.0, -10.0, 1.0, -0.25)
val trainer = new CorefSampler[DenseDocEntityVars](model, train, train.size * 100)
with AutoStoppingSampler[DenseDocEntityVars]
with CanopyPairGenerator[DenseDocEntityVars]
with NoSplitMoveGenerator[DenseDocEntityVars]
with DebugCoref[DenseDocEntityVars]
with TrainingObjective[DenseDocEntityVars] {
def newInstance(implicit d: DiffList) = new Node[DenseDocEntityVars](new DenseDocEntityVars())
val autoStopThreshold = 10000
}
trainer.train(100000)
println(trainer.model.parameters.tensors)
val sampler = new CorefSampler[DenseDocEntityVars](model, test, test.size * 100)
with AutoStoppingSampler[DenseDocEntityVars]
with CanopyPairGenerator[DenseDocEntityVars]
with NoSplitMoveGenerator[DenseDocEntityVars]
with DebugCoref[DenseDocEntityVars]
with TrainingObjective[DenseDocEntityVars] {
def newInstance(implicit d: DiffList) = new Node[DenseDocEntityVars](new DenseDocEntityVars())
val autoStopThreshold = 10000
}
sampler.infer
println(EvaluatableClustering.evaluationString(test.predictedClustering, test.trueClustering))
val goldMap = test.map { mention =>
mention.variables.truth.value.asHashMap.keySet.head -> mention.uniqueId
}.groupBy(_._1).mapValues(_.map(_._2).toSet)
val predMap = test.map{m:Node[DenseDocEntityVars] => m.root}.toSet.map { entities:Node[DenseDocEntityVars] =>
entities.variables.truth.value.topWord -> entities.mentions.map(_.uniqueId).toSet
}.toMap
//println(LinkingScorer.scoreString(predMap, goldMap))
}
}
/**
* Takes a docId and returns the raw text of the corresponding document
*/
trait DocumentMap {
def getDoc(docId:String):BufferedReader
}
class Tac2009FlatDocumentMap(tacRoot:String) extends DocumentMap {
def getDoc(docId:String):BufferedReader = {
val filePath = s"$tacRoot/$docId.sgm"
new BufferedReader(new FileReader(filePath))
}
}
object ProcessQueries {
def loadQueries(queryXMLFile:String, queryTabFile:String):Iterable[ReferenceMention] = {
val entMap = Source.fromFile(queryTabFile).getLines().map { line =>
val Array(mentId, entId, entType) = line.split("\\s+")
mentId -> (entId, entType)
}.toMap
NonValidatingXML.loadFile(queryXMLFile).\\("kbpentlink").\\("query").map { qXML =>
val id = (qXML \ "@id").text.trim
val name = (qXML \ "name").text.trim
val docName = (qXML \ "docid").text.trim
val beg = qXML \ "beg"
val end = qXML \ "end"
assert(beg.isEmpty == end.isEmpty)
val offsets:Option[(Int, Int)] = if (beg.isEmpty || end.isEmpty) None else Some(beg.text.toInt, end.text.toInt)
ReferenceMention(id, name, docName, offsets, entMap(id)._1, entMap(id)._2)
}
}
}
case class ReferenceMention(id:String, name:String, docId:String, offsets:Option[(Int, Int)], entId:String, entType:String) {
var doc:Option[Document] = None
def getOffsets:(Int, Int) = offsets.getOrElse {
val start = doc.get.string.replaceAll("""-\n""", "-").replaceAll("""\n""", " ").indexOfSlice(name)
val end = start + name.length - 1
start -> end
}
def getTokenSpan = doc.get.getSectionByOffsets(this.getOffsets._1, this.getOffsets._2).getOrElse(doc.get.asSection).offsetSnapToTokens(this.getOffsets._1, this.getOffsets._2)
}
object RefMentionConverterNoPipeline {
def toDocEntNode(ref:ReferenceMention):Option[Mention[DocEntityVars]] = {
val doc = ref.doc.get
DeterministicTokenizer.process(doc)
DeterministicSentenceSegmenter.process(doc)
val offsetOpt = ref.offsets match {
case None =>
ref.name.r.findFirstMatchIn(doc.string).map(m => m.start -> m.end)
case otw => otw
}
offsetOpt.flatMap{ case (s, e) =>
doc.getSectionByOffsets(s, e).flatMap(_.offsetSnapToTokens(s, e)) match {
case Some(refSpan) =>
implicit val d:DiffList = null
val xMent = new Mention[DocEntityVars](new DocEntityVars())
xMent.variables.names ++= refSpan.map{t:Token => t.lemmaString}.toCountBag
xMent.variables.context ++= refSpan.contextWindow(10).map(_.lemmaString).toCountBag
Option(doc.coref).flatMap{_.findOverlapping(refSpan)} match {
case Some(ment) =>
xMent.variables.++=(DocEntityVars.fromWithinDocEntity(ment.entity))(null)
xMent.withinDocEntityId = ment.entity.uniqueId
case None => println("Could not find coref or align mention: " + ref)
}
Some(xMent)
case None =>
println("WARNING: Failed to find tokens for reference mention: " + ref)
None
}
}
}
}
class RefMentionConverter(val pipeline:DocumentAnnotationPipeline) {
def toDocEntNode(ref:ReferenceMention):Option[Mention[DocEntityVars]] = {
val doc = pipeline.process(ref.doc.get)
val offsetOpt = ref.offsets match {
case None =>
ref.name.r.findFirstMatchIn(doc.string).map(m => m.start -> m.end)
case otw => otw
}
offsetOpt.flatMap{ case (s, e) =>
doc.getSectionByOffsets(s, e).flatMap(_.offsetSnapToTokens(s, e)) match {
case Some(refSpan) =>
implicit val d:DiffList = null
val xMent = new Mention[DocEntityVars](new DocEntityVars(), ref.id)
xMent.variables.names ++= refSpan.map{t:Token => t.lemmaString}.toCountBag
xMent.variables.context ++= refSpan.contextWindow(10).map(_.lemmaString).toCountBag
xMent.variables.truth += ref.entId
Option(doc.coref).flatMap{_.findOverlapping(refSpan)} match {
case Some(ment) =>
xMent.variables.++=(DocEntityVars.fromWithinDocEntity(ment.entity))(null)
xMent.withinDocEntityId = ment.entity.uniqueId
case None => println("Could not find coref or align mention: " + ref)
}
Some(xMent)
case None =>
println("WARNING: Failed to find tokens for reference mention: " + ref)
None
}
}
}
}
object GenerateEmbeddings {
def main(args:Array[String]) {
val tacRoot = args(0)
val evalPath = args(1)
val embeddingFilename = args(2)
val map = new Tac2009FlatDocumentMap(tacRoot)
val refMentions = ProcessQueries.loadQueries(evalPath + ".xml", evalPath + ".tab")
val tokens = refMentions.map{ rMention =>
val doc = new Document(map.getDoc(rMention.docId).toIterator.mkString("\n")).setName(rMention.docId)
DeterministicTokenizer.process(doc)
DeterministicSentenceSegmenter.process(doc)
doc.tokens.map(_.lemmaString)
}
println("loaded and tokenized, starting embeddings")
val dimensions = 50
val iterations = 10
val regularizer = 10
val learningRate = 0.1
val random = new scala.util.Random(0)
val domain = new CategoricalDomain[String]()
val space = new EmbeddingSpace(domain,dimensions,random)
println("embeddings initialized")
space.learnEmbeddingsFromText(tokens,iterations,regularizer,learningRate)
println("writing embeddings")
Embeddings.writeEmbedding(new File(embeddingFilename), space)
//testEmbeddings(space,test)
}
}
object EmbeddingSpace{
import VectorUtils._
def fromFile(fileName:String):EmbeddingSpace ={
val reader = if(fileName.endsWith(".gz") || fileName.endsWith("tgz")) new BufferedReader(new InputStreamReader(new GZIPInputStream(new FileInputStream(new File(fileName)))))
else new BufferedReader(new InputStreamReader(new FileInputStream(new File(fileName))))
var result:EmbeddingSpace=null
val map = new HashMap[String,Array[Double]]
var line: String = ""
//val tmpResult = new ArrayBuffer[Pair[String,Array[Double]]]
while({line = reader.readLine(); line != null}){
val pair = line.split("[\\t]")
assert(pair.length == 2, "{%s} is %d in length" format(line, pair.length))
val weights = pair(1).split(" ").map(e => e.toDouble)
if (result==null)result = new EmbeddingSpace(new CategoricalDomain[String],weights.length,new scala.util.Random(0))
result.setEmbedding(pair(0),weights)
}
result
}
def stopWordStats(space:EmbeddingSpace,stop:Seq[String],control:Seq[String]){
val mean = zero(space.dimensionality)
var meanNorm = 0.0
//val variance = zero(space.dimensionality)
space.wordTypes.foreach(mean += _)
space.wordTypes.foreach(meanNorm += _.twoNorm)
mean /= space.wordTypes.size.toDouble
meanNorm /= space.wordTypes.size.toDouble
//space.wordTypes.foreach(x => variance += x.twoDistance(mean))
println("Mean: "+mean.mkString(","))
println("||Mean||: "+mean.twoNorm)
println("Average ||Mean||: "+meanNorm)
val wordsAndLabels = stop.map(_ -> "stop") ++ control.map(_ -> "ctrl")
val numStops = wordsAndLabels.filter(_._2=="stop").size
val numControl = wordsAndLabels.size-numStops
var stopFromMean=0.0
var controlFromMean=0.0
println("Words: ")
for((word,label) <- wordsAndLabels){
val x = space.getOrElseZero(word)
val norm = x.twoNorm
val toMean = (x-mean).twoNorm
val h = x.normalizedEntropyForLogValues
if (label=="stop")stopFromMean+=toMean else controlFromMean+=toMean
//if (label=="stop")stopFromMean+=h else controlFromMean+=h
println(" "+label+" "+h+" "+toMean+" "+word+" "+norm)
}
stopFromMean /= numStops
controlFromMean /= numControl
val boundary = (stopFromMean + controlFromMean)/2
println("Stop from mean: "+stopFromMean)
println("Control from mean: "+controlFromMean)
var numCorrect=0
var total=0
for((word,label) <- wordsAndLabels){
val x = space.getOrElseZero(word)
val toMean = (x-mean).twoNorm
val predictStop = toMean < boundary
val isStop = label=="stop"
if((predictStop && isStop) || (!predictStop && !isStop))numCorrect += 1
total+=1
}
println("Accuracy: "+numCorrect.toDouble/total.toDouble)
}
}
class EmbeddingSpace(val domain:CategoricalDomain[String],val dimensionality:Int,val random:scala.util.Random){
import VectorUtils._
val wordTypes = new ArrayBuffer[Array[Double]]
def mean = {val r = zero(dimensionality);var i=0;while(i<wordTypes.size){r+=wordTypes(i);i+=1};r/=wordTypes.size.toDouble;r}
def setEmbedding(s:String,v:Array[Double]) ={
val idx = domain.index(s)
if (idx==wordTypes.size)wordTypes += v
else if(idx<wordTypes.size)wordTypes(idx)=v
else throw new Exception("Error: domain and word type embeddings buffer are out of sync.")
}
def apply(s:String):Array[Double] = {
val idx = domain.index(s)
var result:Array[Double] = null
if (idx<wordTypes.size)result = wordTypes(idx)
else if(idx==wordTypes.size){
result = newEmbedding(s)
wordTypes += result
}else throw new Exception("Error: domain and word type embeddings buffer are out of sync.")
assert(result!=null)
result
}
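// like apply, but an unseen word is assigned the zero vector instead of a random embedding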
def getOrElseZero(s:String):Array[Double] ={
val idx = domain.index(s)
var result:Array[Double] = null
if (idx<wordTypes.size)result = wordTypes(idx)
else if(idx==wordTypes.size){
result = zero(dimensionality)
wordTypes += result
}else throw new Exception("Error: domain and word type embeddings buffer are out of sync.")
assert(result!=null)
result
}
def embedPhrase(words:Seq[String]) = {
val result = zero(dimensionality)
for(v <- words.map(getOrElseZero(_)))result += v
result
}
def learnEmbeddingsFromText(examples:Iterable[Iterable[String]],iterations:Int,regularizer:Double,learningRate:Double){
learnEmbeddings(examples.map(ws=>new EmbeddingExample(ws.toIndexedSeq,this)).toIndexedSeq,iterations,regularizer,learningRate)
}
def learnEmbeddings(examples:IndexedSeq[EmbeddingExample],iterations:Int,regularizer:Double,learningRate:Double){
assert(examples.forall(_.space eq this))
assert(examples.forall(_.words.length>1))
println("Learning embeddings.")
for (i <- 1 to iterations){
println("Iteration "+i)
var j=0
for (example <- random.shuffle(examples)){
gradientStep(example,examples(random.nextInt(examples.size)),regularizer,learningRate*2.0/(math.sqrt(1.0+i.toDouble)))
j+=1
}
monitorDoc(examples.head)
println("Num updates: "+numUpdates+" out of "+numSteps+" opportunities.")
}
}
def monitorDoc(example:EmbeddingExample){
println(" Monitoring example")
for(w <- example.words){
val v = getOrElseZero(w)
println(" -w: "+w+" v: "+v.twoNorm())
}
}
var numUpdates=0
var numSteps=0
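// margin-based ranking update: pull each word vector towards the mean of its own context, push it away from the mean of a random counter-example, then rescale it so its norm never exceeds the regularizer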
def gradientStep(example:EmbeddingExample,counterExample:EmbeddingExample,regularizer:Double,learningRate:Double){
val margin = regularizer/10.0
var i=0
val totalSum = example.computeSum()
assert(!totalSum.hasNaN)
while(i<example.wordVectors.length){
val word = example.words(i)
val wordv = example.wordVectors(i)
val contextSum = totalSum - wordv
assert(!contextSum.hasNaN)
contextSum /= (example.words.length-1.0)
assert(!contextSum.hasNaN)
val negativeExample = counterExample.computeSum()
negativeExample/=counterExample.words.length.toDouble
//val negativeExample = counterExample.wordVectors(random.nextInt(counterExample.wordVectors.length))
//val negativeExample = this.apply(domain(random.nextInt(domain.size)).category)
//val negativeExample = this.apply(domain(random.nextInt(domain.size)).category).makeCorruptObservation(_.corrupt(sigma,random))
if((wordv dot contextSum) < (wordv dot negativeExample) + margin){
wordv += (contextSum, learningRate)
//assert(!wordv.hasNaN)
wordv += (negativeExample, -learningRate)
//assert(!wordv.hasNaN)
val norm = wordv.twoNorm
if(norm>regularizer)wordv/=(norm/regularizer)
numUpdates += 1
}
numSteps += 1
i+=1
}
}
def newEmbedding(s:String) = randomArray(dimensionality,random)/dimensionality
}
class EmbeddingExample(val words:IndexedSeq[String],val space:EmbeddingSpace){
import VectorUtils._
val wordVectors = words.map(space(_))
def computeSum():Array[Double]={val contextSum=zero(space.dimensionality);wordVectors.foreach(contextSum += _);contextSum}
}
object Embeddings{
import VectorUtils._
//val test = Seq("vldb","emnlp","icml","nips","icvpr","acl","relation extraction","database","knowledge base","entity","coreference","graphical model","approach","face","physics","machine learning","cryptography","graphics","networks","learning","amccallum","elearnedmiller","amoore","speytonjones","ablum","tmitchell","dkarger")
def writeEmbedding(file:File,space:EmbeddingSpace){
val out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(file)))
for(word <- space.domain.categories){
val vec = space.getOrElseZero(word)
out.write(word + "\t" + vec.mkString(" ") + "\n")
out.flush
}
out.flush
out.close
}
}
| iesl/fuse_ttl | src/factorie-factorie_2.11-1.1/src/main/scala/cc/factorie/app/nlp/hcoref/TACCoref.scala | Scala | apache-2.0 | 20,912 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.ui
import javax.servlet.http.HttpServletRequest
import scala.xml.Node
import org.apache.spark.internal.Logging
import org.apache.spark.ui.{UIUtils, WebUIPage}
class ExecutionPage(parent: SQLTab) extends WebUIPage("execution") with Logging {
private val listener = parent.listener
override def render(request: HttpServletRequest): Seq[Node] = listener.synchronized {
// stripXSS is called first to remove suspicious characters used in XSS attacks
val parameterExecutionId = UIUtils.stripXSS(request.getParameter("id"))
require(parameterExecutionId != null && parameterExecutionId.nonEmpty,
"Missing execution id parameter")
val executionId = parameterExecutionId.toLong
val content = listener.getExecution(executionId).map { executionUIData =>
val currentTime = System.currentTimeMillis()
val duration =
executionUIData.completionTime.getOrElse(currentTime) - executionUIData.submissionTime
val summary =
<div>
<ul class="unstyled">
<li>
<strong>Submitted Time: </strong>{UIUtils.formatDate(executionUIData.submissionTime)}
</li>
<li>
<strong>Duration: </strong>{UIUtils.formatDuration(duration)}
</li>
{if (executionUIData.runningJobs.nonEmpty) {
<li>
<strong>Running Jobs: </strong>
{executionUIData.runningJobs.sorted.map { jobId =>
<a href={jobURL(jobId)}>{jobId.toString}</a><span> </span>
}}
</li>
}}
{if (executionUIData.succeededJobs.nonEmpty) {
<li>
<strong>Succeeded Jobs: </strong>
{executionUIData.succeededJobs.sorted.map { jobId =>
<a href={jobURL(jobId)}>{jobId.toString}</a><span> </span>
}}
</li>
}}
{if (executionUIData.failedJobs.nonEmpty) {
<li>
<strong>Failed Jobs: </strong>
{executionUIData.failedJobs.sorted.map { jobId =>
<a href={jobURL(jobId)}>{jobId.toString}</a><span> </span>
}}
</li>
}}
</ul>
</div>
val metrics = listener.getExecutionMetrics(executionId)
summary ++
planVisualization(metrics, executionUIData.physicalPlanGraph) ++
physicalPlanDescription(executionUIData.physicalPlanDescription)
}.getOrElse {
<div>No information to display for Plan {executionId}</div>
}
UIUtils.headerSparkPage(s"Details for Query $executionId", content, parent, Some(5000))
}
private def planVisualizationResources: Seq[Node] = {
// scalastyle:off
<link rel="stylesheet" href={UIUtils.prependBaseUri("/static/sql/spark-sql-viz.css")} type="text/css"/>
<script src={UIUtils.prependBaseUri("/static/d3.min.js")}></script>
<script src={UIUtils.prependBaseUri("/static/dagre-d3.min.js")}></script>
<script src={UIUtils.prependBaseUri("/static/graphlib-dot.min.js")}></script>
<script src={UIUtils.prependBaseUri("/static/sql/spark-sql-viz.js")}></script>
// scalastyle:on
}
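// Embeds the plan graph as a hidden DOT description plus per-node metadata; spark-sql-viz.js renders it client-side via renderPlanViz().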
private def planVisualization(metrics: Map[Long, String], graph: SparkPlanGraph): Seq[Node] = {
val metadata = graph.allNodes.flatMap { node =>
val nodeId = s"plan-meta-data-${node.id}"
<div id={nodeId}>{node.desc}</div>
}
<div>
<div id="plan-viz-graph"></div>
<div id="plan-viz-metadata" style="display:none">
<div class="dot-file">
{graph.makeDotFile(metrics)}
</div>
<div id="plan-viz-metadata-size">{graph.allNodes.size.toString}</div>
{metadata}
</div>
{planVisualizationResources}
<script>$(function() {{ renderPlanViz(); }})</script>
</div>
}
private def jobURL(jobId: Long): String =
"%s/jobs/job?id=%s".format(UIUtils.prependBaseUri(parent.basePath), jobId)
private def physicalPlanDescription(physicalPlanDescription: String): Seq[Node] = {
<div>
<span style="cursor: pointer;" onclick="clickPhysicalPlanDetails();">
<span id="physical-plan-details-arrow" class="arrow-closed"></span>
<a>Details</a>
</span>
</div>
<div id="physical-plan-details" style="display: none;">
<pre>{physicalPlanDescription}</pre>
</div>
<script>
function clickPhysicalPlanDetails() {{
$('#physical-plan-details').toggle();
$('#physical-plan-details-arrow').toggleClass('arrow-open').toggleClass('arrow-closed');
}}
</script>
<br/>
}
}
| aokolnychyi/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/ui/ExecutionPage.scala | Scala | apache-2.0 | 5,485 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.services
import java.io.File
import org.bdgenomics.adam.io.{ ByteAccess, FileLocator, LocalFileByteAccess }
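// FileLocator that resolves paths through the thread context classloader; bytes can only be served for resources that map to real files on disk.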
class ClasspathFileLocator(classpath: String) extends FileLocator {
override def relativeLocator(relativePath: String): FileLocator =
new ClasspathFileLocator("%s/%s".format(classpath.stripSuffix("/"), relativePath))
override def bytes: ByteAccess = {
val url = Thread.currentThread().getContextClassLoader.getResource(classpath)
if (url == null) { throw new IllegalArgumentException("Illegal classpath \"%s\"".format(classpath)) }
val path = url.getFile
val file = new File(path)
println("Returning bytes from %s".format(file.getAbsolutePath))
new LocalFileByteAccess(file)
}
override def parentLocator(): Option[FileLocator] = FileLocator.parseSlash(classpath) match {
case Some((parent, child)) => Some(new ClasspathFileLocator(parent))
case None => None
}
}
| bigdatagenomics/bdg-services | bdgs-core/src/main/scala/org/bdgenomics/services/ClasspathFileLocator.scala | Scala | apache-2.0 | 1,755 |
package com.github.rcoh.query.lang
import scala.util.parsing.combinator.RegexParsers
/**
* Created by russell on 8/3/16.
*/
case class ParseError(message: String, offset: Int, formattedError: String) {
override def toString = formattedError
}
object QueryParser {
def parse(s: String): Either[ParseError, Query] = ParserImpl.parseAll(ParserImpl.query, s) match {
case ParserImpl.Success(result, _) => Right(result)
case ParserImpl.NoSuccess(error, next) => Left(createParseError(error, next))
}
def createParseError(error: String, next: ParserImpl.Input) = {
ParseError(error, next.pos.column-1, s"$error\n${next.source}\n${" " * (next.pos.column-1)}^")
}
}
private object ParserImpl extends RegexParsers {
case class Subquery(field: String, query: Query)
val ident = "[a-zA-Z]+".r | failure("Expected an identifier")
def num: Parser[Int] = ("[0-9]+".r | failure("Expected an integer")) ^^ { _.toInt }
// [a,b*100+5,c[d]*10]*5+100
// Query(Map("a" -> Query.NoQuery, "b" -> Query(Map(), Paging(100, 5)), "c" -> Query(Map("d" -> Query.NoQuery), Paging(10))
def query: Parser[Query] = opt(("[" | failure("Queries must start with `[`")) ~> rep1sep(subquery, ",") <~ ("]" | failure("Expected `]`"))) ~ opt(paging) ^^ { case subqueriesOpt ~ pagingOpt => {
val subqueries = subqueriesOpt.getOrElse(List())
val subqueryMap = subqueries.map(q => q.field -> q.query).toMap
Query(subqueryMap, pagingOpt.getOrElse(Query.DefaultPaging))
}}
def subquery: Parser[Subquery] = ident ~ query ^^ { case (field ~ query) => Subquery(field, query) }
// TODO: because paging is optional the errors don't propagate that well
def paging = ((("*" ~> num) ~ opt("+" ~> num)) | failure("Expected paging expression like *20+10")) ^^ { case (maxOpt ~ offsetOpt) =>
Paging(maxOpt, offsetOpt.getOrElse(0))
}
}
| rcoh/lasic | src/main/scala/com/github/rcoh/query/lang/Parser.scala | Scala | mit | 1,858 |
package com.plasmaconduit.algebro
import scala.language.higherKinds
import scala.language.implicitConversions
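// Type class for containers whose elements can be combined into a single summary value.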
trait Foldable[F[_]] {
def fold[M: Monoid](m :F[M]): M
def foldMap[M: Monoid, A](f: A => M, n: F[A]): M
def foldRight[A, B](f: (A, B) => B, m: B, n: F[A]): B
def foldLeft[A, B](f: (B, A) => B, m: B, n: F[A]): B
def reduceRight[A: Monoid](f: (A, A) => A, n: F[A]): A
def reduceLeft[A: Monoid](f: (A, A) => A, n: F[A]): A
}
final case class FoldableOps[F[_], A](foldable: F[A])(implicit F: Foldable[F]) {
def fold(implicit M: Monoid[A]): A = {
F.fold(foldable)
}
def foldMap[M: Monoid](f: A => M): M = {
F.foldMap(f, foldable)
}
def foldRight[B](m: B)(f: (A, B) => B): B = {
F.foldRight(f, m, foldable)
}
def foldLeft[B](f: (B, A) => B, m: B): B = {
F.foldLeft(f, m, foldable)
}
def reduceRight(f: (A, A) => A)(implicit M: Monoid[A]): A = {
F.foldRight(f, M.empty, foldable)
}
def reduceLeft(f: (A, A) => A)(implicit M: Monoid[A]): A = {
F.foldLeft(f, M.empty, foldable)
}
}
object Foldable {
implicit def toFoldableOps[F[_], A](foldable: F[A])(implicit F: Foldable[F]): FoldableOps[F, A] = {
FoldableOps[F, A](foldable)
}
}
| plasmaconduit/algebro | src/main/scala/com/plasmaconduit/algebro/Foldable.scala | Scala | mit | 1,211 |
package actors
import akka.actor.{Actor, ActorRef, PoisonPill, Props, actorRef2Scala}
import play.api.Mode
import play.api.Play.current
import play.api.libs.json.Json
import play.api.libs.ws.WSClient
import scala.collection.immutable.Map
import scala.concurrent.ExecutionContext.Implicits._
/*
* Class Port, used to bind a container port
* in : port opened on the VM
* out : port used inside the container
*/
case class Port(in : Int, protocol : String, out : Int)
/*
* Class CreateContainer: represents the container to deploy on a VM
* image : name of the image to deploy from dockerhub
* ports : ports used by the image
*/
case class CreateContainer(image : String, ports : Seq[Port])
/*
* Object used by websockets
*/
object ContainersActor {
def props(out: ActorRef, ws: WSClient) = Props(new ContainersActor(out, ws))
}
/*
* Actor used to send a container to swarm master to deploy the container
*/
class ContainersActor(out: ActorRef, ws: WSClient) extends Actor {
def receive = {
case msg: String => {
println(msg);
out ! ("""{ "info" : "I received your message: """ + msg + "\"}")
self ! PoisonPill
}
case CreateContainer(image, ports) => {
out ! Json.toJson(
Map(
"info" -> Json.toJson("Creating container for image " + image)
)
).toString()
println("CreateContainer Ok");
println(ports);
println(image);
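// Docker remote API "create container" payload: exposes the image's ports and binds each one to the requested host port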
val params = Json.parse(
"""
{
"AttachStdin": false,
"AttachStdout": true,
"AttachStderr": true,
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
"Image": """" + image +
"""",
"ExposedPorts": {""" +
ports.map(port => s""" "${port.in}/${port.protocol}" : {} """).mkString("", ",", "")
// "80/tcp": {}
+
"""},
"StopSignal": "SIGTERM",
"HostConfig": {
"PortBindings": { """ +
ports.map(port => s""" "${port.in}/${port.protocol}" : [{ "HostPort" : "${port.out}"}] """).mkString("", ",", "")
// "80/tcp": [{ "HostPort": "8081" }]
+
"""},
"PublishAllPorts": false
}
}
""")
// Remote or local IP depending on the server's launch configuration
val swarmMaster = current.mode match {
case Mode.Dev => current.configuration.getString("vapp.swarm-master.ip").get+":8080"
case Mode.Prod => "192.168.2.100:3376"
}
println(params);
ws.url ("https://" + swarmMaster + "/containers/create").post (params).map (response => {
val res =
if (response.status == 201) {
Json.toJson (
Map (
"success" ->Json.toJson ("The container has been created with image " + image)
)
)
}
else {Json.toJson (
Map (
"error" -> Json.toJson (response.body)
)
)
}
println ("Requete Ok " + image)
out ! res.toString ()
self ! PoisonPill
})
}
}
}
| snigle/FrontDockerOrchestrator | PlayProject/app/actors/ContainersActor.scala | Scala | mit | 3,152 |
/*-
* #%L
* FWAPP Framework
* %%
* Copyright (C) 2016 - 2017 Open Design Flow
* %%
* This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
* #L%
*/
package org.odfi.wsb.fwapp
import com.idyria.osi.wsb.core.WSBEngine
import com.idyria.osi.wsb.webapp.http.connector.HTTPConnector
import org.odfi.indesign.core.module.IndesignModule
import org.odfi.wsb.fwapp.assets.AssetsResolver
import org.odfi.wsb.fwapp.assets.AssetsManager
import org.odfi.wsb.fwapp.assets.ResourcesAssetSource
import org.odfi.wsb.fwapp.session.SessionIntermediary
import com.idyria.osi.wsb.webapp.http.message.HTTPIntermediary
import com.idyria.osi.wsb.webapp.http.message.HTTPPathIntermediary
import org.apache.http.HttpResponse
import com.idyria.osi.wsb.webapp.http.message.HTTPResponse
import com.idyria.osi.wsb.core.broker.tree.Intermediary
import org.odfi.wsb.fwapp.assets.AssetsSource
import org.odfi.indesign.core.main.IndesignPlatorm
trait FWappApp extends IndesignModule with org.odfi.wsb.fwapp.FWappTreeBuilder {
override def getDisplayName = getClass.getName.replace("$", "")
/**
* WSB Engine used for operations
*/
val engine = new WSBEngine
/**
* The PreTree is added to the broker before the main tree
*/
var preTree = new SessionIntermediary
/**
* Main tree
*/
var tree: Option[FWappIntermediary] = None
/**
 * Assets resolver declared in the app; useful for creating dynamic paths
*/
var assetsResolver: Option[AssetsResolver] = None
/**
 * Use the provided tree as the application tree
*/
def mergeTree(t: FWappIntermediary) = {
//-- Save
/*t.deriveFrom(this)
tree = Some(t)
preTree <= t*/
//-- Find Assets Resolver to add default framework
/*t.on("updated") {
t.findChildOfType[AssetsResolver] match {
case Some(resolver) =>
println(s"Found AssetsResolver for: "+resolver.fullURLPath)
//AssetsManager.addAssetsSource("fwapp", new ResourcesAssetSource("")).addFilesSource("fwapp")
assetsResolver = Some(resolver)
case None =>
println(s"Cannot find AssetsResolver")
}
}*/
//-- Return
t
}
// var appTree = new FWappTree
def listen(port: Int) = {
    val conn = new HTTPConnector(port)
engine.network.addConnector(conn)
conn
}
def getHTTPListenPort = {
this.engine.network.connectors.collectFirst {
case http : HTTPConnector => http.port
}
}
def start = {
IndesignPlatorm use this
IndesignPlatorm.start
}
}
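/*
 * Hypothetical usage sketch (not part of the original file). A concrete app
 * would mix in this trait, open an HTTP connector and start the platform.
 * Whether further abstract members must be implemented is not shown here,
 * so this is indicative only:
 *
 *   object MyApp extends FWappApp {
 *     listen(8080)
 *   }
 *
 *   MyApp.start
 */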
|
opendesignflow/fwapp
|
src/main/scala/org/odfi/wsb/fwapp/FWappApp.scala
|
Scala
|
agpl-3.0
| 3,100 |
import org.scalacheck.Arbitrary
import org.specs2.scalaz._
import scalaz.scalacheck.ScalazProperties._
class VecSpec extends Spec {
implicit val arbitraryVec = Arbitrary {
for {
(x, y) <- Arbitrary.arbitrary[(Int, Int)]
} yield Vec(x, y)
}
checkAll(equal.laws[Vec])
checkAll(monoid.laws[Vec])
}
|
debasishg/proptest
|
src/test/scala/net/debasishg/prop/VecSpec.scala
|
Scala
|
apache-2.0
| 319 |
package junto.graph
import scalax.collection.GraphEdge._
final class RWUnDiEdgeAssoc[N](val e: UnDiEdge[N]) {
def ^(rweight: Double) =
new RWUnDiEdge[N](e.nodes, rweight)
}
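/*
 * Hypothetical usage sketch (not part of the original file): attaching a
 * random-walk weight to a plain undirected edge with the `^` operator.
 * Assumes RWUnDiEdge (referenced above) lives in this package and that
 * scala-graph's GraphPredef provides the `~` edge constructor.
 */
object RWUnDiEdgeAssocSketch {
  import scalax.collection.GraphPredef._

  // "a" ~ "b" builds an UnDiEdge[String]; `^ 0.75` wraps it with a weight
  def weightedEdge: RWUnDiEdge[String] = new RWUnDiEdgeAssoc("a" ~ "b") ^ 0.75
}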
|
scalanlp/junto
|
src/main/scala/junto/graph/RWUnDiEdgeAssoc.scala
|
Scala
|
apache-2.0
| 183 |
package colossus.service
import colossus.util.DataSize._
import org.scalatest.{WordSpec, MustMatchers}
import scala.concurrent.duration.Duration
class ServiceConfigLoadingSpec extends WordSpec with MustMatchers {
"Service configuration loading" should {
"load defaults" in {
val config = ServiceConfig.Default
config.logErrors mustBe true
config.maxRequestSize mustBe 10.MB
config.requestBufferSize mustBe 100
config.requestMetrics mustBe true
config.requestTimeout mustBe Duration.Inf
}
"load a config based on path with fallback to defaults" in {
val config = ServiceConfig.load("config-loading-spec")
config.requestBufferSize mustBe 9876
config.requestMetrics mustBe true
}
"throw a ServiceConfigException when something is wrong" in {
intercept[ServiceConfigException] {
ServiceConfig.load("bad-config")
}
}
}
}
|
tumblr/colossus
|
colossus-tests/src/test/scala/colossus/service/ServiceConfigLoadingSpec.scala
|
Scala
|
apache-2.0
| 926 |
package controllers
import scala.concurrent.duration._
import views._
import lila.api.Context
import lila.app._
import lila.ublog.{ UblogBlog, UblogPost }
import lila.user.{ User => UserModel }
import play.api.i18n.Lang
import lila.i18n.LangList
import lila.report.Suspect
final class Ublog(env: Env) extends LilaController(env) {
import views.html.ublog.post.{ editUrlOfPost, urlOfPost }
import views.html.ublog.blog.{ urlOfBlog }
import lila.common.paginator.Paginator.zero
def index(username: String, page: Int) = Open { implicit ctx =>
NotForKids {
OptionFuResult(env.user.repo named username) { user =>
env.ublog.api.getUserBlog(user) flatMap { blog =>
(canViewBlogOf(user, blog) ?? env.ublog.paginator.byUser(user, true, page)) map { posts =>
Ok(html.ublog.blog(user, blog, posts))
}
}
}
}
}
def drafts(username: String, page: Int) = Auth { implicit ctx => me =>
NotForKids {
if (!me.is(username)) Redirect(routes.Ublog.drafts(me.username)).fuccess
else
env.ublog.paginator.byUser(me, false, page) map { posts =>
Ok(html.ublog.index.drafts(me, posts))
}
}
}
def post(username: String, slug: String, id: String) = Open { implicit ctx =>
NotForKids {
OptionFuResult(env.user.repo named username) { user =>
env.ublog.api.getUserBlog(user) flatMap { blog =>
env.ublog.api.findByIdAndBlog(UblogPost.Id(id), blog.id) flatMap {
_.filter(canViewPost(user, blog)).fold(notFound) { post =>
if (slug != post.slug) Redirect(urlOfPost(post)).fuccess
else {
env.ublog.api.otherPosts(UblogBlog.Id.User(user.id), post) zip
ctx.me.??(env.ublog.rank.liked(post)) zip
ctx.userId.??(env.relation.api.fetchFollows(_, user.id)) map {
case ((others, liked), followed) =>
val viewedPost = env.ublog.viewCounter(post, ctx.ip)
val markup = scalatags.Text.all.raw(env.ublog.markup(post))
Ok(html.ublog.post(user, blog, viewedPost, markup, others, liked, followed))
}
}
}
}
}
}
}
}
def form(username: String) = Auth { implicit ctx => me =>
NotForKids {
if (env.ublog.api.canBlog(me)) {
if (!me.is(username)) Redirect(routes.Ublog.form(me.username)).fuccess
else
env.ublog.form.anyCaptcha map { captcha =>
Ok(html.ublog.form.create(me, env.ublog.form.create, captcha))
}
} else
Unauthorized(
html.site.message.notYet(
"Please play a few games and wait 2 days before you can create blog posts."
)
).fuccess
}
}
private val CreateLimitPerUser = new lila.memo.RateLimit[UserModel.ID](
credits = 5 * 3,
duration = 24.hour,
key = "ublog.create.user"
)
def create = AuthBody { implicit ctx => me =>
NotForKids {
env.ublog.form.create
.bindFromRequest()(ctx.body, formBinding)
.fold(
err =>
env.ublog.form.anyCaptcha map { captcha =>
BadRequest(html.ublog.form.create(me, err, captcha))
},
data =>
CreateLimitPerUser(me.id, cost = if (me.isVerified) 1 else 3) {
env.ublog.api.create(data, me) map { post =>
lila.mon.ublog.create(me.id).increment()
Redirect(editUrlOfPost(post)).flashSuccess
}
}(rateLimitedFu)
)
}
}
def edit(id: String) = AuthBody { implicit ctx => me =>
NotForKids {
OptionOk(env.ublog.api.findByUserBlogOrAdmin(UblogPost.Id(id), me)) { post =>
html.ublog.form.edit(post, env.ublog.form.edit(post))
}
}
}
def update(id: String) = AuthBody { implicit ctx => me =>
NotForKids {
env.ublog.api.findByUserBlogOrAdmin(UblogPost.Id(id), me) flatMap {
_ ?? { prev =>
env.ublog.form
.edit(prev)
.bindFromRequest()(ctx.body, formBinding)
.fold(
err => BadRequest(html.ublog.form.edit(prev, err)).fuccess,
data =>
env.ublog.api.update(data, prev, me) flatMap { post =>
logModAction(post, "edit") inject
Redirect(urlOfPost(post)).flashSuccess
}
)
}
}
}
}
def delete(id: String) = AuthBody { implicit ctx => me =>
env.ublog.api.findByUserBlogOrAdmin(UblogPost.Id(id), me) flatMap {
_ ?? { post =>
env.ublog.api.delete(post) >>
logModAction(post, "delete") inject
Redirect(urlOfBlog(post.blog)).flashSuccess
}
}
}
private def logModAction(post: UblogPost, action: String)(implicit ctx: Context): Funit =
isGranted(_.ModerateBlog) ?? ctx.me ?? { me =>
!me.is(post.created.by) ?? {
env.user.repo.byId(post.created.by) flatMap {
_ ?? { user =>
env.mod.logApi.blogPostEdit(lila.report.Mod(me), Suspect(user), post.id.value, post.title, action)
}
}
}
}
def like(id: String, v: Boolean) = Auth { implicit ctx => me =>
NoBot {
NotForKids {
env.ublog.rank.like(UblogPost.Id(id), me, v) map { likes =>
Ok(likes.value)
}
}
}
}
def redirect(id: String) = Open { implicit ctx =>
env.ublog.api.postPreview(UblogPost.Id(id)) flatMap {
_.fold(notFound) { post =>
Redirect(urlOfPost(post)).fuccess
}
}
}
def setTier(blogId: String) = SecureBody(_.ModerateBlog) { implicit ctx => me =>
UblogBlog.Id(blogId).??(env.ublog.api.getBlog) flatMap {
_ ?? { blog =>
implicit val body = ctx.body
lila.ublog.UblogForm.tier
.bindFromRequest()
.fold(
err => Redirect(urlOfBlog(blog)).flashFailure.fuccess,
tier =>
for {
user <- env.user.repo.byId(blog.userId) orFail "Missing blog user!" dmap Suspect
_ <- env.ublog.api.setTier(blog.id, tier)
_ <- env.ublog.rank.recomputeRankOfAllPostsOfBlog(blog.id)
_ <- env.mod.logApi
.blogTier(lila.report.Mod(me.user), user, blog.id.full, UblogBlog.Tier.name(tier))
} yield Redirect(urlOfBlog(blog)).flashSuccess
)
}
}
}
private val ImageRateLimitPerIp = lila.memo.RateLimit.composite[lila.common.IpAddress](
key = "ublog.image.ip"
)(
("fast", 10, 2.minutes),
("slow", 60, 1.day)
)
def image(id: String) =
AuthBody(parse.multipartFormData) { implicit ctx => me =>
env.ublog.api.findByUserBlogOrAdmin(UblogPost.Id(id), me) flatMap {
_ ?? { post =>
ctx.body.body.file("image") match {
case Some(image) =>
ImageRateLimitPerIp(ctx.ip) {
env.ublog.api.uploadImage(me, post, image) map { newPost =>
Ok(html.ublog.form.formImage(newPost))
} recover { case e: Exception =>
BadRequest(e.getMessage)
}
}(rateLimitedFu)
case None =>
env.ublog.api.deleteImage(post) flatMap { newPost =>
logModAction(newPost, "delete image") inject
Ok(html.ublog.form.formImage(newPost))
}
}
}
}
}
def friends(page: Int) = Auth { implicit ctx => me =>
NotForKids {
Reasonable(page, 10) {
env.ublog.paginator.liveByFollowed(me, page) map { posts =>
Ok(html.ublog.index.friends(posts))
}
}
}
}
def community(code: String, page: Int) = Open { implicit ctx =>
NotForKids {
val l = Lang.get(code).filter(LangList.popularNoRegion.contains)
Reasonable(page, 8) {
env.ublog.paginator.liveByCommunity(l, page) map { posts =>
Ok(html.ublog.index.community(l, posts))
}
}
}
}
def liked(page: Int) = Auth { implicit ctx => me =>
NotForKids {
Reasonable(page, 15) {
ctx.me ?? { me =>
env.ublog.paginator.liveByLiked(me, page) map { posts =>
Ok(html.ublog.index.liked(posts))
}
}
}
}
}
def topics = Open { implicit ctx =>
NotForKids {
env.ublog.topic.withPosts map { topics =>
Ok(html.ublog.index.topics(topics))
}
}
}
def topic(str: String, page: Int) = Open { implicit ctx =>
NotForKids {
Reasonable(page, 5) {
lila.ublog.UblogTopic.fromUrl(str) ?? { top =>
env.ublog.paginator.liveByTopic(top, page) map { posts =>
Ok(html.ublog.index.topic(top, posts))
}
}
}
}
}
def userAtom(username: String) = Action.async { implicit req =>
env.user.repo.enabledNamed(username) flatMap {
case None => NotFound.fuccess
case Some(user) =>
implicit val lang = reqLang
env.ublog.api.getUserBlog(user) flatMap { blog =>
(isBlogVisible(user, blog) ?? env.ublog.paginator.byUser(user, true, 1)) map { posts =>
Ok(html.ublog.atom(user, blog, posts.currentPageResults)) as XML
}
}
}
}
private def isBlogVisible(user: UserModel, blog: UblogBlog) = user.enabled && blog.visible
private def canViewBlogOf(user: UserModel, blog: UblogBlog)(implicit ctx: Context) =
ctx.is(user) || isGranted(_.ModerateBlog) || isBlogVisible(user, blog)
private def canViewPost(user: UserModel, blog: UblogBlog)(post: UblogPost)(implicit ctx: Context) =
canViewBlogOf(user, blog) && (ctx.is(user) || post.live)
}
|
luanlv/lila
|
app/controllers/Ublog.scala
|
Scala
|
mit
| 9,767 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600ei.v3.retriever
import uk.gov.hmrc.ct.box.retriever.{BoxRetriever, FilingAttributesBoxValueRetriever}
import uk.gov.hmrc.ct.ct600ei.v3.{DIT001, DIT002, DIT003}
trait CT600EiBoxRetriever extends BoxRetriever {
self: FilingAttributesBoxValueRetriever =>
def dit001(): DIT001
def dit002(): DIT002
def dit003(): DIT003
}
|
hmrc/ct-calculations
|
src/main/scala/uk/gov/hmrc/ct/ct600ei/v3/retriever/CT600EiBoxRetriever.scala
|
Scala
|
apache-2.0
| 967 |
package satisfaction.fs
import java.io._
import org.joda.time.DateTime
/**
* Simple FileSystem, for accessing local disk.
* Mostly for testing...
*
* XXX Add unit tests for local file operations
*/
case class LocalFileSystem() extends FileSystem {
case class LocalFStatus( file : java.io.File ) extends FileStatus {
override def size : Long = {
file.length
}
override def isDirectory : Boolean = {
file.isDirectory
}
override def isFile : Boolean = {
file.isFile
}
override def path : Path = {
new Path( file.getPath)
}
override def lastAccessed : DateTime = {
new DateTime(file.lastModified)
}
override def created : DateTime = {
/// XXX not correct
lastAccessed
}
}
implicit def File2FileStatus( file : java.io.File ) : FileStatus = {
new LocalFStatus(file)
}
implicit def Path2File( path : Path) : java.io.File = {
new File( path.toUri.getPath )
}
implicit def File2Path( file : java.io.File) : Path = {
new Path( file.getPath)
}
override def uri : java.net.URI = {
return new java.net.URI( s"file:///")
}
override def listFiles( p : Path ) : Seq[FileStatus] = {
val file :File = (p)
val lf = file.listFiles
if( lf == null ) {
Seq.empty
} else {
lf.map(f => {
new LocalFStatus(f)
} ).toSeq
}
}
override def listFilesRecursively( p : Path ) : Seq[FileStatus] = {
listFiles( (p) ).map( fs => {
if( fs.isFile ) {
Seq( fs)
} else if( fs.isDirectory ) {
listFilesRecursively( fs.path ) ++ Seq(fs)
} else {
Seq.empty
}
} ).flatten
}
override def mkdirs( p : Path ) : Boolean = {
(p).mkdirs
}
override def open( path : Path) : java.io.InputStream = {
new FileInputStream((path))
}
override def create( path : Path ) : java.io.OutputStream = {
new FileOutputStream((path))
}
override def exists( p : Path ) : Boolean = {
(p).exists
}
override def isDirectory( p : Path ) : Boolean = {
(p).isDirectory
}
override def isFile( p : Path ) : Boolean = {
(p).isFile
}
override def getStatus( p : Path ) : FileStatus = {
val f : File = (p)
f
}
override def delete( p : Path ) = {
/// XXX handle return value
(p).delete
}
def setExecutable( p: Path, flag: Boolean = true ) = {
(p).setExecutable( flag)
}
}
object LocalFileSystem extends LocalFileSystem {
def currentDirectory : Path = {
new Path( System.getProperty("user.dir"))
}
def relativePath( p : Path) : Path = {
currentDirectory / p
}
}
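/*
 * Hypothetical usage sketch (not part of the original file): walking the
 * current working directory with the companion object and printing the size
 * of every regular file found.
 */
object LocalFileSystemSketch {
  def printSizes(): Unit =
    LocalFileSystem
      .listFilesRecursively(LocalFileSystem.currentDirectory)
      .filter(_.isFile)
      .foreach(status => println(s"${status.path} : ${status.size} bytes"))
}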
|
ifwe/satisfaction
|
modules/core/src/main/scala/satisfaction/fs/LocalFs.scala
|
Scala
|
apache-2.0
| 2,877 |
package lila
package object api extends PackageObject
|
luanlv/lila
|
modules/api/src/main/package.scala
|
Scala
|
mit
| 55 |
import types.Types.{Solution, OptimalSolution, ProblemData}
import scala.util.Random
package object algorithms {
def cost(problem: ProblemData, solution: Solution): Int = {
val (nItems, m1, m2) = problem
val sumatory = for {
i <- 0 until nItems
j <- 0 until nItems
} yield m1(i)(j) * m2(solution(i)-1)(solution(j)-1)
sumatory.sum
}
def mutate(solution: Solution, mutationSize: Int, random: Random) = {
val subListStart = random.nextInt(solution.length - mutationSize)
val subList = solution.slice(subListStart, subListStart + mutationSize)
val newSubList = random.shuffle(subList.toList).toArray
val newSolution = solution.clone()
for (i <- subListStart until subListStart + mutationSize) {
newSolution.update(i, newSubList(i-subListStart))
}
newSolution
}
def randomSolution(size: Int, random: Random) = {
val sol = 1 to size
random.shuffle(sol).toArray
}
}
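/*
 * Hypothetical usage sketch (not part of the original file): evaluating a
 * random permutation and one mutated neighbour of it. `problem` is assumed
 * to be the (nItems, matrix1, matrix2) triple that `cost` above expects.
 */
object AlgorithmsSketch {
  import scala.util.Random
  import algorithms._

  def demo(problem: ProblemData): Unit = {
    val rng       = new Random(42)
    val solution  = randomSolution(problem._1, rng) // random permutation of 1..nItems
    val neighbour = mutate(solution, 3, rng)        // reshuffle a slice of length 3
    println(s"cost(solution)  = ${cost(problem, solution)}")
    println(s"cost(neighbour) = ${cost(problem, neighbour)}")
  }
}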
|
Truji92/MH-algoritmos-basados-en-trayectorias
|
src/main/scala/algorithms/package.scala
|
Scala
|
mit
| 950 |
package com.jackbeasley.enigma
class Plugboard(cipher:Cipher) {
def mapForward(input:Char):Char = cipher.cipherForward(input)
def mapBackward(input:Char):Char = cipher.cipherBackward(input)
}
object Plugboard {
def isValid(settings:Array[(Char,Char)]):Boolean = {
var rawString = ""
// Empty the mappings to a string
settings.foreach{case(one:Char,two:Char) => rawString += "" + one + two}
// Loop through the string (char, index)
for(t <- rawString.zipWithIndex){
// String of everything but the tested char
val excluded = rawString.substring(0,t._2) + rawString.substring(t._2 + 1)
if(excluded.contains(t._1)){
// Double mapping
return false
}
}
// No double mappings
return true
}
def toCipher(settings:Array[(Char,Char)]):Cipher = {
// Check for validity
if(isValid(settings)){
// Create initial array with alphabetical (identity) mappings
val table = ('A' to 'Z') zip ('A' to 'Z')
val finalTable = table.map(t => Plugboard.determineMapping(t, settings))
return new Cipher(finalTable.toArray)
} else {
// TODO: add safe exception
return new Cipher(Array())
}
}
def determineMapping(input:(Char,Char), settings:Array[(Char,Char)]):(Char,Char) = {
val target = input._2
val left = settings.unzip._1.zipWithIndex
val right = settings.unzip._2.zipWithIndex
for(t <- left){
if(t._1 == target) return (t._1, settings(t._2)._2)
}
for(t <- right){
if(t._1 == target) return (t._1, settings(t._2)._1)
}
// No plugboard mapping found
return (target,target)
}
}
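/*
 * Hypothetical usage sketch (not part of the original file): a plugboard that
 * swaps A<->B and C<->D. Assumes Cipher.cipherForward maps the first element
 * of each pair produced by toCipher to the second.
 */
object PlugboardSketch {
  def demo(): Unit = {
    val settings = Array(('A', 'B'), ('C', 'D'))
    val board    = new Plugboard(Plugboard.toCipher(settings))
    println(board.mapForward('A')) // expected: 'B' under the assumption above
    println(board.mapForward('E')) // unconnected letters map to themselves
  }
}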
|
jackbeasley/enigma
|
src/main/scala/Plugboard.scala
|
Scala
|
gpl-3.0
| 1,642 |
// scalac: -Xasync
object Test extends scala.tools.partest.JUnitTest(classOf[scala.async.run.ifelse1.IfElse1Spec])
package scala.async.run.ifelse1 {
import language.{reflectiveCalls, postfixOps}
import scala.concurrent.{Future, ExecutionContext, Await}
import scala.concurrent.duration._
import scala.tools.testkit.async.Async.{async, await}
import org.junit.Test
import org.junit.Assert._
class TestIfElse1Class {
import ExecutionContext.Implicits.global
def base(x: Int): Future[Int] = Future {
x + 2
}
def m1(y: Int): Future[Int] = async {
val f = base(y)
var z = 0
if (y > 0) {
if (y > 100)
5
else {
val x1 = await(f)
z = x1 + 2
}
} else {
val x2 = await(f)
z = x2 - 2
}
z
}
def m2(y: Int): Future[Int] = async {
val f = base(y)
var z = 0
if (y > 0) {
if (y < 100) {
val x1 = await(f)
z = x1 + 2
}
else
5
} else {
val x2 = await(f)
z = x2 - 2
}
z
}
def m3(y: Int): Future[Int] = async {
val f = base(y)
var z = 0
if (y < 0) {
val x2 = await(f)
z = x2 - 2
} else {
if (y > 100)
5
else {
val x1 = await(f)
z = x1 + 2
}
}
z
}
def m4(y: Int): Future[Int] = async {
val f = base(y)
var z = 0
if (y < 0) {
val x2 = await(f)
z = x2 - 2
} else {
if (y < 100) {
val x1 = await(f)
z = x1 + 2
} else
5
}
z
}
def pred: Future[Boolean] = async(true)
def m5: Future[Boolean] = async {
if(if(if(if(if(if(if(if(if(if(if(if(if(if(if(if(if(if(if(if(if(await(pred))
await(pred)
else
false)
await(pred)
else
false)
await(pred)
else
false)
await(pred)
else
false)
await(pred)
else
false)
await(pred)
else
false)
await(pred)
else
false)
await(pred)
else
false)
await(pred)
else
false)
await(pred)
else
false)
await(pred)
else
false)
await(pred)
else
false)
await(pred)
else
false)
await(pred)
else
false)
await(pred)
else
false)
await(pred)
else
false)
await(pred)
else
false)
await(pred)
else
false)
await(pred)
else
false)
await(pred)
else
false)
await(pred)
else
false
}
}
class IfElse1Spec {
@Test
def `await in a nested if-else expression`(): Unit = {
val o = new TestIfElse1Class
val fut = o.m1(10)
val res = Await.result(fut, 2 seconds)
assertEquals(14, res)
}
@Test
def `await in a nested if-else expression 2`(): Unit = {
val o = new TestIfElse1Class
val fut = o.m2(10)
val res = Await.result(fut, 2 seconds)
assertEquals(14, res)
}
@Test
def `await in a nested if-else expression 3`(): Unit = {
val o = new TestIfElse1Class
val fut = o.m3(10)
val res = Await.result(fut, 2 seconds)
assertEquals(14, res)
}
@Test
def `await in a nested if-else expression 4`(): Unit = {
val o = new TestIfElse1Class
val fut = o.m4(10)
val res = Await.result(fut, 2 seconds)
assertEquals(14, res)
}
@Test
def `await in deeply-nested if-else conditions`(): Unit = {
val o = new TestIfElse1Class
val fut = o.m5
val res = Await.result(fut, 2 seconds)
assertEquals(true, res)
}
}
}
|
lrytz/scala
|
test/async/jvm/ifelse1.scala
|
Scala
|
apache-2.0
| 3,919 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.io.{ByteArrayOutputStream, CharArrayWriter, DataOutputStream}
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import scala.language.implicitConversions
import scala.util.control.NonFatal
import org.apache.commons.lang3.StringUtils
import org.apache.spark.{SparkException, TaskContext}
import org.apache.spark.annotation.{DeveloperApi, Stable, Unstable}
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.api.java.function._
import org.apache.spark.api.python.{PythonRDD, SerDeUtil}
import org.apache.spark.api.r.RRDD
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.QueryPlanningTracker
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.catalog.HiveTableRelation
import org.apache.spark.sql.catalyst.encoders._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.json.{JacksonGenerator, JSONOptions}
import org.apache.spark.sql.catalyst.optimizer.CombineUnions
import org.apache.spark.sql.catalyst.parser.{ParseException, ParserUtils}
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.plans.physical.{Partitioning, PartitioningCollection}
import org.apache.spark.sql.catalyst.trees.TreeNodeTag
import org.apache.spark.sql.catalyst.util.IntervalUtils
import org.apache.spark.sql.execution._
import org.apache.spark.sql.execution.aggregate.TypedAggregateExpression
import org.apache.spark.sql.execution.arrow.{ArrowBatchStreamWriter, ArrowConverters}
import org.apache.spark.sql.execution.command._
import org.apache.spark.sql.execution.datasources.LogicalRelation
import org.apache.spark.sql.execution.datasources.v2.{DataSourceV2ScanRelation, FileTable}
import org.apache.spark.sql.execution.python.EvaluatePython
import org.apache.spark.sql.execution.stat.StatFunctions
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.streaming.DataStreamWriter
import org.apache.spark.sql.types._
import org.apache.spark.sql.util.SchemaUtils
import org.apache.spark.storage.StorageLevel
import org.apache.spark.unsafe.array.ByteArrayMethods
import org.apache.spark.unsafe.types.{CalendarInterval, UTF8String}
import org.apache.spark.util.Utils
private[sql] object Dataset {
val curId = new java.util.concurrent.atomic.AtomicLong()
val DATASET_ID_KEY = "__dataset_id"
val COL_POS_KEY = "__col_position"
val DATASET_ID_TAG = TreeNodeTag[Long]("dataset_id")
def apply[T: Encoder](sparkSession: SparkSession, logicalPlan: LogicalPlan): Dataset[T] = {
val dataset = new Dataset(sparkSession, logicalPlan, implicitly[Encoder[T]])
// Eagerly bind the encoder so we verify that the encoder matches the underlying
// schema. The user will get an error if this is not the case.
// optimization: it is guaranteed that [[InternalRow]] can be converted to [[Row]] so
// do not do this check in that case. this check can be expensive since it requires running
// the whole [[Analyzer]] to resolve the deserializer
if (dataset.exprEnc.clsTag.runtimeClass != classOf[Row]) {
dataset.resolvedEnc
}
dataset
}
def ofRows(sparkSession: SparkSession, logicalPlan: LogicalPlan): DataFrame =
sparkSession.withActive {
val qe = sparkSession.sessionState.executePlan(logicalPlan)
qe.assertAnalyzed()
new Dataset[Row](qe, RowEncoder(qe.analyzed.schema))
}
/** A variant of ofRows that allows passing in a tracker so we can track query parsing time. */
def ofRows(sparkSession: SparkSession, logicalPlan: LogicalPlan, tracker: QueryPlanningTracker)
: DataFrame = sparkSession.withActive {
val qe = new QueryExecution(sparkSession, logicalPlan, tracker)
qe.assertAnalyzed()
new Dataset[Row](qe, RowEncoder(qe.analyzed.schema))
}
}
/**
* A Dataset is a strongly typed collection of domain-specific objects that can be transformed
* in parallel using functional or relational operations. Each Dataset also has an untyped view
* called a `DataFrame`, which is a Dataset of [[Row]].
*
* Operations available on Datasets are divided into transformations and actions. Transformations
* are the ones that produce new Datasets, and actions are the ones that trigger computation and
* return results. Example transformations include map, filter, select, and aggregate (`groupBy`).
 * Example actions include count, show, and writing data out to file systems.
*
* Datasets are "lazy", i.e. computations are only triggered when an action is invoked. Internally,
* a Dataset represents a logical plan that describes the computation required to produce the data.
* When an action is invoked, Spark's query optimizer optimizes the logical plan and generates a
* physical plan for efficient execution in a parallel and distributed manner. To explore the
* logical plan as well as optimized physical plan, use the `explain` function.
*
* To efficiently support domain-specific objects, an [[Encoder]] is required. The encoder maps
* the domain specific type `T` to Spark's internal type system. For example, given a class `Person`
* with two fields, `name` (string) and `age` (int), an encoder is used to tell Spark to generate
* code at runtime to serialize the `Person` object into a binary structure. This binary structure
 * often has a much lower memory footprint and is optimized for efficiency in data processing
* (e.g. in a columnar format). To understand the internal binary representation for data, use the
* `schema` function.
*
* There are typically two ways to create a Dataset. The most common way is by pointing Spark
* to some files on storage systems, using the `read` function available on a `SparkSession`.
* {{{
* val people = spark.read.parquet("...").as[Person] // Scala
* Dataset<Person> people = spark.read().parquet("...").as(Encoders.bean(Person.class)); // Java
* }}}
*
* Datasets can also be created through transformations available on existing Datasets. For example,
* the following creates a new Dataset by applying a filter on the existing one:
* {{{
* val names = people.map(_.name) // in Scala; names is a Dataset[String]
* Dataset<String> names = people.map((Person p) -> p.name, Encoders.STRING));
* }}}
*
* Dataset operations can also be untyped, through various domain-specific-language (DSL)
* functions defined in: Dataset (this class), [[Column]], and [[functions]]. These operations
* are very similar to the operations available in the data frame abstraction in R or Python.
*
* To select a column from the Dataset, use `apply` method in Scala and `col` in Java.
* {{{
* val ageCol = people("age") // in Scala
* Column ageCol = people.col("age"); // in Java
* }}}
*
* Note that the [[Column]] type can also be manipulated through its various functions.
* {{{
* // The following creates a new column that increases everybody's age by 10.
* people("age") + 10 // in Scala
* people.col("age").plus(10); // in Java
* }}}
*
* A more concrete example in Scala:
* {{{
* // To create Dataset[Row] using SparkSession
* val people = spark.read.parquet("...")
* val department = spark.read.parquet("...")
*
* people.filter("age > 30")
* .join(department, people("deptId") === department("id"))
* .groupBy(department("name"), people("gender"))
* .agg(avg(people("salary")), max(people("age")))
* }}}
*
* and in Java:
* {{{
* // To create Dataset<Row> using SparkSession
* Dataset<Row> people = spark.read().parquet("...");
* Dataset<Row> department = spark.read().parquet("...");
*
* people.filter(people.col("age").gt(30))
* .join(department, people.col("deptId").equalTo(department.col("id")))
* .groupBy(department.col("name"), people.col("gender"))
* .agg(avg(people.col("salary")), max(people.col("age")));
* }}}
*
* @groupname basic Basic Dataset functions
* @groupname action Actions
* @groupname untypedrel Untyped transformations
* @groupname typedrel Typed transformations
*
* @since 1.6.0
*/
@Stable
class Dataset[T] private[sql](
@DeveloperApi @Unstable @transient val queryExecution: QueryExecution,
@DeveloperApi @Unstable @transient val encoder: Encoder[T])
extends Serializable {
@transient lazy val sparkSession: SparkSession = {
if (queryExecution == null || queryExecution.sparkSession == null) {
throw new SparkException(
"Dataset transformations and actions can only be invoked by the driver, not inside of" +
" other Dataset transformations; for example, dataset1.map(x => dataset2.values.count()" +
" * x) is invalid because the values transformation and count action cannot be " +
"performed inside of the dataset1.map transformation. For more information," +
" see SPARK-28702.")
}
queryExecution.sparkSession
}
// A globally unique id of this Dataset.
private val id = Dataset.curId.getAndIncrement()
queryExecution.assertAnalyzed()
// Note for Spark contributors: if adding or updating any action in `Dataset`, please make sure
  // you wrap it with `withNewExecutionId` if this action doesn't call another action.
def this(sparkSession: SparkSession, logicalPlan: LogicalPlan, encoder: Encoder[T]) = {
this(sparkSession.sessionState.executePlan(logicalPlan), encoder)
}
def this(sqlContext: SQLContext, logicalPlan: LogicalPlan, encoder: Encoder[T]) = {
this(sqlContext.sparkSession, logicalPlan, encoder)
}
@transient private[sql] val logicalPlan: LogicalPlan = {
// For various commands (like DDL) and queries with side effects, we force query execution
// to happen right away to let these side effects take place eagerly.
val plan = queryExecution.analyzed match {
case c: Command =>
LocalRelation(c.output, withAction("command", queryExecution)(_.executeCollect()))
case u @ Union(children) if children.forall(_.isInstanceOf[Command]) =>
LocalRelation(u.output, withAction("command", queryExecution)(_.executeCollect()))
case _ =>
queryExecution.analyzed
}
if (sparkSession.sessionState.conf.getConf(SQLConf.FAIL_AMBIGUOUS_SELF_JOIN_ENABLED)) {
plan.setTagValue(Dataset.DATASET_ID_TAG, id)
}
plan
}
/**
* Currently [[ExpressionEncoder]] is the only implementation of [[Encoder]], here we turn the
* passed in encoder to [[ExpressionEncoder]] explicitly, and mark it implicit so that we can use
* it when constructing new Dataset objects that have the same object type (that will be
* possibly resolved to a different schema).
*/
private[sql] implicit val exprEnc: ExpressionEncoder[T] = encoderFor(encoder)
// The resolved `ExpressionEncoder` which can be used to turn rows to objects of type T, after
// collecting rows to the driver side.
private lazy val resolvedEnc = {
exprEnc.resolveAndBind(logicalPlan.output, sparkSession.sessionState.analyzer)
}
private implicit def classTag = exprEnc.clsTag
// sqlContext must be val because a stable identifier is expected when you import implicits
@transient lazy val sqlContext: SQLContext = sparkSession.sqlContext
private[sql] def resolve(colName: String): NamedExpression = {
val resolver = sparkSession.sessionState.analyzer.resolver
queryExecution.analyzed.resolveQuoted(colName, resolver)
.getOrElse {
val fields = schema.fieldNames
val extraMsg = if (fields.exists(resolver(_, colName))) {
s"; did you mean to quote the `$colName` column?"
} else ""
val fieldsStr = fields.mkString(", ")
val errorMsg = s"""Cannot resolve column name "$colName" among (${fieldsStr})${extraMsg}"""
throw new AnalysisException(errorMsg)
}
}
private[sql] def numericColumns: Seq[Expression] = {
schema.fields.filter(_.dataType.isInstanceOf[NumericType]).map { n =>
queryExecution.analyzed.resolveQuoted(n.name, sparkSession.sessionState.analyzer.resolver).get
}
}
/**
* Get rows represented in Sequence by specific truncate and vertical requirement.
*
* @param numRows Number of rows to return
* @param truncate If set to more than 0, truncates strings to `truncate` characters and
* all cells will be aligned right.
*/
private[sql] def getRows(
numRows: Int,
truncate: Int): Seq[Seq[String]] = {
val newDf = toDF()
val castCols = newDf.logicalPlan.output.map { col =>
// Since binary types in top-level schema fields have a specific format to print,
      // we do not cast them to strings here.
if (col.dataType == BinaryType) {
Column(col)
} else {
Column(col).cast(StringType)
}
}
val data = newDf.select(castCols: _*).take(numRows + 1)
// For array values, replace Seq and Array with square brackets
// For cells that are beyond `truncate` characters, replace it with the
// first `truncate-3` and "..."
schema.fieldNames.toSeq +: data.map { row =>
row.toSeq.map { cell =>
val str = cell match {
case null => "null"
case binary: Array[Byte] => binary.map("%02X".format(_)).mkString("[", " ", "]")
case _ => cell.toString
}
if (truncate > 0 && str.length > truncate) {
// do not show ellipses for strings shorter than 4 characters.
if (truncate < 4) str.substring(0, truncate)
else str.substring(0, truncate - 3) + "..."
} else {
str
}
}: Seq[String]
}
}
/**
* Compose the string representing rows for output
*
* @param _numRows Number of rows to show
* @param truncate If set to more than 0, truncates strings to `truncate` characters and
* all cells will be aligned right.
* @param vertical If set to true, prints output rows vertically (one line per column value).
*/
private[sql] def showString(
_numRows: Int,
truncate: Int = 20,
vertical: Boolean = false): String = {
val numRows = _numRows.max(0).min(ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH - 1)
// Get rows represented by Seq[Seq[String]], we may get one more line if it has more data.
val tmpRows = getRows(numRows, truncate)
val hasMoreData = tmpRows.length - 1 > numRows
val rows = tmpRows.take(numRows + 1)
val sb = new StringBuilder
val numCols = schema.fieldNames.length
// We set a minimum column width at '3'
val minimumColWidth = 3
if (!vertical) {
// Initialise the width of each column to a minimum value
val colWidths = Array.fill(numCols)(minimumColWidth)
// Compute the width of each column
for (row <- rows) {
for ((cell, i) <- row.zipWithIndex) {
colWidths(i) = math.max(colWidths(i), Utils.stringHalfWidth(cell))
}
}
val paddedRows = rows.map { row =>
row.zipWithIndex.map { case (cell, i) =>
if (truncate > 0) {
StringUtils.leftPad(cell, colWidths(i) - Utils.stringHalfWidth(cell) + cell.length)
} else {
StringUtils.rightPad(cell, colWidths(i) - Utils.stringHalfWidth(cell) + cell.length)
}
}
}
// Create SeparateLine
val sep: String = colWidths.map("-" * _).addString(sb, "+", "+", "+\\n").toString()
// column names
paddedRows.head.addString(sb, "|", "|", "|\\n")
sb.append(sep)
// data
paddedRows.tail.foreach(_.addString(sb, "|", "|", "|\\n"))
sb.append(sep)
} else {
// Extended display mode enabled
val fieldNames = rows.head
val dataRows = rows.tail
// Compute the width of field name and data columns
val fieldNameColWidth = fieldNames.foldLeft(minimumColWidth) { case (curMax, fieldName) =>
math.max(curMax, Utils.stringHalfWidth(fieldName))
}
val dataColWidth = dataRows.foldLeft(minimumColWidth) { case (curMax, row) =>
math.max(curMax, row.map(cell => Utils.stringHalfWidth(cell)).max)
}
dataRows.zipWithIndex.foreach { case (row, i) =>
// "+ 5" in size means a character length except for padded names and data
val rowHeader = StringUtils.rightPad(
s"-RECORD $i", fieldNameColWidth + dataColWidth + 5, "-")
sb.append(rowHeader).append("\\n")
row.zipWithIndex.map { case (cell, j) =>
val fieldName = StringUtils.rightPad(fieldNames(j),
fieldNameColWidth - Utils.stringHalfWidth(fieldNames(j)) + fieldNames(j).length)
val data = StringUtils.rightPad(cell,
dataColWidth - Utils.stringHalfWidth(cell) + cell.length)
s" $fieldName | $data "
}.addString(sb, "", "\\n", "\\n")
}
}
// Print a footer
if (vertical && rows.tail.isEmpty) {
// In a vertical mode, print an empty row set explicitly
sb.append("(0 rows)\\n")
} else if (hasMoreData) {
// For Data that has more than "numRows" records
val rowsString = if (numRows == 1) "row" else "rows"
sb.append(s"only showing top $numRows $rowsString\\n")
}
sb.toString()
}
override def toString: String = {
try {
val builder = new StringBuilder
val fields = schema.take(2).map {
case f => s"${f.name}: ${f.dataType.simpleString(2)}"
}
builder.append("[")
builder.append(fields.mkString(", "))
if (schema.length > 2) {
if (schema.length - fields.size == 1) {
builder.append(" ... 1 more field")
} else {
builder.append(" ... " + (schema.length - 2) + " more fields")
}
}
builder.append("]").toString()
} catch {
case NonFatal(e) =>
s"Invalid tree; ${e.getMessage}:\\n$queryExecution"
}
}
/**
   * Converts this strongly typed collection of data to a generic DataFrame. In contrast to the
   * strongly typed objects that Dataset operations work on, a DataFrame returns generic [[Row]]
* objects that allow fields to be accessed by ordinal or name.
*
* @group basic
* @since 1.6.0
*/
// This is declared with parentheses to prevent the Scala compiler from treating
// `ds.toDF("1")` as invoking this toDF and then apply on the returned DataFrame.
def toDF(): DataFrame = new Dataset[Row](queryExecution, RowEncoder(schema))
/**
* Returns a new Dataset where each record has been mapped on to the specified type. The
* method used to map columns depend on the type of `U`:
* <ul>
* <li>When `U` is a class, fields for the class will be mapped to columns of the same name
* (case sensitivity is determined by `spark.sql.caseSensitive`).</li>
* <li>When `U` is a tuple, the columns will be mapped by ordinal (i.e. the first column will
* be assigned to `_1`).</li>
* <li>When `U` is a primitive type (i.e. String, Int, etc), then the first column of the
* `DataFrame` will be used.</li>
* </ul>
*
* If the schema of the Dataset does not match the desired `U` type, you can use `select`
* along with `alias` or `as` to rearrange or rename as required.
*
* Note that `as[]` only changes the view of the data that is passed into typed operations,
* such as `map()`, and does not eagerly project away any columns that are not present in
* the specified class.
*
* @group basic
* @since 1.6.0
*/
def as[U : Encoder]: Dataset[U] = Dataset[U](sparkSession, logicalPlan)
/**
* Converts this strongly typed collection of data to generic `DataFrame` with columns renamed.
* This can be quite convenient in conversion from an RDD of tuples into a `DataFrame` with
* meaningful names. For example:
* {{{
* val rdd: RDD[(Int, String)] = ...
* rdd.toDF() // this implicit conversion creates a DataFrame with column name `_1` and `_2`
* rdd.toDF("id", "name") // this creates a DataFrame with column name "id" and "name"
* }}}
*
* @group basic
* @since 2.0.0
*/
@scala.annotation.varargs
def toDF(colNames: String*): DataFrame = {
require(schema.size == colNames.size,
"The number of columns doesn't match.\\n" +
s"Old column names (${schema.size}): " + schema.fields.map(_.name).mkString(", ") + "\\n" +
s"New column names (${colNames.size}): " + colNames.mkString(", "))
val newCols = logicalPlan.output.zip(colNames).map { case (oldAttribute, newName) =>
Column(oldAttribute).as(newName)
}
select(newCols : _*)
}
/**
* Returns the schema of this Dataset.
*
* @group basic
* @since 1.6.0
*/
def schema: StructType = sparkSession.withActive {
queryExecution.analyzed.schema
}
/**
* Prints the schema to the console in a nice tree format.
*
* @group basic
* @since 1.6.0
*/
def printSchema(): Unit = printSchema(Int.MaxValue)
// scalastyle:off println
/**
* Prints the schema up to the given level to the console in a nice tree format.
*
* @group basic
* @since 3.0.0
*/
def printSchema(level: Int): Unit = println(schema.treeString(level))
// scalastyle:on println
/**
* Prints the plans (logical and physical) with a format specified by a given explain mode.
*
* @param mode specifies the expected output format of plans.
* <ul>
* <li>`simple` Print only a physical plan.</li>
* <li>`extended`: Print both logical and physical plans.</li>
* <li>`codegen`: Print a physical plan and generated codes if they are
* available.</li>
* <li>`cost`: Print a logical plan and statistics if they are available.</li>
* <li>`formatted`: Split explain output into two sections: a physical plan outline
* and node details.</li>
* </ul>
* @group basic
* @since 3.0.0
*/
def explain(mode: String): Unit = sparkSession.withActive {
// Because temporary views are resolved during analysis when we create a Dataset, and
// `ExplainCommand` analyzes input query plan and resolves temporary views again. Using
// `ExplainCommand` here will probably output different query plans, compared to the results
// of evaluation of the Dataset. So just output QueryExecution's query plans here.
// scalastyle:off println
println(queryExecution.explainString(ExplainMode.fromString(mode)))
// scalastyle:on println
}
/**
* Prints the plans (logical and physical) to the console for debugging purposes.
*
* @param extended default `false`. If `false`, prints only the physical plan.
*
* @group basic
* @since 1.6.0
*/
def explain(extended: Boolean): Unit = if (extended) {
explain(ExtendedMode.name)
} else {
explain(SimpleMode.name)
}
/**
* Prints the physical plan to the console for debugging purposes.
*
* @group basic
* @since 1.6.0
*/
def explain(): Unit = explain(SimpleMode.name)
/**
* Returns all column names and their data types as an array.
*
* @group basic
* @since 1.6.0
*/
def dtypes: Array[(String, String)] = schema.fields.map { field =>
(field.name, field.dataType.toString)
}
/**
* Returns all column names as an array.
*
* @group basic
* @since 1.6.0
*/
def columns: Array[String] = schema.fields.map(_.name)
/**
* Returns true if the `collect` and `take` methods can be run locally
* (without any Spark executors).
*
* @group basic
* @since 1.6.0
*/
def isLocal: Boolean = logicalPlan.isInstanceOf[LocalRelation]
/**
* Returns true if the `Dataset` is empty.
*
* @group basic
* @since 2.4.0
*/
def isEmpty: Boolean = withAction("isEmpty", select().queryExecution) { plan =>
plan.executeTake(1).isEmpty
}
/**
* Returns true if this Dataset contains one or more sources that continuously
* return data as it arrives. A Dataset that reads data from a streaming source
* must be executed as a `StreamingQuery` using the `start()` method in
* `DataStreamWriter`. Methods that return a single answer, e.g. `count()` or
* `collect()`, will throw an [[AnalysisException]] when there is a streaming
* source present.
*
* @group streaming
* @since 2.0.0
*/
def isStreaming: Boolean = logicalPlan.isStreaming
/**
* Eagerly checkpoint a Dataset and return the new Dataset. Checkpointing can be used to truncate
* the logical plan of this Dataset, which is especially useful in iterative algorithms where the
* plan may grow exponentially. It will be saved to files inside the checkpoint
* directory set with `SparkContext#setCheckpointDir`.
*
* @group basic
* @since 2.1.0
*/
def checkpoint(): Dataset[T] = checkpoint(eager = true, reliableCheckpoint = true)
/**
* Returns a checkpointed version of this Dataset. Checkpointing can be used to truncate the
* logical plan of this Dataset, which is especially useful in iterative algorithms where the
* plan may grow exponentially. It will be saved to files inside the checkpoint
* directory set with `SparkContext#setCheckpointDir`.
*
* @group basic
* @since 2.1.0
*/
def checkpoint(eager: Boolean): Dataset[T] = checkpoint(eager = eager, reliableCheckpoint = true)
/**
* Eagerly locally checkpoints a Dataset and return the new Dataset. Checkpointing can be
* used to truncate the logical plan of this Dataset, which is especially useful in iterative
* algorithms where the plan may grow exponentially. Local checkpoints are written to executor
   * storage and, despite being potentially faster, they are unreliable and may compromise job completion.
*
* @group basic
* @since 2.3.0
*/
def localCheckpoint(): Dataset[T] = checkpoint(eager = true, reliableCheckpoint = false)
/**
* Locally checkpoints a Dataset and return the new Dataset. Checkpointing can be used to truncate
* the logical plan of this Dataset, which is especially useful in iterative algorithms where the
   * plan may grow exponentially. Local checkpoints are written to executor storage and, despite
   * being potentially faster, they are unreliable and may compromise job completion.
*
* @group basic
* @since 2.3.0
*/
def localCheckpoint(eager: Boolean): Dataset[T] = checkpoint(
eager = eager,
reliableCheckpoint = false
)
/**
* Returns a checkpointed version of this Dataset.
*
* @param eager Whether to checkpoint this dataframe immediately
* @param reliableCheckpoint Whether to create a reliable checkpoint saved to files inside the
* checkpoint directory. If false creates a local checkpoint using
* the caching subsystem
*/
private def checkpoint(eager: Boolean, reliableCheckpoint: Boolean): Dataset[T] = {
val actionName = if (reliableCheckpoint) "checkpoint" else "localCheckpoint"
withAction(actionName, queryExecution) { physicalPlan =>
val internalRdd = physicalPlan.execute().map(_.copy())
if (reliableCheckpoint) {
internalRdd.checkpoint()
} else {
internalRdd.localCheckpoint()
}
if (eager) {
internalRdd.count()
}
// Takes the first leaf partitioning whenever we see a `PartitioningCollection`. Otherwise the
// size of `PartitioningCollection` may grow exponentially for queries involving deep inner
// joins.
def firstLeafPartitioning(partitioning: Partitioning): Partitioning = {
partitioning match {
case p: PartitioningCollection => firstLeafPartitioning(p.partitionings.head)
case p => p
}
}
val outputPartitioning = firstLeafPartitioning(physicalPlan.outputPartitioning)
Dataset.ofRows(
sparkSession,
LogicalRDD(
logicalPlan.output,
internalRdd,
outputPartitioning,
physicalPlan.outputOrdering,
isStreaming
)(sparkSession)).as[T]
}
}
/**
* Defines an event time watermark for this [[Dataset]]. A watermark tracks a point in time
* before which we assume no more late data is going to arrive.
*
* Spark will use this watermark for several purposes:
* <ul>
* <li>To know when a given time window aggregation can be finalized and thus can be emitted
* when using output modes that do not allow updates.</li>
* <li>To minimize the amount of state that we need to keep for on-going aggregations,
* `mapGroupsWithState` and `dropDuplicates` operators.</li>
* </ul>
* The current watermark is computed by looking at the `MAX(eventTime)` seen across
* all of the partitions in the query minus a user specified `delayThreshold`. Due to the cost
* of coordinating this value across partitions, the actual watermark used is only guaranteed
* to be at least `delayThreshold` behind the actual event time. In some cases we may still
* process records that arrive more than `delayThreshold` late.
*
* @param eventTime the name of the column that contains the event time of the row.
   * @param delayThreshold the minimum delay to wait for data to arrive late, relative to the latest
* record that has been processed in the form of an interval
* (e.g. "1 minute" or "5 hours"). NOTE: This should not be negative.
*
* @group streaming
* @since 2.1.0
*/
// We only accept an existing column name, not a derived column here as a watermark that is
  // defined on a derived column cannot be referenced elsewhere in the plan.
def withWatermark(eventTime: String, delayThreshold: String): Dataset[T] = withTypedPlan {
val parsedDelay =
try {
IntervalUtils.stringToInterval(UTF8String.fromString(delayThreshold))
} catch {
case e: IllegalArgumentException =>
throw new AnalysisException(
s"Unable to parse time delay '$delayThreshold'",
cause = Some(e))
}
require(!IntervalUtils.isNegative(parsedDelay),
s"delay threshold ($delayThreshold) should not be negative.")
EliminateEventTimeWatermark(
EventTimeWatermark(UnresolvedAttribute(eventTime), parsedDelay, logicalPlan))
}
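  // Hypothetical usage sketch (not part of the original source): a streaming
  // windowed count that tolerates events arriving up to 10 minutes late,
  // using the standard `window` function from org.apache.spark.sql.functions.
  //
  //   df.withWatermark("eventTime", "10 minutes")
  //     .groupBy(window($"eventTime", "5 minutes"))
  //     .count()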
/**
* Displays the Dataset in a tabular form. Strings more than 20 characters will be truncated,
* and all cells will be aligned right. For example:
* {{{
* year month AVG('Adj Close) MAX('Adj Close)
* 1980 12 0.503218 0.595103
* 1981 01 0.523289 0.570307
* 1982 02 0.436504 0.475256
* 1983 03 0.410516 0.442194
* 1984 04 0.450090 0.483521
* }}}
*
* @param numRows Number of rows to show
*
* @group action
* @since 1.6.0
*/
def show(numRows: Int): Unit = show(numRows, truncate = true)
/**
* Displays the top 20 rows of Dataset in a tabular form. Strings more than 20 characters
* will be truncated, and all cells will be aligned right.
*
* @group action
* @since 1.6.0
*/
def show(): Unit = show(20)
/**
* Displays the top 20 rows of Dataset in a tabular form.
*
   * @param truncate Whether to truncate long strings. If true, strings longer than 20 characters will
* be truncated and all cells will be aligned right
*
* @group action
* @since 1.6.0
*/
def show(truncate: Boolean): Unit = show(20, truncate)
/**
* Displays the Dataset in a tabular form. For example:
* {{{
* year month AVG('Adj Close) MAX('Adj Close)
* 1980 12 0.503218 0.595103
* 1981 01 0.523289 0.570307
* 1982 02 0.436504 0.475256
* 1983 03 0.410516 0.442194
* 1984 04 0.450090 0.483521
* }}}
* @param numRows Number of rows to show
   * @param truncate Whether to truncate long strings. If true, strings longer than 20 characters will
* be truncated and all cells will be aligned right
*
* @group action
* @since 1.6.0
*/
// scalastyle:off println
def show(numRows: Int, truncate: Boolean): Unit = if (truncate) {
println(showString(numRows, truncate = 20))
} else {
println(showString(numRows, truncate = 0))
  }
  // scalastyle:on println
/**
* Displays the Dataset in a tabular form. For example:
* {{{
* year month AVG('Adj Close) MAX('Adj Close)
* 1980 12 0.503218 0.595103
* 1981 01 0.523289 0.570307
* 1982 02 0.436504 0.475256
* 1983 03 0.410516 0.442194
* 1984 04 0.450090 0.483521
* }}}
*
* @param numRows Number of rows to show
* @param truncate If set to more than 0, truncates strings to `truncate` characters and
* all cells will be aligned right.
* @group action
* @since 1.6.0
*/
def show(numRows: Int, truncate: Int): Unit = show(numRows, truncate, vertical = false)
/**
* Displays the Dataset in a tabular form. For example:
* {{{
* year month AVG('Adj Close) MAX('Adj Close)
* 1980 12 0.503218 0.595103
* 1981 01 0.523289 0.570307
* 1982 02 0.436504 0.475256
* 1983 03 0.410516 0.442194
* 1984 04 0.450090 0.483521
* }}}
*
   * If `vertical` is enabled, this command prints output rows vertically (one line per column value):
*
* {{{
* -RECORD 0-------------------
* year | 1980
* month | 12
* AVG('Adj Close) | 0.503218
* AVG('Adj Close) | 0.595103
* -RECORD 1-------------------
* year | 1981
* month | 01
* AVG('Adj Close) | 0.523289
* AVG('Adj Close) | 0.570307
* -RECORD 2-------------------
* year | 1982
* month | 02
* AVG('Adj Close) | 0.436504
* AVG('Adj Close) | 0.475256
* -RECORD 3-------------------
* year | 1983
* month | 03
* AVG('Adj Close) | 0.410516
* AVG('Adj Close) | 0.442194
* -RECORD 4-------------------
* year | 1984
* month | 04
* AVG('Adj Close) | 0.450090
* AVG('Adj Close) | 0.483521
* }}}
*
* @param numRows Number of rows to show
* @param truncate If set to more than 0, truncates strings to `truncate` characters and
* all cells will be aligned right.
* @param vertical If set to true, prints output rows vertically (one line per column value).
* @group action
* @since 2.3.0
*/
// scalastyle:off println
def show(numRows: Int, truncate: Int, vertical: Boolean): Unit =
println(showString(numRows, truncate, vertical))
// scalastyle:on println
/**
* Returns a [[DataFrameNaFunctions]] for working with missing data.
* {{{
* // Dropping rows containing any null values.
* ds.na.drop()
* }}}
*
* @group untypedrel
* @since 1.6.0
*/
def na: DataFrameNaFunctions = new DataFrameNaFunctions(toDF())
/**
* Returns a [[DataFrameStatFunctions]] for working statistic functions support.
* {{{
* // Finding frequent items in column with name 'a'.
* ds.stat.freqItems(Seq("a"))
* }}}
*
* @group untypedrel
* @since 1.6.0
*/
def stat: DataFrameStatFunctions = new DataFrameStatFunctions(toDF())
/**
* Join with another `DataFrame`.
*
* Behaves as an INNER JOIN and requires a subsequent join predicate.
*
* @param right Right side of the join operation.
*
* @group untypedrel
* @since 2.0.0
*/
def join(right: Dataset[_]): DataFrame = withPlan {
Join(logicalPlan, right.logicalPlan, joinType = Inner, None, JoinHint.NONE)
}
/**
* Inner equi-join with another `DataFrame` using the given column.
*
* Different from other join functions, the join column will only appear once in the output,
* i.e. similar to SQL's `JOIN USING` syntax.
*
* {{{
* // Joining df1 and df2 using the column "user_id"
* df1.join(df2, "user_id")
* }}}
*
* @param right Right side of the join operation.
* @param usingColumn Name of the column to join on. This column must exist on both sides.
*
* @note If you perform a self-join using this function without aliasing the input
* `DataFrame`s, you will NOT be able to reference any columns after the join, since
* there is no way to disambiguate which side of the join you would like to reference.
*
* @group untypedrel
* @since 2.0.0
*/
def join(right: Dataset[_], usingColumn: String): DataFrame = {
join(right, Seq(usingColumn))
}
/**
* Inner equi-join with another `DataFrame` using the given columns.
*
* Different from other join functions, the join columns will only appear once in the output,
* i.e. similar to SQL's `JOIN USING` syntax.
*
* {{{
* // Joining df1 and df2 using the columns "user_id" and "user_name"
* df1.join(df2, Seq("user_id", "user_name"))
* }}}
*
* @param right Right side of the join operation.
   * @param usingColumns Names of the columns to join on. These columns must exist on both sides.
*
* @note If you perform a self-join using this function without aliasing the input
* `DataFrame`s, you will NOT be able to reference any columns after the join, since
* there is no way to disambiguate which side of the join you would like to reference.
*
* @group untypedrel
* @since 2.0.0
*/
def join(right: Dataset[_], usingColumns: Seq[String]): DataFrame = {
join(right, usingColumns, "inner")
}
/**
* Equi-join with another `DataFrame` using the given columns. A cross join with a predicate
* is specified as an inner join. If you would explicitly like to perform a cross join use the
* `crossJoin` method.
*
* Different from other join functions, the join columns will only appear once in the output,
* i.e. similar to SQL's `JOIN USING` syntax.
*
* @param right Right side of the join operation.
   * @param usingColumns Names of the columns to join on. These columns must exist on both sides.
* @param joinType Type of join to perform. Default `inner`. Must be one of:
* `inner`, `cross`, `outer`, `full`, `fullouter`, `full_outer`, `left`,
* `leftouter`, `left_outer`, `right`, `rightouter`, `right_outer`,
   *                 `semi`, `leftsemi`, `left_semi`, `anti`, `leftanti`, `left_anti`.
*
* @note If you perform a self-join using this function without aliasing the input
* `DataFrame`s, you will NOT be able to reference any columns after the join, since
* there is no way to disambiguate which side of the join you would like to reference.
*
* @group untypedrel
* @since 2.0.0
*/
def join(right: Dataset[_], usingColumns: Seq[String], joinType: String): DataFrame = {
// Analyze the self join. The assumption is that the analyzer will disambiguate left vs right
// by creating a new instance for one of the branch.
val joined = sparkSession.sessionState.executePlan(
Join(logicalPlan, right.logicalPlan, joinType = JoinType(joinType), None, JoinHint.NONE))
.analyzed.asInstanceOf[Join]
withPlan {
Join(
joined.left,
joined.right,
UsingJoin(JoinType(joinType), usingColumns),
None,
JoinHint.NONE)
}
}
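  // Hypothetical usage sketch (not part of the original source): a left outer
  // equi-join on two key columns, with each key column appearing only once in
  // the output.
  //
  //   df1.join(df2, Seq("user_id", "user_name"), "left_outer")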
/**
* Inner join with another `DataFrame`, using the given join expression.
*
* {{{
* // The following two are equivalent:
* df1.join(df2, $"df1Key" === $"df2Key")
* df1.join(df2).where($"df1Key" === $"df2Key")
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
def join(right: Dataset[_], joinExprs: Column): DataFrame = join(right, joinExprs, "inner")
/**
* Join with another `DataFrame`, using the given join expression. The following performs
* a full outer join between `df1` and `df2`.
*
* {{{
* // Scala:
* import org.apache.spark.sql.functions._
* df1.join(df2, $"df1Key" === $"df2Key", "outer")
*
* // Java:
* import static org.apache.spark.sql.functions.*;
* df1.join(df2, col("df1Key").equalTo(col("df2Key")), "outer");
* }}}
*
* @param right Right side of the join.
* @param joinExprs Join expression.
* @param joinType Type of join to perform. Default `inner`. Must be one of:
* `inner`, `cross`, `outer`, `full`, `fullouter`, `full_outer`, `left`,
* `leftouter`, `left_outer`, `right`, `rightouter`, `right_outer`,
   * `semi`, `leftsemi`, `left_semi`, `anti`, `leftanti`, `left_anti`.
*
* @group untypedrel
* @since 2.0.0
*/
def join(right: Dataset[_], joinExprs: Column, joinType: String): DataFrame = {
// Note that in this function, we introduce a hack in the case of self-join to automatically
// resolve ambiguous join conditions into ones that might make sense [SPARK-6231].
// Consider this case: df.join(df, df("key") === df("key"))
// Since df("key") === df("key") is a trivially true condition, this actually becomes a
// cartesian join. However, most likely users expect to perform a self join using "key".
// With that assumption, this hack turns the trivially true condition into equality on join
// keys that are resolved to both sides.
// Trigger analysis so in the case of self-join, the analyzer will clone the plan.
// After the cloning, left and right side will have distinct expression ids.
val plan = withPlan(
Join(logicalPlan, right.logicalPlan, JoinType(joinType), Some(joinExprs.expr), JoinHint.NONE))
.queryExecution.analyzed.asInstanceOf[Join]
// If auto self join alias is disabled, return the plan.
if (!sparkSession.sessionState.conf.dataFrameSelfJoinAutoResolveAmbiguity) {
return withPlan(plan)
}
// If left/right have no output set intersection, return the plan.
val lanalyzed = withPlan(this.logicalPlan).queryExecution.analyzed
val ranalyzed = withPlan(right.logicalPlan).queryExecution.analyzed
if (lanalyzed.outputSet.intersect(ranalyzed.outputSet).isEmpty) {
return withPlan(plan)
}
    // Otherwise, find the trivially true predicates and automatically resolve them to both sides.
// By the time we get here, since we have already run analysis, all attributes should've been
// resolved and become AttributeReference.
val cond = plan.condition.map { _.transform {
case catalyst.expressions.EqualTo(a: AttributeReference, b: AttributeReference)
if a.sameRef(b) =>
catalyst.expressions.EqualTo(
withPlan(plan.left).resolve(a.name),
withPlan(plan.right).resolve(b.name))
case catalyst.expressions.EqualNullSafe(a: AttributeReference, b: AttributeReference)
if a.sameRef(b) =>
catalyst.expressions.EqualNullSafe(
withPlan(plan.left).resolve(a.name),
withPlan(plan.right).resolve(b.name))
}}
withPlan {
plan.copy(condition = cond)
}
}
/**
* Explicit cartesian join with another `DataFrame`.
*
* @param right Right side of the join operation.
*
* @note Cartesian joins are very expensive without an extra filter that can be pushed down.
*
* @group untypedrel
* @since 2.1.0
*/
def crossJoin(right: Dataset[_]): DataFrame = withPlan {
Join(logicalPlan, right.logicalPlan, joinType = Cross, None, JoinHint.NONE)
}
/**
* Joins this Dataset returning a `Tuple2` for each pair where `condition` evaluates to
* true.
*
* This is similar to the relation `join` function with one important difference in the
* result schema. Since `joinWith` preserves objects present on either side of the join, the
* result schema is similarly nested into a tuple under the column names `_1` and `_2`.
*
* This type of join can be useful both for preserving type-safety with the original object
* types as well as working with relational data where either side of the join has column
* names in common.
*
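   * A minimal sketch, assuming hypothetical `Dataset[Person]` and `Dataset[Dept]` inputs:
   *
   * {{{
   *   // `people`, `depts`, and their field names are illustrative
   *   val joined: Dataset[(Person, Dept)] =
   *     people.joinWith(depts, people("deptId") === depts("id"), "inner")
   * }}}
   *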
* @param other Right side of the join.
* @param condition Join expression.
* @param joinType Type of join to perform. Default `inner`. Must be one of:
   * `inner`, `cross`, `outer`, `full`, `fullouter`, `full_outer`, `left`,
* `leftouter`, `left_outer`, `right`, `rightouter`, `right_outer`.
*
* @group typedrel
* @since 1.6.0
*/
def joinWith[U](other: Dataset[U], condition: Column, joinType: String): Dataset[(T, U)] = {
    // Create a Join node and resolve it first, to get join condition resolved, self-join resolved,
// etc.
val joined = sparkSession.sessionState.executePlan(
Join(
this.logicalPlan,
other.logicalPlan,
JoinType(joinType),
Some(condition.expr),
JoinHint.NONE)).analyzed.asInstanceOf[Join]
if (joined.joinType == LeftSemi || joined.joinType == LeftAnti) {
throw new AnalysisException("Invalid join type in joinWith: " + joined.joinType.sql)
}
implicit val tuple2Encoder: Encoder[(T, U)] =
ExpressionEncoder.tuple(this.exprEnc, other.exprEnc)
val leftResultExpr = {
if (!this.exprEnc.isSerializedAsStructForTopLevel) {
assert(joined.left.output.length == 1)
Alias(joined.left.output.head, "_1")()
} else {
Alias(CreateStruct(joined.left.output), "_1")()
}
}
val rightResultExpr = {
if (!other.exprEnc.isSerializedAsStructForTopLevel) {
assert(joined.right.output.length == 1)
Alias(joined.right.output.head, "_2")()
} else {
Alias(CreateStruct(joined.right.output), "_2")()
}
}
if (joined.joinType.isInstanceOf[InnerLike]) {
// For inner joins, we can directly perform the join and then can project the join
// results into structs. This ensures that data remains flat during shuffles /
// exchanges (unlike the outer join path, which nests the data before shuffling).
withTypedPlan(Project(Seq(leftResultExpr, rightResultExpr), joined))
} else { // outer joins
      // For both join sides, combine all outputs into a single column and alias it with "_1"
      // or "_2", to match the schema for the encoder of the join result.
// Note that we do this before joining them, to enable the join operator to return null
// for one side, in cases like outer-join.
val left = Project(leftResultExpr :: Nil, joined.left)
val right = Project(rightResultExpr :: Nil, joined.right)
// Rewrites the join condition to make the attribute point to correct column/field,
// after we combine the outputs of each join side.
val conditionExpr = joined.condition.get transformUp {
case a: Attribute if joined.left.outputSet.contains(a) =>
if (!this.exprEnc.isSerializedAsStructForTopLevel) {
left.output.head
} else {
val index = joined.left.output.indexWhere(_.exprId == a.exprId)
GetStructField(left.output.head, index)
}
case a: Attribute if joined.right.outputSet.contains(a) =>
if (!other.exprEnc.isSerializedAsStructForTopLevel) {
right.output.head
} else {
val index = joined.right.output.indexWhere(_.exprId == a.exprId)
GetStructField(right.output.head, index)
}
}
withTypedPlan(Join(left, right, joined.joinType, Some(conditionExpr), JoinHint.NONE))
}
}
/**
   * Joins this Dataset with another Dataset using an inner equi-join, returning a `Tuple2` for
   * each pair where `condition` evaluates to true.
*
* @param other Right side of the join.
* @param condition Join expression.
*
* @group typedrel
* @since 1.6.0
*/
def joinWith[U](other: Dataset[U], condition: Column): Dataset[(T, U)] = {
joinWith(other, condition, "inner")
}
/**
* Returns a new Dataset with each partition sorted by the given expressions.
*
* This is the same operation as "SORT BY" in SQL (Hive QL).
*
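   * For example, with a hypothetical "timestamp" column:
   *
   * {{{
   *   ds.sortWithinPartitions($"timestamp")
   * }}}
   *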
* @group typedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def sortWithinPartitions(sortCol: String, sortCols: String*): Dataset[T] = {
sortWithinPartitions((sortCol +: sortCols).map(Column(_)) : _*)
}
/**
* Returns a new Dataset with each partition sorted by the given expressions.
*
* This is the same operation as "SORT BY" in SQL (Hive QL).
*
* @group typedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def sortWithinPartitions(sortExprs: Column*): Dataset[T] = {
sortInternal(global = false, sortExprs)
}
/**
* Returns a new Dataset sorted by the specified column, all in ascending order.
* {{{
* // The following 3 are equivalent
* ds.sort("sortcol")
* ds.sort($"sortcol")
* ds.sort($"sortcol".asc)
* }}}
*
* @group typedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def sort(sortCol: String, sortCols: String*): Dataset[T] = {
sort((sortCol +: sortCols).map(Column(_)) : _*)
}
/**
* Returns a new Dataset sorted by the given expressions. For example:
* {{{
* ds.sort($"col1", $"col2".desc)
* }}}
*
* @group typedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def sort(sortExprs: Column*): Dataset[T] = {
sortInternal(global = true, sortExprs)
}
/**
* Returns a new Dataset sorted by the given expressions.
* This is an alias of the `sort` function.
*
* @group typedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def orderBy(sortCol: String, sortCols: String*): Dataset[T] = sort(sortCol, sortCols : _*)
/**
* Returns a new Dataset sorted by the given expressions.
* This is an alias of the `sort` function.
*
* @group typedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def orderBy(sortExprs: Column*): Dataset[T] = sort(sortExprs : _*)
/**
* Selects column based on the column name and returns it as a [[Column]].
*
   * @note The column name can also reference a nested column like `a.b`.
*
* @group untypedrel
* @since 2.0.0
*/
def apply(colName: String): Column = col(colName)
/**
* Specifies some hint on the current Dataset. As an example, the following code specifies
   * that one of the plans can be broadcast:
*
* {{{
* df1.join(df2.hint("broadcast"))
* }}}
*
* @group basic
* @since 2.2.0
*/
@scala.annotation.varargs
def hint(name: String, parameters: Any*): Dataset[T] = withTypedPlan {
UnresolvedHint(name, parameters, logicalPlan)
}
/**
* Selects column based on the column name and returns it as a [[Column]].
*
   * @note The column name can also reference a nested column like `a.b`.
*
* @group untypedrel
* @since 2.0.0
*/
def col(colName: String): Column = colName match {
case "*" =>
Column(ResolvedStar(queryExecution.analyzed.output))
case _ =>
if (sqlContext.conf.supportQuotedRegexColumnName) {
colRegex(colName)
} else {
Column(addDataFrameIdToCol(resolve(colName)))
}
}
// Attach the dataset id and column position to the column reference, so that we can detect
// ambiguous self-join correctly. See the rule `DetectAmbiguousSelfJoin`.
// This must be called before we return a `Column` that contains `AttributeReference`.
  // Note that the metadata added here is only available in the analyzer, as the analyzer rule
// `DetectAmbiguousSelfJoin` will remove it.
private def addDataFrameIdToCol(expr: NamedExpression): NamedExpression = {
val newExpr = expr transform {
case a: AttributeReference
if sparkSession.sessionState.conf.getConf(SQLConf.FAIL_AMBIGUOUS_SELF_JOIN_ENABLED) =>
val metadata = new MetadataBuilder()
.withMetadata(a.metadata)
.putLong(Dataset.DATASET_ID_KEY, id)
.putLong(Dataset.COL_POS_KEY, logicalPlan.output.indexWhere(a.semanticEquals))
.build()
a.withMetadata(metadata)
}
newExpr.asInstanceOf[NamedExpression]
}
/**
* Selects column based on the column name specified as a regex and returns it as [[Column]].
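   *
   * A minimal sketch, assuming the Dataset has columns whose names match the regex (the column
   * names are illustrative):
   *
   * {{{
   *   // Select every column whose name starts with "user_"
   *   ds.select(ds.colRegex("`user_.*`"))
   * }}}
   *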
* @group untypedrel
* @since 2.3.0
*/
def colRegex(colName: String): Column = {
val caseSensitive = sparkSession.sessionState.conf.caseSensitiveAnalysis
colName match {
case ParserUtils.escapedIdentifier(columnNameRegex) =>
Column(UnresolvedRegex(columnNameRegex, None, caseSensitive))
case ParserUtils.qualifiedEscapedIdentifier(nameParts, columnNameRegex) =>
Column(UnresolvedRegex(columnNameRegex, Some(nameParts), caseSensitive))
case _ =>
Column(addDataFrameIdToCol(resolve(colName)))
}
}
/**
* Returns a new Dataset with an alias set.
*
* @group typedrel
* @since 1.6.0
*/
def as(alias: String): Dataset[T] = withTypedPlan {
SubqueryAlias(alias, logicalPlan)
}
/**
* (Scala-specific) Returns a new Dataset with an alias set.
*
* @group typedrel
* @since 2.0.0
*/
def as(alias: Symbol): Dataset[T] = as(alias.name)
/**
* Returns a new Dataset with an alias set. Same as `as`.
*
* @group typedrel
* @since 2.0.0
*/
def alias(alias: String): Dataset[T] = as(alias)
/**
* (Scala-specific) Returns a new Dataset with an alias set. Same as `as`.
*
* @group typedrel
* @since 2.0.0
*/
def alias(alias: Symbol): Dataset[T] = as(alias)
/**
* Selects a set of column based expressions.
* {{{
* ds.select($"colA", $"colB" + 1)
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def select(cols: Column*): DataFrame = withPlan {
val untypedCols = cols.map {
case typedCol: TypedColumn[_, _] =>
// Checks if a `TypedColumn` has been inserted with
// specific input type and schema by `withInputType`.
val needInputType = typedCol.expr.find {
case ta: TypedAggregateExpression if ta.inputDeserializer.isEmpty => true
case _ => false
}.isDefined
if (!needInputType) {
typedCol
} else {
throw new AnalysisException(s"Typed column $typedCol that needs input type and schema " +
"cannot be passed in untyped `select` API. Use the typed `Dataset.select` API instead.")
}
case other => other
}
Project(untypedCols.map(_.named), logicalPlan)
}
/**
* Selects a set of columns. This is a variant of `select` that can only select
* existing columns using column names (i.e. cannot construct expressions).
*
* {{{
* // The following two are equivalent:
* ds.select("colA", "colB")
* ds.select($"colA", $"colB")
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def select(col: String, cols: String*): DataFrame = select((col +: cols).map(Column(_)) : _*)
/**
* Selects a set of SQL expressions. This is a variant of `select` that accepts
* SQL expressions.
*
* {{{
* // The following are equivalent:
* ds.selectExpr("colA", "colB as newName", "abs(colC)")
* ds.select(expr("colA"), expr("colB as newName"), expr("abs(colC)"))
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def selectExpr(exprs: String*): DataFrame = {
select(exprs.map { expr =>
Column(sparkSession.sessionState.sqlParser.parseExpression(expr))
}: _*)
}
/**
* Returns a new Dataset by computing the given [[Column]] expression for each element.
*
* {{{
* val ds = Seq(1, 2, 3).toDS()
* val newDS = ds.select(expr("value + 1").as[Int])
* }}}
*
* @group typedrel
* @since 1.6.0
*/
def select[U1](c1: TypedColumn[T, U1]): Dataset[U1] = {
implicit val encoder = c1.encoder
val project = Project(c1.withInputType(exprEnc, logicalPlan.output).named :: Nil, logicalPlan)
if (!encoder.isSerializedAsStructForTopLevel) {
new Dataset[U1](sparkSession, project, encoder)
} else {
// Flattens inner fields of U1
new Dataset[Tuple1[U1]](sparkSession, project, ExpressionEncoder.tuple(encoder)).map(_._1)
}
}
/**
* Internal helper function for building typed selects that return tuples. For simplicity and
* code reuse, we do this without the help of the type system and then use helper functions
* that cast appropriately for the user facing interface.
*/
protected def selectUntyped(columns: TypedColumn[_, _]*): Dataset[_] = {
val encoders = columns.map(_.encoder)
val namedColumns =
columns.map(_.withInputType(exprEnc, logicalPlan.output).named)
val execution = new QueryExecution(sparkSession, Project(namedColumns, logicalPlan))
new Dataset(execution, ExpressionEncoder.tuple(encoders))
}
/**
* Returns a new Dataset by computing the given [[Column]] expressions for each element.
*
* @group typedrel
* @since 1.6.0
*/
def select[U1, U2](c1: TypedColumn[T, U1], c2: TypedColumn[T, U2]): Dataset[(U1, U2)] =
selectUntyped(c1, c2).asInstanceOf[Dataset[(U1, U2)]]
/**
* Returns a new Dataset by computing the given [[Column]] expressions for each element.
*
* @group typedrel
* @since 1.6.0
*/
def select[U1, U2, U3](
c1: TypedColumn[T, U1],
c2: TypedColumn[T, U2],
c3: TypedColumn[T, U3]): Dataset[(U1, U2, U3)] =
selectUntyped(c1, c2, c3).asInstanceOf[Dataset[(U1, U2, U3)]]
/**
* Returns a new Dataset by computing the given [[Column]] expressions for each element.
*
* @group typedrel
* @since 1.6.0
*/
def select[U1, U2, U3, U4](
c1: TypedColumn[T, U1],
c2: TypedColumn[T, U2],
c3: TypedColumn[T, U3],
c4: TypedColumn[T, U4]): Dataset[(U1, U2, U3, U4)] =
selectUntyped(c1, c2, c3, c4).asInstanceOf[Dataset[(U1, U2, U3, U4)]]
/**
* Returns a new Dataset by computing the given [[Column]] expressions for each element.
*
* @group typedrel
* @since 1.6.0
*/
def select[U1, U2, U3, U4, U5](
c1: TypedColumn[T, U1],
c2: TypedColumn[T, U2],
c3: TypedColumn[T, U3],
c4: TypedColumn[T, U4],
c5: TypedColumn[T, U5]): Dataset[(U1, U2, U3, U4, U5)] =
selectUntyped(c1, c2, c3, c4, c5).asInstanceOf[Dataset[(U1, U2, U3, U4, U5)]]
/**
* Filters rows using the given condition.
* {{{
* // The following are equivalent:
* peopleDs.filter($"age" > 15)
* peopleDs.where($"age" > 15)
* }}}
*
* @group typedrel
* @since 1.6.0
*/
def filter(condition: Column): Dataset[T] = withTypedPlan {
Filter(condition.expr, logicalPlan)
}
/**
* Filters rows using the given SQL expression.
* {{{
* peopleDs.filter("age > 15")
* }}}
*
* @group typedrel
* @since 1.6.0
*/
def filter(conditionExpr: String): Dataset[T] = {
filter(Column(sparkSession.sessionState.sqlParser.parseExpression(conditionExpr)))
}
/**
* Filters rows using the given condition. This is an alias for `filter`.
* {{{
* // The following are equivalent:
* peopleDs.filter($"age" > 15)
* peopleDs.where($"age" > 15)
* }}}
*
* @group typedrel
* @since 1.6.0
*/
def where(condition: Column): Dataset[T] = filter(condition)
/**
* Filters rows using the given SQL expression.
* {{{
* peopleDs.where("age > 15")
* }}}
*
* @group typedrel
* @since 1.6.0
*/
def where(conditionExpr: String): Dataset[T] = {
filter(Column(sparkSession.sessionState.sqlParser.parseExpression(conditionExpr)))
}
/**
* Groups the Dataset using the specified columns, so we can run aggregation on them. See
* [[RelationalGroupedDataset]] for all the available aggregate functions.
*
* {{{
* // Compute the average for all numeric columns grouped by department.
* ds.groupBy($"department").avg()
*
* // Compute the max age and average salary, grouped by department and gender.
* ds.groupBy($"department", $"gender").agg(Map(
* "salary" -> "avg",
* "age" -> "max"
* ))
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def groupBy(cols: Column*): RelationalGroupedDataset = {
RelationalGroupedDataset(toDF(), cols.map(_.expr), RelationalGroupedDataset.GroupByType)
}
/**
* Create a multi-dimensional rollup for the current Dataset using the specified columns,
* so we can run aggregation on them.
* See [[RelationalGroupedDataset]] for all the available aggregate functions.
*
* {{{
   *   // Compute the average for all numeric columns rolled up by department and group.
* ds.rollup($"department", $"group").avg()
*
   *   // Compute the max age and average salary, rolled up by department and gender.
* ds.rollup($"department", $"gender").agg(Map(
* "salary" -> "avg",
* "age" -> "max"
* ))
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def rollup(cols: Column*): RelationalGroupedDataset = {
RelationalGroupedDataset(toDF(), cols.map(_.expr), RelationalGroupedDataset.RollupType)
}
/**
* Create a multi-dimensional cube for the current Dataset using the specified columns,
* so we can run aggregation on them.
* See [[RelationalGroupedDataset]] for all the available aggregate functions.
*
* {{{
* // Compute the average for all numeric columns cubed by department and group.
* ds.cube($"department", $"group").avg()
*
* // Compute the max age and average salary, cubed by department and gender.
* ds.cube($"department", $"gender").agg(Map(
* "salary" -> "avg",
* "age" -> "max"
* ))
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def cube(cols: Column*): RelationalGroupedDataset = {
RelationalGroupedDataset(toDF(), cols.map(_.expr), RelationalGroupedDataset.CubeType)
}
/**
* Groups the Dataset using the specified columns, so that we can run aggregation on them.
* See [[RelationalGroupedDataset]] for all the available aggregate functions.
*
* This is a variant of groupBy that can only group by existing columns using column names
* (i.e. cannot construct expressions).
*
* {{{
* // Compute the average for all numeric columns grouped by department.
* ds.groupBy("department").avg()
*
* // Compute the max age and average salary, grouped by department and gender.
* ds.groupBy($"department", $"gender").agg(Map(
* "salary" -> "avg",
* "age" -> "max"
* ))
* }}}
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def groupBy(col1: String, cols: String*): RelationalGroupedDataset = {
val colNames: Seq[String] = col1 +: cols
RelationalGroupedDataset(
toDF(), colNames.map(colName => resolve(colName)), RelationalGroupedDataset.GroupByType)
}
/**
* (Scala-specific)
* Reduces the elements of this Dataset using the specified binary function. The given `func`
* must be commutative and associative or the result may be non-deterministic.
*
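   * A minimal sketch on a Dataset of integers (assumes `spark.implicits._` is in scope):
   *
   * {{{
   *   val sum = Seq(1, 2, 3, 4).toDS().reduce(_ + _)  // 10
   * }}}
   *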
* @group action
* @since 1.6.0
*/
def reduce(func: (T, T) => T): T = withNewRDDExecutionId {
rdd.reduce(func)
}
/**
* (Java-specific)
* Reduces the elements of this Dataset using the specified binary function. The given `func`
* must be commutative and associative or the result may be non-deterministic.
*
* @group action
* @since 1.6.0
*/
def reduce(func: ReduceFunction[T]): T = reduce(func.call(_, _))
/**
* (Scala-specific)
* Returns a [[KeyValueGroupedDataset]] where the data is grouped by the given key `func`.
*
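   * For example, grouping strings by their length (assumes `spark.implicits._` is in scope):
   *
   * {{{
   *   val ds = Seq("apple", "avocado", "banana").toDS()
   *   val countsByLength = ds.groupByKey(_.length).count()
   * }}}
   *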
* @group typedrel
* @since 2.0.0
*/
def groupByKey[K: Encoder](func: T => K): KeyValueGroupedDataset[K, T] = {
val withGroupingKey = AppendColumns(func, logicalPlan)
val executed = sparkSession.sessionState.executePlan(withGroupingKey)
new KeyValueGroupedDataset(
encoderFor[K],
encoderFor[T],
executed,
logicalPlan.output,
withGroupingKey.newColumns)
}
/**
* (Java-specific)
* Returns a [[KeyValueGroupedDataset]] where the data is grouped by the given key `func`.
*
* @group typedrel
* @since 2.0.0
*/
def groupByKey[K](func: MapFunction[T, K], encoder: Encoder[K]): KeyValueGroupedDataset[K, T] =
groupByKey(func.call(_))(encoder)
/**
* Create a multi-dimensional rollup for the current Dataset using the specified columns,
* so we can run aggregation on them.
* See [[RelationalGroupedDataset]] for all the available aggregate functions.
*
* This is a variant of rollup that can only group by existing columns using column names
* (i.e. cannot construct expressions).
*
* {{{
   *   // Compute the average for all numeric columns rolled up by department and group.
* ds.rollup("department", "group").avg()
*
   *   // Compute the max age and average salary, rolled up by department and gender.
* ds.rollup($"department", $"gender").agg(Map(
* "salary" -> "avg",
* "age" -> "max"
* ))
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def rollup(col1: String, cols: String*): RelationalGroupedDataset = {
val colNames: Seq[String] = col1 +: cols
RelationalGroupedDataset(
toDF(), colNames.map(colName => resolve(colName)), RelationalGroupedDataset.RollupType)
}
/**
* Create a multi-dimensional cube for the current Dataset using the specified columns,
* so we can run aggregation on them.
* See [[RelationalGroupedDataset]] for all the available aggregate functions.
*
* This is a variant of cube that can only group by existing columns using column names
* (i.e. cannot construct expressions).
*
* {{{
* // Compute the average for all numeric columns cubed by department and group.
* ds.cube("department", "group").avg()
*
* // Compute the max age and average salary, cubed by department and gender.
* ds.cube($"department", $"gender").agg(Map(
* "salary" -> "avg",
* "age" -> "max"
* ))
* }}}
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def cube(col1: String, cols: String*): RelationalGroupedDataset = {
val colNames: Seq[String] = col1 +: cols
RelationalGroupedDataset(
toDF(), colNames.map(colName => resolve(colName)), RelationalGroupedDataset.CubeType)
}
/**
* (Scala-specific) Aggregates on the entire Dataset without groups.
* {{{
* // ds.agg(...) is a shorthand for ds.groupBy().agg(...)
* ds.agg("age" -> "max", "salary" -> "avg")
* ds.groupBy().agg("age" -> "max", "salary" -> "avg")
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
def agg(aggExpr: (String, String), aggExprs: (String, String)*): DataFrame = {
groupBy().agg(aggExpr, aggExprs : _*)
}
/**
* (Scala-specific) Aggregates on the entire Dataset without groups.
* {{{
* // ds.agg(...) is a shorthand for ds.groupBy().agg(...)
* ds.agg(Map("age" -> "max", "salary" -> "avg"))
* ds.groupBy().agg(Map("age" -> "max", "salary" -> "avg"))
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
def agg(exprs: Map[String, String]): DataFrame = groupBy().agg(exprs)
/**
* (Java-specific) Aggregates on the entire Dataset without groups.
* {{{
* // ds.agg(...) is a shorthand for ds.groupBy().agg(...)
* ds.agg(Map("age" -> "max", "salary" -> "avg"))
* ds.groupBy().agg(Map("age" -> "max", "salary" -> "avg"))
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
def agg(exprs: java.util.Map[String, String]): DataFrame = groupBy().agg(exprs)
/**
* Aggregates on the entire Dataset without groups.
* {{{
* // ds.agg(...) is a shorthand for ds.groupBy().agg(...)
* ds.agg(max($"age"), avg($"salary"))
* ds.groupBy().agg(max($"age"), avg($"salary"))
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def agg(expr: Column, exprs: Column*): DataFrame = groupBy().agg(expr, exprs : _*)
/**
* Define (named) metrics to observe on the Dataset. This method returns an 'observed' Dataset
* that returns the same result as the input, with the following guarantees:
* <ul>
* <li>It will compute the defined aggregates (metrics) on all the data that is flowing through
* the Dataset at that point.</li>
* <li>It will report the value of the defined aggregate columns as soon as we reach a completion
* point. A completion point is either the end of a query (batch mode) or the end of a streaming
* epoch. The value of the aggregates only reflects the data processed since the previous
* completion point.</li>
* </ul>
* Please note that continuous execution is currently not supported.
*
* The metrics columns must either contain a literal (e.g. lit(42)), or should contain one or
* more aggregate functions (e.g. sum(a) or sum(a + b) + avg(c) - lit(1)). Expressions that
* contain references to the input Dataset's columns must always be wrapped in an aggregate
* function.
*
* A user can observe these metrics by either adding
* [[org.apache.spark.sql.streaming.StreamingQueryListener]] or a
* [[org.apache.spark.sql.util.QueryExecutionListener]] to the spark session.
*
* {{{
* // Monitor the metrics using a listener.
* spark.streams.addListener(new StreamingQueryListener() {
* override def onQueryProgress(event: QueryProgressEvent): Unit = {
* event.progress.observedMetrics.asScala.get("my_event").foreach { row =>
* // Trigger if the number of errors exceeds 5 percent
* val num_rows = row.getAs[Long]("rc")
* val num_error_rows = row.getAs[Long]("erc")
* val ratio = num_error_rows.toDouble / num_rows
* if (ratio > 0.05) {
* // Trigger alert
* }
* }
* }
* def onQueryStarted(event: QueryStartedEvent): Unit = {}
* def onQueryTerminated(event: QueryTerminatedEvent): Unit = {}
* })
* // Observe row count (rc) and error row count (erc) in the streaming Dataset
* val observed_ds = ds.observe("my_event", count(lit(1)).as("rc"), count($"error").as("erc"))
* observed_ds.writeStream.format("...").start()
* }}}
*
* @group typedrel
* @since 3.0.0
*/
def observe(name: String, expr: Column, exprs: Column*): Dataset[T] = withTypedPlan {
CollectMetrics(name, (expr +: exprs).map(_.named), logicalPlan)
}
/**
* Returns a new Dataset by taking the first `n` rows. The difference between this function
* and `head` is that `head` is an action and returns an array (by triggering query execution)
* while `limit` returns a new Dataset.
*
* @group typedrel
* @since 2.0.0
*/
def limit(n: Int): Dataset[T] = withTypedPlan {
Limit(Literal(n), logicalPlan)
}
/**
* Returns a new Dataset containing union of rows in this Dataset and another Dataset.
*
* This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union (that does
* deduplication of elements), use this function followed by a [[distinct]].
*
* Also as standard in SQL, this function resolves columns by position (not by name):
*
* {{{
* val df1 = Seq((1, 2, 3)).toDF("col0", "col1", "col2")
* val df2 = Seq((4, 5, 6)).toDF("col1", "col2", "col0")
* df1.union(df2).show
*
* // output:
* // +----+----+----+
* // |col0|col1|col2|
* // +----+----+----+
* // | 1| 2| 3|
* // | 4| 5| 6|
* // +----+----+----+
* }}}
*
* Notice that the column positions in the schema aren't necessarily matched with the
* fields in the strongly typed objects in a Dataset. This function resolves columns
* by their positions in the schema, not the fields in the strongly typed objects. Use
* [[unionByName]] to resolve columns by field name in the typed objects.
*
* @group typedrel
* @since 2.0.0
*/
def union(other: Dataset[T]): Dataset[T] = withSetOperator {
// This breaks caching, but it's usually ok because it addresses a very specific use case:
// using union to union many files or partitions.
CombineUnions(Union(logicalPlan, other.logicalPlan))
}
/**
* Returns a new Dataset containing union of rows in this Dataset and another Dataset.
* This is an alias for `union`.
*
* This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union (that does
* deduplication of elements), use this function followed by a [[distinct]].
*
* Also as standard in SQL, this function resolves columns by position (not by name).
*
* @group typedrel
* @since 2.0.0
*/
def unionAll(other: Dataset[T]): Dataset[T] = union(other)
/**
* Returns a new Dataset containing union of rows in this Dataset and another Dataset.
*
* This is different from both `UNION ALL` and `UNION DISTINCT` in SQL. To do a SQL-style set
* union (that does deduplication of elements), use this function followed by a [[distinct]].
*
* The difference between this function and [[union]] is that this function
* resolves columns by name (not by position):
*
* {{{
* val df1 = Seq((1, 2, 3)).toDF("col0", "col1", "col2")
* val df2 = Seq((4, 5, 6)).toDF("col1", "col2", "col0")
* df1.unionByName(df2).show
*
* // output:
* // +----+----+----+
* // |col0|col1|col2|
* // +----+----+----+
* // | 1| 2| 3|
* // | 6| 4| 5|
* // +----+----+----+
* }}}
*
* @group typedrel
* @since 2.3.0
*/
def unionByName(other: Dataset[T]): Dataset[T] = withSetOperator {
// Check column name duplication
val resolver = sparkSession.sessionState.analyzer.resolver
val leftOutputAttrs = logicalPlan.output
val rightOutputAttrs = other.logicalPlan.output
SchemaUtils.checkColumnNameDuplication(
leftOutputAttrs.map(_.name),
"in the left attributes",
sparkSession.sessionState.conf.caseSensitiveAnalysis)
SchemaUtils.checkColumnNameDuplication(
rightOutputAttrs.map(_.name),
"in the right attributes",
sparkSession.sessionState.conf.caseSensitiveAnalysis)
// Builds a project list for `other` based on `logicalPlan` output names
val rightProjectList = leftOutputAttrs.map { lattr =>
rightOutputAttrs.find { rattr => resolver(lattr.name, rattr.name) }.getOrElse {
throw new AnalysisException(
s"""Cannot resolve column name "${lattr.name}" among """ +
s"""(${rightOutputAttrs.map(_.name).mkString(", ")})""")
}
}
// Delegates failure checks to `CheckAnalysis`
val notFoundAttrs = rightOutputAttrs.diff(rightProjectList)
val rightChild = Project(rightProjectList ++ notFoundAttrs, other.logicalPlan)
// This breaks caching, but it's usually ok because it addresses a very specific use case:
// using union to union many files or partitions.
CombineUnions(Union(logicalPlan, rightChild))
}
/**
* Returns a new Dataset containing rows only in both this Dataset and another Dataset.
* This is equivalent to `INTERSECT` in SQL.
*
* @note Equality checking is performed directly on the encoded representation of the data
* and thus is not affected by a custom `equals` function defined on `T`.
*
* @group typedrel
* @since 1.6.0
*/
def intersect(other: Dataset[T]): Dataset[T] = withSetOperator {
Intersect(logicalPlan, other.logicalPlan, isAll = false)
}
/**
* Returns a new Dataset containing rows only in both this Dataset and another Dataset while
* preserving the duplicates.
* This is equivalent to `INTERSECT ALL` in SQL.
*
* @note Equality checking is performed directly on the encoded representation of the data
* and thus is not affected by a custom `equals` function defined on `T`. Also as standard
* in SQL, this function resolves columns by position (not by name).
*
* @group typedrel
* @since 2.4.0
*/
def intersectAll(other: Dataset[T]): Dataset[T] = withSetOperator {
Intersect(logicalPlan, other.logicalPlan, isAll = true)
}
/**
* Returns a new Dataset containing rows in this Dataset but not in another Dataset.
* This is equivalent to `EXCEPT DISTINCT` in SQL.
*
* @note Equality checking is performed directly on the encoded representation of the data
* and thus is not affected by a custom `equals` function defined on `T`.
*
* @group typedrel
* @since 2.0.0
*/
def except(other: Dataset[T]): Dataset[T] = withSetOperator {
Except(logicalPlan, other.logicalPlan, isAll = false)
}
/**
* Returns a new Dataset containing rows in this Dataset but not in another Dataset while
* preserving the duplicates.
* This is equivalent to `EXCEPT ALL` in SQL.
*
* @note Equality checking is performed directly on the encoded representation of the data
* and thus is not affected by a custom `equals` function defined on `T`. Also as standard in
* SQL, this function resolves columns by position (not by name).
*
* @group typedrel
* @since 2.4.0
*/
def exceptAll(other: Dataset[T]): Dataset[T] = withSetOperator {
Except(logicalPlan, other.logicalPlan, isAll = true)
}
/**
* Returns a new [[Dataset]] by sampling a fraction of rows (without replacement),
* using a user-supplied seed.
*
* @param fraction Fraction of rows to generate, range [0.0, 1.0].
* @param seed Seed for sampling.
*
* @note This is NOT guaranteed to provide exactly the fraction of the count
* of the given [[Dataset]].
*
* @group typedrel
* @since 2.3.0
*/
def sample(fraction: Double, seed: Long): Dataset[T] = {
sample(withReplacement = false, fraction = fraction, seed = seed)
}
/**
* Returns a new [[Dataset]] by sampling a fraction of rows (without replacement),
* using a random seed.
*
* @param fraction Fraction of rows to generate, range [0.0, 1.0].
*
* @note This is NOT guaranteed to provide exactly the fraction of the count
* of the given [[Dataset]].
*
* @group typedrel
* @since 2.3.0
*/
def sample(fraction: Double): Dataset[T] = {
sample(withReplacement = false, fraction = fraction)
}
/**
* Returns a new [[Dataset]] by sampling a fraction of rows, using a user-supplied seed.
*
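   * For example (the fraction and seed are illustrative values):
   *
   * {{{
   *   ds.sample(withReplacement = false, fraction = 0.1, seed = 42L)
   * }}}
   *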
* @param withReplacement Sample with replacement or not.
* @param fraction Fraction of rows to generate, range [0.0, 1.0].
* @param seed Seed for sampling.
*
* @note This is NOT guaranteed to provide exactly the fraction of the count
* of the given [[Dataset]].
*
* @group typedrel
* @since 1.6.0
*/
def sample(withReplacement: Boolean, fraction: Double, seed: Long): Dataset[T] = {
withTypedPlan {
Sample(0.0, fraction, withReplacement, seed, logicalPlan)
}
}
/**
* Returns a new [[Dataset]] by sampling a fraction of rows, using a random seed.
*
* @param withReplacement Sample with replacement or not.
* @param fraction Fraction of rows to generate, range [0.0, 1.0].
*
* @note This is NOT guaranteed to provide exactly the fraction of the total count
* of the given [[Dataset]].
*
* @group typedrel
* @since 1.6.0
*/
def sample(withReplacement: Boolean, fraction: Double): Dataset[T] = {
sample(withReplacement, fraction, Utils.random.nextLong)
}
/**
* Randomly splits this Dataset with the provided weights.
*
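   * For example (the weights and seed are illustrative values):
   *
   * {{{
   *   val Array(train, test) = ds.randomSplit(Array(0.8, 0.2), seed = 42L)
   * }}}
   *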
* @param weights weights for splits, will be normalized if they don't sum to 1.
* @param seed Seed for sampling.
*
* For Java API, use [[randomSplitAsList]].
*
* @group typedrel
* @since 2.0.0
*/
def randomSplit(weights: Array[Double], seed: Long): Array[Dataset[T]] = {
require(weights.forall(_ >= 0),
s"Weights must be nonnegative, but got ${weights.mkString("[", ",", "]")}")
require(weights.sum > 0,
s"Sum of weights must be positive, but got ${weights.mkString("[", ",", "]")}")
// It is possible that the underlying dataframe doesn't guarantee the ordering of rows in its
// constituent partitions each time a split is materialized which could result in
// overlapping splits. To prevent this, we explicitly sort each input partition to make the
// ordering deterministic. Note that MapTypes cannot be sorted and are explicitly pruned out
// from the sort order.
val sortOrder = logicalPlan.output
.filter(attr => RowOrdering.isOrderable(attr.dataType))
.map(SortOrder(_, Ascending))
val plan = if (sortOrder.nonEmpty) {
Sort(sortOrder, global = false, logicalPlan)
} else {
// SPARK-12662: If sort order is empty, we materialize the dataset to guarantee determinism
cache()
logicalPlan
}
val sum = weights.sum
val normalizedCumWeights = weights.map(_ / sum).scanLeft(0.0d)(_ + _)
normalizedCumWeights.sliding(2).map { x =>
new Dataset[T](
sparkSession, Sample(x(0), x(1), withReplacement = false, seed, plan), encoder)
}.toArray
}
/**
* Returns a Java list that contains randomly split Dataset with the provided weights.
*
* @param weights weights for splits, will be normalized if they don't sum to 1.
* @param seed Seed for sampling.
*
* @group typedrel
* @since 2.0.0
*/
def randomSplitAsList(weights: Array[Double], seed: Long): java.util.List[Dataset[T]] = {
val values = randomSplit(weights, seed)
java.util.Arrays.asList(values : _*)
}
/**
* Randomly splits this Dataset with the provided weights.
*
* @param weights weights for splits, will be normalized if they don't sum to 1.
* @group typedrel
* @since 2.0.0
*/
def randomSplit(weights: Array[Double]): Array[Dataset[T]] = {
randomSplit(weights, Utils.random.nextLong)
}
/**
* Randomly splits this Dataset with the provided weights. Provided for the Python Api.
*
* @param weights weights for splits, will be normalized if they don't sum to 1.
* @param seed Seed for sampling.
*/
private[spark] def randomSplit(weights: List[Double], seed: Long): Array[Dataset[T]] = {
randomSplit(weights.toArray, seed)
}
/**
* Returns a new Dataset by adding a column or replacing the existing column that has
* the same name.
*
* `column`'s expression must only refer to attributes supplied by this Dataset. It is an
* error to add a column that refers to some other Dataset.
*
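   * A minimal sketch with hypothetical column names:
   *
   * {{{
   *   // Adds a "total" column derived from the existing "price" and "tax" columns
   *   df.withColumn("total", $"price" + $"tax")
   * }}}
   *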
* @note this method introduces a projection internally. Therefore, calling it multiple times,
* for instance, via loops in order to add multiple columns can generate big plans which
* can cause performance issues and even `StackOverflowException`. To avoid this,
* use `select` with the multiple columns at once.
*
* @group untypedrel
* @since 2.0.0
*/
def withColumn(colName: String, col: Column): DataFrame = withColumns(Seq(colName), Seq(col))
/**
* Returns a new Dataset by adding columns or replacing the existing columns that has
* the same names.
*/
private[spark] def withColumns(colNames: Seq[String], cols: Seq[Column]): DataFrame = {
require(colNames.size == cols.size,
s"The size of column names: ${colNames.size} isn't equal to " +
s"the size of columns: ${cols.size}")
SchemaUtils.checkColumnNameDuplication(
colNames,
"in given column names",
sparkSession.sessionState.conf.caseSensitiveAnalysis)
val resolver = sparkSession.sessionState.analyzer.resolver
val output = queryExecution.analyzed.output
val columnMap = colNames.zip(cols).toMap
val replacedAndExistingColumns = output.map { field =>
columnMap.find { case (colName, _) =>
resolver(field.name, colName)
} match {
case Some((colName: String, col: Column)) => col.as(colName)
case _ => Column(field)
}
}
    val newColumns = columnMap.filter { case (colName, _) =>
!output.exists(f => resolver(f.name, colName))
}.map { case (colName, col) => col.as(colName) }
select(replacedAndExistingColumns ++ newColumns : _*)
}
/**
* Returns a new Dataset by adding columns with metadata.
*/
private[spark] def withColumns(
colNames: Seq[String],
cols: Seq[Column],
metadata: Seq[Metadata]): DataFrame = {
require(colNames.size == metadata.size,
s"The size of column names: ${colNames.size} isn't equal to " +
s"the size of metadata elements: ${metadata.size}")
val newCols = colNames.zip(cols).zip(metadata).map { case ((colName, col), metadata) =>
col.as(colName, metadata)
}
withColumns(colNames, newCols)
}
/**
* Returns a new Dataset by adding a column with metadata.
*/
private[spark] def withColumn(colName: String, col: Column, metadata: Metadata): DataFrame =
withColumns(Seq(colName), Seq(col), Seq(metadata))
/**
* Returns a new Dataset with a column renamed.
   * This is a no-op if the schema doesn't contain `existingName`.
*
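   * For example (the column names are illustrative):
   *
   * {{{
   *   df.withColumnRenamed("dob", "dateOfBirth")
   * }}}
   *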
* @group untypedrel
* @since 2.0.0
*/
def withColumnRenamed(existingName: String, newName: String): DataFrame = {
val resolver = sparkSession.sessionState.analyzer.resolver
val output = queryExecution.analyzed.output
val shouldRename = output.exists(f => resolver(f.name, existingName))
if (shouldRename) {
val columns = output.map { col =>
if (resolver(col.name, existingName)) {
Column(col).as(newName)
} else {
Column(col)
}
}
select(columns : _*)
} else {
toDF()
}
}
/**
* Returns a new Dataset with a column dropped. This is a no-op if schema doesn't contain
* column name.
*
   * This method can only be used to drop top-level columns. The colName string is treated
   * literally without further interpretation.
*
* @group untypedrel
* @since 2.0.0
*/
def drop(colName: String): DataFrame = {
drop(Seq(colName) : _*)
}
/**
* Returns a new Dataset with columns dropped.
* This is a no-op if schema doesn't contain column name(s).
*
   * This method can only be used to drop top-level columns. The colName string is treated
   * literally without further interpretation.
*
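   * For example (the column names are illustrative):
   *
   * {{{
   *   df.drop("tempCol1", "tempCol2")
   * }}}
   *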
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def drop(colNames: String*): DataFrame = {
val resolver = sparkSession.sessionState.analyzer.resolver
val allColumns = queryExecution.analyzed.output
val remainingCols = allColumns.filter { attribute =>
colNames.forall(n => !resolver(attribute.name, n))
}.map(attribute => Column(attribute))
if (remainingCols.size == allColumns.size) {
toDF()
} else {
this.select(remainingCols: _*)
}
}
/**
* Returns a new Dataset with a column dropped.
* This version of drop accepts a [[Column]] rather than a name.
* This is a no-op if the Dataset doesn't have a column
* with an equivalent expression.
*
* @group untypedrel
* @since 2.0.0
*/
def drop(col: Column): DataFrame = {
val expression = col match {
case Column(u: UnresolvedAttribute) =>
queryExecution.analyzed.resolveQuoted(
u.name, sparkSession.sessionState.analyzer.resolver).getOrElse(u)
case Column(expr: Expression) => expr
}
val attrs = this.logicalPlan.output
val colsAfterDrop = attrs.filter { attr =>
!attr.semanticEquals(expression)
}.map(attr => Column(attr))
select(colsAfterDrop : _*)
}
/**
* Returns a new Dataset that contains only the unique rows from this Dataset.
* This is an alias for `distinct`.
*
* For a static batch [[Dataset]], it just drops duplicate rows. For a streaming [[Dataset]], it
   * will keep all data across triggers as intermediate state to drop duplicate rows. You can use
   * [[withWatermark]] to limit how late the duplicate data can be, and the system will accordingly
   * limit the state. In addition, data older than the watermark will be dropped to avoid any
* possibility of duplicates.
*
* @group typedrel
* @since 2.0.0
*/
def dropDuplicates(): Dataset[T] = dropDuplicates(this.columns)
/**
* (Scala-specific) Returns a new Dataset with duplicate rows removed, considering only
* the subset of columns.
*
* For a static batch [[Dataset]], it just drops duplicate rows. For a streaming [[Dataset]], it
   * will keep all data across triggers as intermediate state to drop duplicate rows. You can use
   * [[withWatermark]] to limit how late the duplicate data can be, and the system will accordingly
   * limit the state. In addition, data older than the watermark will be dropped to avoid any
* possibility of duplicates.
*
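   * For example (the column names are illustrative):
   *
   * {{{
   *   ds.dropDuplicates(Seq("firstName", "lastName"))
   * }}}
   *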
* @group typedrel
* @since 2.0.0
*/
def dropDuplicates(colNames: Seq[String]): Dataset[T] = withTypedPlan {
val resolver = sparkSession.sessionState.analyzer.resolver
val allColumns = queryExecution.analyzed.output
val groupCols = colNames.toSet.toSeq.flatMap { (colName: String) =>
      // It is possible that there is more than one column with the same name,
// so we call filter instead of find.
val cols = allColumns.filter(col => resolver(col.name, colName))
if (cols.isEmpty) {
throw new AnalysisException(
s"""Cannot resolve column name "$colName" among (${schema.fieldNames.mkString(", ")})""")
}
cols
}
Deduplicate(groupCols, logicalPlan)
}
/**
* Returns a new Dataset with duplicate rows removed, considering only
* the subset of columns.
*
* For a static batch [[Dataset]], it just drops duplicate rows. For a streaming [[Dataset]], it
   * will keep all data across triggers as intermediate state to drop duplicate rows. You can use
   * [[withWatermark]] to limit how late the duplicate data can be, and the system will accordingly
   * limit the state. In addition, data older than the watermark will be dropped to avoid any
* possibility of duplicates.
*
* @group typedrel
* @since 2.0.0
*/
def dropDuplicates(colNames: Array[String]): Dataset[T] = dropDuplicates(colNames.toSeq)
/**
* Returns a new [[Dataset]] with duplicate rows removed, considering only
* the subset of columns.
*
* For a static batch [[Dataset]], it just drops duplicate rows. For a streaming [[Dataset]], it
   * will keep all data across triggers as intermediate state to drop duplicate rows. You can use
   * [[withWatermark]] to limit how late the duplicate data can be, and the system will accordingly
   * limit the state. In addition, data older than the watermark will be dropped to avoid any
* possibility of duplicates.
*
* @group typedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def dropDuplicates(col1: String, cols: String*): Dataset[T] = {
val colNames: Seq[String] = col1 +: cols
dropDuplicates(colNames)
}
/**
* Computes basic statistics for numeric and string columns, including count, mean, stddev, min,
* and max. If no columns are given, this function computes statistics for all numerical or
* string columns.
*
* This function is meant for exploratory data analysis, as we make no guarantee about the
* backward compatibility of the schema of the resulting Dataset. If you want to
* programmatically compute summary statistics, use the `agg` function instead.
*
* {{{
* ds.describe("age", "height").show()
*
* // output:
* // summary age height
* // count 10.0 10.0
* // mean 53.3 178.05
* // stddev 11.6 15.7
* // min 18.0 163.0
* // max 92.0 192.0
* }}}
*
* Use [[summary]] for expanded statistics and control over which statistics to compute.
*
* @param cols Columns to compute statistics on.
*
* @group action
* @since 1.6.0
*/
@scala.annotation.varargs
def describe(cols: String*): DataFrame = {
val selected = if (cols.isEmpty) this else select(cols.head, cols.tail: _*)
selected.summary("count", "mean", "stddev", "min", "max")
}
/**
* Computes specified statistics for numeric and string columns. Available statistics are:
* <ul>
* <li>count</li>
* <li>mean</li>
* <li>stddev</li>
* <li>min</li>
* <li>max</li>
* <li>arbitrary approximate percentiles specified as a percentage (e.g. 75%)</li>
* </ul>
*
* If no statistics are given, this function computes count, mean, stddev, min,
* approximate quartiles (percentiles at 25%, 50%, and 75%), and max.
*
* This function is meant for exploratory data analysis, as we make no guarantee about the
* backward compatibility of the schema of the resulting Dataset. If you want to
* programmatically compute summary statistics, use the `agg` function instead.
*
* {{{
* ds.summary().show()
*
* // output:
* // summary age height
* // count 10.0 10.0
* // mean 53.3 178.05
* // stddev 11.6 15.7
* // min 18.0 163.0
* // 25% 24.0 176.0
* // 50% 24.0 176.0
* // 75% 32.0 180.0
* // max 92.0 192.0
* }}}
*
* {{{
* ds.summary("count", "min", "25%", "75%", "max").show()
*
* // output:
* // summary age height
* // count 10.0 10.0
* // min 18.0 163.0
* // 25% 24.0 176.0
* // 75% 32.0 180.0
* // max 92.0 192.0
* }}}
*
* To do a summary for specific columns first select them:
*
* {{{
* ds.select("age", "height").summary().show()
* }}}
*
* See also [[describe]] for basic statistics.
*
* @param statistics Statistics from above list to be computed.
*
* @group action
* @since 2.3.0
*/
@scala.annotation.varargs
def summary(statistics: String*): DataFrame = StatFunctions.summary(this, statistics.toSeq)
/**
* Returns the first `n` rows.
*
* @note this method should only be used if the resulting array is expected to be small, as
* all the data is loaded into the driver's memory.
*
* @group action
* @since 1.6.0
*/
def head(n: Int): Array[T] = withAction("head", limit(n).queryExecution)(collectFromPlan)
/**
* Returns the first row.
* @group action
* @since 1.6.0
*/
def head(): T = head(1).head
/**
* Returns the first row. Alias for head().
* @group action
* @since 1.6.0
*/
def first(): T = head()
/**
* Concise syntax for chaining custom transformations.
* {{{
* def featurize(ds: Dataset[T]): Dataset[U] = ...
*
* ds
* .transform(featurize)
* .transform(...)
* }}}
*
* @group typedrel
* @since 1.6.0
*/
def transform[U](t: Dataset[T] => Dataset[U]): Dataset[U] = t(this)
/**
* (Scala-specific)
* Returns a new Dataset that only contains elements where `func` returns `true`.
*
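   * For example, keeping only even numbers (assumes `spark.implicits._` is in scope):
   *
   * {{{
   *   val evens = Seq(1, 2, 3, 4).toDS().filter(_ % 2 == 0)
   * }}}
   *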
* @group typedrel
* @since 1.6.0
*/
def filter(func: T => Boolean): Dataset[T] = {
withTypedPlan(TypedFilter(func, logicalPlan))
}
/**
* (Java-specific)
* Returns a new Dataset that only contains elements where `func` returns `true`.
*
* @group typedrel
* @since 1.6.0
*/
def filter(func: FilterFunction[T]): Dataset[T] = {
withTypedPlan(TypedFilter(func, logicalPlan))
}
/**
* (Scala-specific)
* Returns a new Dataset that contains the result of applying `func` to each element.
*
* @group typedrel
* @since 1.6.0
*/
def map[U : Encoder](func: T => U): Dataset[U] = withTypedPlan {
MapElements[T, U](func, logicalPlan)
}
/**
* (Java-specific)
* Returns a new Dataset that contains the result of applying `func` to each element.
*
* @group typedrel
* @since 1.6.0
*/
def map[U](func: MapFunction[T, U], encoder: Encoder[U]): Dataset[U] = {
implicit val uEnc = encoder
withTypedPlan(MapElements[T, U](func, logicalPlan))
}
/**
* (Scala-specific)
* Returns a new Dataset that contains the result of applying `func` to each partition.
*
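   * A sketch of a typical use, amortizing expensive per-partition setup; `createConnection` and
   * `enrich` are hypothetical helpers:
   *
   * {{{
   *   ds.mapPartitions { iter =>
   *     val conn = createConnection()   // done once per partition
   *     iter.map(record => enrich(conn, record))
   *   }
   * }}}
   *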
* @group typedrel
* @since 1.6.0
*/
def mapPartitions[U : Encoder](func: Iterator[T] => Iterator[U]): Dataset[U] = {
new Dataset[U](
sparkSession,
MapPartitions[T, U](func, logicalPlan),
implicitly[Encoder[U]])
}
/**
* (Java-specific)
* Returns a new Dataset that contains the result of applying `f` to each partition.
*
* @group typedrel
* @since 1.6.0
*/
def mapPartitions[U](f: MapPartitionsFunction[T, U], encoder: Encoder[U]): Dataset[U] = {
val func: (Iterator[T]) => Iterator[U] = x => f.call(x.asJava).asScala
mapPartitions(func)(encoder)
}
/**
* Returns a new `DataFrame` that contains the result of applying a serialized R function
* `func` to each partition.
*/
private[sql] def mapPartitionsInR(
func: Array[Byte],
packageNames: Array[Byte],
broadcastVars: Array[Broadcast[Object]],
schema: StructType): DataFrame = {
val rowEncoder = encoder.asInstanceOf[ExpressionEncoder[Row]]
Dataset.ofRows(
sparkSession,
MapPartitionsInR(func, packageNames, broadcastVars, schema, rowEncoder, logicalPlan))
}
/**
* Applies a Scalar iterator Pandas UDF to each partition. The user-defined function
* defines a transformation: `iter(pandas.DataFrame)` -> `iter(pandas.DataFrame)`.
   * Each partition is processed as an iterator of DataFrames, where each DataFrame is a batch.
*
* This function uses Apache Arrow as serialization format between Java executors and Python
* workers.
*/
private[sql] def mapInPandas(func: PythonUDF): DataFrame = {
Dataset.ofRows(
sparkSession,
MapInPandas(
func,
func.dataType.asInstanceOf[StructType].toAttributes,
logicalPlan))
}
/**
* (Scala-specific)
* Returns a new Dataset by first applying a function to all elements of this Dataset,
* and then flattening the results.
*
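   * For example, splitting lines into words (`lines` is a hypothetical `Dataset[String]`):
   *
   * {{{
   *   val words = lines.flatMap(_.split(" "))
   * }}}
   *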
* @group typedrel
* @since 1.6.0
*/
def flatMap[U : Encoder](func: T => TraversableOnce[U]): Dataset[U] =
mapPartitions(_.flatMap(func))
/**
* (Java-specific)
* Returns a new Dataset by first applying a function to all elements of this Dataset,
* and then flattening the results.
*
* @group typedrel
* @since 1.6.0
*/
def flatMap[U](f: FlatMapFunction[T, U], encoder: Encoder[U]): Dataset[U] = {
val func: (T) => Iterator[U] = x => f.call(x).asScala
flatMap(func)(encoder)
}
/**
* Applies a function `f` to all rows.
*
* @group action
* @since 1.6.0
*/
def foreach(f: T => Unit): Unit = withNewRDDExecutionId {
rdd.foreach(f)
}
/**
* (Java-specific)
* Runs `func` on each element of this Dataset.
*
* @group action
* @since 1.6.0
*/
def foreach(func: ForeachFunction[T]): Unit = foreach(func.call(_))
/**
* Applies a function `f` to each partition of this Dataset.
*
* @group action
* @since 1.6.0
*/
def foreachPartition(f: Iterator[T] => Unit): Unit = withNewRDDExecutionId {
rdd.foreachPartition(f)
}
/**
* (Java-specific)
* Runs `func` on each partition of this Dataset.
*
* @group action
* @since 1.6.0
*/
def foreachPartition(func: ForeachPartitionFunction[T]): Unit = {
foreachPartition((it: Iterator[T]) => func.call(it.asJava))
}
/**
* Returns the first `n` rows in the Dataset.
*
* Running take requires moving data into the application's driver process, and doing so with
* a very large `n` can crash the driver process with OutOfMemoryError.
*
* @group action
* @since 1.6.0
*/
def take(n: Int): Array[T] = head(n)
/**
* Returns the last `n` rows in the Dataset.
*
* Running tail requires moving data into the application's driver process, and doing so with
* a very large `n` can crash the driver process with OutOfMemoryError.
*
* @group action
* @since 3.0.0
*/
def tail(n: Int): Array[T] = withAction(
"tail", withTypedPlan(Tail(Literal(n), logicalPlan)).queryExecution)(collectFromPlan)
/**
* Returns the first `n` rows in the Dataset as a list.
*
* Running take requires moving data into the application's driver process, and doing so with
* a very large `n` can crash the driver process with OutOfMemoryError.
*
* @group action
* @since 1.6.0
*/
def takeAsList(n: Int): java.util.List[T] = java.util.Arrays.asList(take(n) : _*)
/**
* Returns an array that contains all rows in this Dataset.
*
* Running collect requires moving all the data into the application's driver process, and
* doing so on a very large dataset can crash the driver process with OutOfMemoryError.
*
* For Java API, use [[collectAsList]].
*
* @group action
* @since 1.6.0
*/
def collect(): Array[T] = withAction("collect", queryExecution)(collectFromPlan)
/**
* Returns a Java list that contains all rows in this Dataset.
*
* Running collect requires moving all the data into the application's driver process, and
* doing so on a very large dataset can crash the driver process with OutOfMemoryError.
*
* @group action
* @since 1.6.0
*/
def collectAsList(): java.util.List[T] = withAction("collectAsList", queryExecution) { plan =>
val values = collectFromPlan(plan)
java.util.Arrays.asList(values : _*)
}
/**
* Returns an iterator that contains all rows in this Dataset.
*
* The iterator will consume as much memory as the largest partition in this Dataset.
*
* @note this results in multiple Spark jobs, and if the input Dataset is the result
   * of a wide transformation (e.g. join with different partitioners), it should be cached
   * first to avoid recomputation.
*
* @group action
* @since 2.0.0
*/
def toLocalIterator(): java.util.Iterator[T] = {
withAction("toLocalIterator", queryExecution) { plan =>
// `ExpressionEncoder` is not thread-safe, here we create a new encoder.
val enc = resolvedEnc.copy()
plan.executeToIterator().map(enc.fromRow).asJava
}
}
/**
* Returns the number of rows in the Dataset.
* @group action
* @since 1.6.0
*/
def count(): Long = withAction("count", groupBy().count().queryExecution) { plan =>
plan.executeCollect().head.getLong(0)
}
/**
* Returns a new Dataset that has exactly `numPartitions` partitions.
*
* @group typedrel
* @since 1.6.0
*/
def repartition(numPartitions: Int): Dataset[T] = withTypedPlan {
Repartition(numPartitions, shuffle = true, logicalPlan)
}
/**
* Returns a new Dataset partitioned by the given partitioning expressions into
* `numPartitions`. The resulting Dataset is hash partitioned.
*
* This is the same operation as "DISTRIBUTE BY" in SQL (Hive QL).
*
* @group typedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def repartition(numPartitions: Int, partitionExprs: Column*): Dataset[T] = {
// The underlying `LogicalPlan` operator special-cases all-`SortOrder` arguments.
// However, we don't want to complicate the semantics of this API method.
// Instead, let's give users a friendly error message, pointing them to the new method.
val sortOrders = partitionExprs.filter(_.expr.isInstanceOf[SortOrder])
if (sortOrders.nonEmpty) throw new IllegalArgumentException(
s"""Invalid partitionExprs specified: $sortOrders
|For range partitioning use repartitionByRange(...) instead.
""".stripMargin)
withTypedPlan {
RepartitionByExpression(partitionExprs.map(_.expr), logicalPlan, numPartitions)
}
}
/**
* Returns a new Dataset partitioned by the given partitioning expressions, using
* `spark.sql.shuffle.partitions` as number of partitions.
* The resulting Dataset is hash partitioned.
*
* This is the same operation as "DISTRIBUTE BY" in SQL (Hive QL).
*
* @group typedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def repartition(partitionExprs: Column*): Dataset[T] = {
repartition(sparkSession.sessionState.conf.numShufflePartitions, partitionExprs: _*)
}
/**
* Returns a new Dataset partitioned by the given partitioning expressions into
* `numPartitions`. The resulting Dataset is range partitioned.
*
* At least one partition-by expression must be specified.
* When no explicit sort order is specified, "ascending nulls first" is assumed.
   * Note that the rows are not sorted in each partition of the resulting Dataset.
   *
* Note that due to performance reasons this method uses sampling to estimate the ranges.
* Hence, the output may not be consistent, since sampling can return different values.
* The sample size can be controlled by the config
* `spark.sql.execution.rangeExchange.sampleSizePerPartition`.
*
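   * A usage sketch (assuming `import org.apache.spark.sql.functions.col` and an
   * existing Dataset `ds` with a `ts` column; `.desc` supplies an explicit sort order):
   * {{{
   *   val ranged = ds.repartitionByRange(50, col("ts").desc)
   * }}}
   *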
* @group typedrel
* @since 2.3.0
*/
@scala.annotation.varargs
def repartitionByRange(numPartitions: Int, partitionExprs: Column*): Dataset[T] = {
require(partitionExprs.nonEmpty, "At least one partition-by expression must be specified.")
val sortOrder: Seq[SortOrder] = partitionExprs.map(_.expr match {
case expr: SortOrder => expr
case expr: Expression => SortOrder(expr, Ascending)
})
withTypedPlan {
RepartitionByExpression(sortOrder, logicalPlan, numPartitions)
}
}
/**
* Returns a new Dataset partitioned by the given partitioning expressions, using
* `spark.sql.shuffle.partitions` as number of partitions.
* The resulting Dataset is range partitioned.
*
* At least one partition-by expression must be specified.
* When no explicit sort order is specified, "ascending nulls first" is assumed.
   * Note that the rows are not sorted in each partition of the resulting Dataset.
*
* Note that due to performance reasons this method uses sampling to estimate the ranges.
* Hence, the output may not be consistent, since sampling can return different values.
* The sample size can be controlled by the config
* `spark.sql.execution.rangeExchange.sampleSizePerPartition`.
*
* @group typedrel
* @since 2.3.0
*/
@scala.annotation.varargs
def repartitionByRange(partitionExprs: Column*): Dataset[T] = {
repartitionByRange(sparkSession.sessionState.conf.numShufflePartitions, partitionExprs: _*)
}
/**
   * Returns a new Dataset that has exactly `numPartitions` partitions, when fewer partitions
   * are requested. If a larger number of partitions is requested, it will stay at the current
   * number of partitions. Similar to coalesce defined on an `RDD`, this operation results in
   * a narrow dependency, e.g. if you go from 1000 partitions to 100 partitions, there will not
   * be a shuffle; instead, each of the 100 new partitions will claim 10 of the current partitions.
   *
   * However, if you're doing a drastic coalesce, e.g. to numPartitions = 1,
   * this may result in your computation taking place on fewer nodes than
   * you would like (e.g. one node in the case of numPartitions = 1). To avoid this,
   * you can call repartition instead. This adds a shuffle step, but means the
   * current upstream partitions will be executed in parallel (per whatever
   * the current partitioning is).
*
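   * A usage sketch (assuming an existing Dataset `ds`); `coalesce` narrows the
   * partition count without a shuffle, whereas `repartition` always shuffles:
   * {{{
   *   val narrowed   = ds.coalesce(10)     // no shuffle, narrow dependency
   *   val reshuffled = ds.repartition(10)  // full shuffle, rebalanced output
   * }}}
   *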
* @group typedrel
* @since 1.6.0
*/
def coalesce(numPartitions: Int): Dataset[T] = withTypedPlan {
Repartition(numPartitions, shuffle = false, logicalPlan)
}
/**
* Returns a new Dataset that contains only the unique rows from this Dataset.
* This is an alias for `dropDuplicates`.
*
* @note Equality checking is performed directly on the encoded representation of the data
* and thus is not affected by a custom `equals` function defined on `T`.
*
* @group typedrel
* @since 2.0.0
*/
def distinct(): Dataset[T] = dropDuplicates()
/**
* Persist this Dataset with the default storage level (`MEMORY_AND_DISK`).
*
* @group basic
* @since 1.6.0
*/
def persist(): this.type = {
sparkSession.sharedState.cacheManager.cacheQuery(this)
this
}
/**
* Persist this Dataset with the default storage level (`MEMORY_AND_DISK`).
*
* @group basic
* @since 1.6.0
*/
def cache(): this.type = persist()
/**
* Persist this Dataset with the given storage level.
* @param newLevel One of: `MEMORY_ONLY`, `MEMORY_AND_DISK`, `MEMORY_ONLY_SER`,
* `MEMORY_AND_DISK_SER`, `DISK_ONLY`, `MEMORY_ONLY_2`,
* `MEMORY_AND_DISK_2`, etc.
*
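   * A usage sketch (assuming `import org.apache.spark.storage.StorageLevel` and an
   * existing Dataset `ds`):
   * {{{
   *   ds.persist(StorageLevel.MEMORY_ONLY)
   * }}}
   *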
* @group basic
* @since 1.6.0
*/
def persist(newLevel: StorageLevel): this.type = {
sparkSession.sharedState.cacheManager.cacheQuery(this, None, newLevel)
this
}
/**
* Get the Dataset's current storage level, or StorageLevel.NONE if not persisted.
*
* @group basic
* @since 2.1.0
*/
def storageLevel: StorageLevel = {
sparkSession.sharedState.cacheManager.lookupCachedData(this).map { cachedData =>
cachedData.cachedRepresentation.cacheBuilder.storageLevel
}.getOrElse(StorageLevel.NONE)
}
/**
* Mark the Dataset as non-persistent, and remove all blocks for it from memory and disk.
* This will not un-persist any cached data that is built upon this Dataset.
*
* @param blocking Whether to block until all blocks are deleted.
*
* @group basic
* @since 1.6.0
*/
def unpersist(blocking: Boolean): this.type = {
sparkSession.sharedState.cacheManager.uncacheQuery(
sparkSession, logicalPlan, cascade = false, blocking)
this
}
/**
* Mark the Dataset as non-persistent, and remove all blocks for it from memory and disk.
* This will not un-persist any cached data that is built upon this Dataset.
*
* @group basic
* @since 1.6.0
*/
def unpersist(): this.type = unpersist(blocking = false)
// Represents the `QueryExecution` used to produce the content of the Dataset as an `RDD`.
@transient private lazy val rddQueryExecution: QueryExecution = {
val deserialized = CatalystSerde.deserialize[T](logicalPlan)
sparkSession.sessionState.executePlan(deserialized)
}
/**
* Represents the content of the Dataset as an `RDD` of `T`.
*
* @group basic
* @since 1.6.0
*/
lazy val rdd: RDD[T] = {
val objectType = exprEnc.deserializer.dataType
rddQueryExecution.toRdd.mapPartitions { rows =>
rows.map(_.get(0, objectType).asInstanceOf[T])
}
}
/**
* Returns the content of the Dataset as a `JavaRDD` of `T`s.
* @group basic
* @since 1.6.0
*/
def toJavaRDD: JavaRDD[T] = rdd.toJavaRDD()
/**
* Returns the content of the Dataset as a `JavaRDD` of `T`s.
* @group basic
* @since 1.6.0
*/
def javaRDD: JavaRDD[T] = toJavaRDD
/**
* Creates a local temporary view using the given name. The lifetime of this
* temporary view is tied to the [[SparkSession]] that was used to create this Dataset.
*
   * A local temporary view is session-scoped. Its lifetime is the lifetime of the session that
   * created it, i.e. it will be automatically dropped when the session terminates. It's not
   * tied to any database, i.e. we can't use `db1.view1` to reference a local temporary view.
*
* @throws AnalysisException if the view name is invalid or already exists
*
* @group basic
* @since 2.0.0
*/
@throws[AnalysisException]
def createTempView(viewName: String): Unit = withPlan {
createTempViewCommand(viewName, replace = false, global = false)
}
/**
* Creates a local temporary view using the given name. The lifetime of this
* temporary view is tied to the [[SparkSession]] that was used to create this Dataset.
*
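   * A usage sketch (assuming a SparkSession named `spark` and a Dataset `ds`):
   * {{{
   *   ds.createOrReplaceTempView("people")
   *   spark.sql("SELECT * FROM people")
   * }}}
   *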
* @group basic
* @since 2.0.0
*/
def createOrReplaceTempView(viewName: String): Unit = withPlan {
createTempViewCommand(viewName, replace = true, global = false)
}
/**
* Creates a global temporary view using the given name. The lifetime of this
* temporary view is tied to this Spark application.
*
   * A global temporary view is cross-session. Its lifetime is the lifetime of the Spark
   * application, i.e. it will be automatically dropped when the application terminates. It's tied
   * to a system-preserved database `global_temp`, and we must use the qualified name to refer to
   * a global temp view, e.g. `SELECT * FROM global_temp.view1`.
*
* @throws AnalysisException if the view name is invalid or already exists
*
* @group basic
* @since 2.1.0
*/
@throws[AnalysisException]
def createGlobalTempView(viewName: String): Unit = withPlan {
createTempViewCommand(viewName, replace = false, global = true)
}
/**
* Creates or replaces a global temporary view using the given name. The lifetime of this
* temporary view is tied to this Spark application.
*
   * A global temporary view is cross-session. Its lifetime is the lifetime of the Spark
   * application, i.e. it will be automatically dropped when the application terminates. It's tied
   * to a system-preserved database `global_temp`, and we must use the qualified name to refer to
   * a global temp view, e.g. `SELECT * FROM global_temp.view1`.
*
* @group basic
* @since 2.2.0
*/
def createOrReplaceGlobalTempView(viewName: String): Unit = withPlan {
createTempViewCommand(viewName, replace = true, global = true)
}
private def createTempViewCommand(
viewName: String,
replace: Boolean,
global: Boolean): CreateViewCommand = {
val viewType = if (global) GlobalTempView else LocalTempView
val tableIdentifier = try {
sparkSession.sessionState.sqlParser.parseTableIdentifier(viewName)
} catch {
case _: ParseException => throw new AnalysisException(s"Invalid view name: $viewName")
}
CreateViewCommand(
name = tableIdentifier,
userSpecifiedColumns = Nil,
comment = None,
properties = Map.empty,
originalText = None,
child = logicalPlan,
allowExisting = false,
replace = replace,
viewType = viewType)
}
/**
* Interface for saving the content of the non-streaming Dataset out into external storage.
*
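   * A hedged usage sketch (the output path is a placeholder):
   * {{{
   *   ds.write.mode("overwrite").parquet("/tmp/out")
   * }}}
   *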
* @group basic
* @since 1.6.0
*/
def write: DataFrameWriter[T] = {
if (isStreaming) {
logicalPlan.failAnalysis(
"'write' can not be called on streaming Dataset/DataFrame")
}
new DataFrameWriter[T](this)
}
/**
* Create a write configuration builder for v2 sources.
*
* This builder is used to configure and execute write operations. For example, to append to an
* existing table, run:
*
* {{{
* df.writeTo("catalog.db.table").append()
* }}}
*
* This can also be used to create or replace existing tables:
*
* {{{
* df.writeTo("catalog.db.table").partitionedBy($"col").createOrReplace()
* }}}
*
* @group basic
* @since 3.0.0
*/
def writeTo(table: String): DataFrameWriterV2[T] = {
// TODO: streaming could be adapted to use this interface
if (isStreaming) {
logicalPlan.failAnalysis(
"'writeTo' can not be called on streaming Dataset/DataFrame")
}
new DataFrameWriterV2[T](table, this)
}
/**
* Interface for saving the content of the streaming Dataset out into external storage.
*
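   * A hedged usage sketch (console sink, for a streaming Dataset `ds`):
   * {{{
   *   ds.writeStream.format("console").outputMode("append").start()
   * }}}
   *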
* @group basic
* @since 2.0.0
*/
def writeStream: DataStreamWriter[T] = {
if (!isStreaming) {
logicalPlan.failAnalysis(
"'writeStream' can be called only on streaming Dataset/DataFrame")
}
new DataStreamWriter[T](this)
}
/**
* Returns the content of the Dataset as a Dataset of JSON strings.
* @since 2.0.0
*/
def toJSON: Dataset[String] = {
val rowSchema = this.schema
val sessionLocalTimeZone = sparkSession.sessionState.conf.sessionLocalTimeZone
mapPartitions { iter =>
val writer = new CharArrayWriter()
// create the Generator without separator inserted between 2 records
val gen = new JacksonGenerator(rowSchema, writer,
new JSONOptions(Map.empty[String, String], sessionLocalTimeZone))
new Iterator[String] {
override def hasNext: Boolean = iter.hasNext
override def next(): String = {
gen.write(exprEnc.toRow(iter.next()))
gen.flush()
val json = writer.toString
if (hasNext) {
writer.reset()
} else {
gen.close()
}
json
}
}
} (Encoders.STRING)
}
/**
* Returns a best-effort snapshot of the files that compose this Dataset. This method simply
* asks each constituent BaseRelation for its respective files and takes the union of all results.
* Depending on the source relations, this may not find all input files. Duplicates are removed.
*
* @group basic
* @since 2.0.0
*/
def inputFiles: Array[String] = {
val files: Seq[String] = queryExecution.optimizedPlan.collect {
case LogicalRelation(fsBasedRelation: FileRelation, _, _, _) =>
fsBasedRelation.inputFiles
case fr: FileRelation =>
fr.inputFiles
case r: HiveTableRelation =>
r.tableMeta.storage.locationUri.map(_.toString).toArray
case DataSourceV2ScanRelation(table: FileTable, _, _) =>
table.fileIndex.inputFiles
}.flatten
files.toSet.toArray
}
/**
* Returns `true` when the logical query plans inside both [[Dataset]]s are equal and
* therefore return same results.
*
   * @note The equality comparison here is simplified by tolerating cosmetic differences
   * such as attribute names.
   * @note This API can compare both [[Dataset]]s very quickly but can still return `false` for
   * [[Dataset]]s that return the same results, for instance, from different plans. Such
   * false-negative semantics can be useful when caching, for example.
* @since 3.1.0
*/
@DeveloperApi
def sameSemantics(other: Dataset[T]): Boolean = {
queryExecution.analyzed.sameResult(other.queryExecution.analyzed)
}
/**
* Returns a `hashCode` of the logical query plan against this [[Dataset]].
*
* @note Unlike the standard `hashCode`, the hash is calculated against the query plan
* simplified by tolerating the cosmetic differences such as attribute names.
* @since 3.1.0
*/
@DeveloperApi
def semanticHash(): Int = {
queryExecution.analyzed.semanticHash()
}
////////////////////////////////////////////////////////////////////////////
// For Python API
////////////////////////////////////////////////////////////////////////////
/**
* Converts a JavaRDD to a PythonRDD.
*/
private[sql] def javaToPython: JavaRDD[Array[Byte]] = {
val structType = schema // capture it for closure
val rdd = queryExecution.toRdd.map(EvaluatePython.toJava(_, structType))
EvaluatePython.javaToPython(rdd)
}
private[sql] def collectToPython(): Array[Any] = {
EvaluatePython.registerPicklers()
withAction("collectToPython", queryExecution) { plan =>
val toJava: (Any) => Any = EvaluatePython.toJava(_, schema)
val iter: Iterator[Array[Byte]] = new SerDeUtil.AutoBatchedPickler(
plan.executeCollect().iterator.map(toJava))
PythonRDD.serveIterator(iter, "serve-DataFrame")
}
}
private[sql] def tailToPython(n: Int): Array[Any] = {
EvaluatePython.registerPicklers()
withAction("tailToPython", queryExecution) { plan =>
val toJava: (Any) => Any = EvaluatePython.toJava(_, schema)
val iter: Iterator[Array[Byte]] = new SerDeUtil.AutoBatchedPickler(
plan.executeTail(n).iterator.map(toJava))
PythonRDD.serveIterator(iter, "serve-DataFrame")
}
}
private[sql] def getRowsToPython(
_numRows: Int,
truncate: Int): Array[Any] = {
EvaluatePython.registerPicklers()
val numRows = _numRows.max(0).min(ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH - 1)
val rows = getRows(numRows, truncate).map(_.toArray).toArray
val toJava: (Any) => Any = EvaluatePython.toJava(_, ArrayType(ArrayType(StringType)))
val iter: Iterator[Array[Byte]] = new SerDeUtil.AutoBatchedPickler(
rows.iterator.map(toJava))
PythonRDD.serveIterator(iter, "serve-GetRows")
}
/**
   * Collect a Dataset as Arrow batches and serve the stream to SparkR. It sends
   * Arrow batches in an ordered manner with buffering. This is inevitable
   * due to the missing R API that reads batches from the socket directly. See ARROW-4512.
   * Eventually, this code should be deduplicated with `collectAsArrowToPython`.
*/
private[sql] def collectAsArrowToR(): Array[Any] = {
val timeZoneId = sparkSession.sessionState.conf.sessionLocalTimeZone
withAction("collectAsArrowToR", queryExecution) { plan =>
RRDD.serveToStream("serve-Arrow") { outputStream =>
val buffer = new ByteArrayOutputStream()
val out = new DataOutputStream(outputStream)
val batchWriter = new ArrowBatchStreamWriter(schema, buffer, timeZoneId)
val arrowBatchRdd = toArrowBatchRdd(plan)
val numPartitions = arrowBatchRdd.partitions.length
// Store collection results for worst case of 1 to N-1 partitions
val results = new Array[Array[Array[Byte]]](numPartitions - 1)
var lastIndex = -1 // index of last partition written
// Handler to eagerly write partitions to Python in order
def handlePartitionBatches(index: Int, arrowBatches: Array[Array[Byte]]): Unit = {
// If result is from next partition in order
if (index - 1 == lastIndex) {
batchWriter.writeBatches(arrowBatches.iterator)
lastIndex += 1
// Write stored partitions that come next in order
while (lastIndex < results.length && results(lastIndex) != null) {
batchWriter.writeBatches(results(lastIndex).iterator)
results(lastIndex) = null
lastIndex += 1
}
// After last batch, end the stream
if (lastIndex == results.length) {
batchWriter.end()
val batches = buffer.toByteArray
out.writeInt(batches.length)
out.write(batches)
}
} else {
// Store partitions received out of order
results(index - 1) = arrowBatches
}
}
sparkSession.sparkContext.runJob(
arrowBatchRdd,
(ctx: TaskContext, it: Iterator[Array[Byte]]) => it.toArray,
0 until numPartitions,
handlePartitionBatches)
}
}
}
/**
   * Collect a Dataset as Arrow batches and serve the stream to PySpark. It sends
   * Arrow batches in an unordered manner without buffering, and then sends batch order
   * information at the end. The batches should be reordered on the Python side.
*/
private[sql] def collectAsArrowToPython: Array[Any] = {
val timeZoneId = sparkSession.sessionState.conf.sessionLocalTimeZone
withAction("collectAsArrowToPython", queryExecution) { plan =>
PythonRDD.serveToStream("serve-Arrow") { outputStream =>
val out = new DataOutputStream(outputStream)
val batchWriter = new ArrowBatchStreamWriter(schema, out, timeZoneId)
// Batches ordered by (index of partition, batch index in that partition) tuple
val batchOrder = ArrayBuffer.empty[(Int, Int)]
// Handler to eagerly write batches to Python as they arrive, un-ordered
val handlePartitionBatches = (index: Int, arrowBatches: Array[Array[Byte]]) =>
if (arrowBatches.nonEmpty) {
// Write all batches (can be more than 1) in the partition, store the batch order tuple
batchWriter.writeBatches(arrowBatches.iterator)
arrowBatches.indices.foreach {
partitionBatchIndex => batchOrder.append((index, partitionBatchIndex))
}
}
Utils.tryWithSafeFinally {
val arrowBatchRdd = toArrowBatchRdd(plan)
sparkSession.sparkContext.runJob(
arrowBatchRdd,
(it: Iterator[Array[Byte]]) => it.toArray,
handlePartitionBatches)
} {
// After processing all partitions, end the batch stream
batchWriter.end()
// Write batch order indices
out.writeInt(batchOrder.length)
// Sort by (index of partition, batch index in that partition) tuple to get the
// overall_batch_index from 0 to N-1 batches, which can be used to put the
// transferred batches in the correct order
batchOrder.zipWithIndex.sortBy(_._1).foreach { case (_, overallBatchIndex) =>
out.writeInt(overallBatchIndex)
}
}
}
}
}
private[sql] def toPythonIterator(prefetchPartitions: Boolean = false): Array[Any] = {
withNewExecutionId {
PythonRDD.toLocalIteratorAndServe(javaToPython.rdd, prefetchPartitions)
}
}
////////////////////////////////////////////////////////////////////////////
// Private Helpers
////////////////////////////////////////////////////////////////////////////
/**
* Wrap a Dataset action to track all Spark jobs in the body so that we can connect them with
* an execution.
*/
private def withNewExecutionId[U](body: => U): U = {
SQLExecution.withNewExecutionId(queryExecution)(body)
}
/**
* Wrap an action of the Dataset's RDD to track all Spark jobs in the body so that we can connect
* them with an execution. Before performing the action, the metrics of the executed plan will be
* reset.
*/
private def withNewRDDExecutionId[U](body: => U): U = {
SQLExecution.withNewExecutionId(rddQueryExecution) {
rddQueryExecution.executedPlan.resetMetrics()
body
}
}
/**
* Wrap a Dataset action to track the QueryExecution and time cost, then report to the
* user-registered callback functions.
*/
private def withAction[U](name: String, qe: QueryExecution)(action: SparkPlan => U) = {
SQLExecution.withNewExecutionId(qe, Some(name)) {
qe.executedPlan.resetMetrics()
action(qe.executedPlan)
}
}
/**
* Collect all elements from a spark plan.
*/
private def collectFromPlan(plan: SparkPlan): Array[T] = {
// `ExpressionEncoder` is not thread-safe, here we create a new encoder.
val enc = resolvedEnc.copy()
plan.executeCollect().map(enc.fromRow)
}
private def sortInternal(global: Boolean, sortExprs: Seq[Column]): Dataset[T] = {
val sortOrder: Seq[SortOrder] = sortExprs.map { col =>
col.expr match {
case expr: SortOrder =>
expr
case expr: Expression =>
SortOrder(expr, Ascending)
}
}
withTypedPlan {
Sort(sortOrder, global = global, logicalPlan)
}
}
/** A convenient function to wrap a logical plan and produce a DataFrame. */
@inline private def withPlan(logicalPlan: LogicalPlan): DataFrame = {
Dataset.ofRows(sparkSession, logicalPlan)
}
/** A convenient function to wrap a logical plan and produce a Dataset. */
@inline private def withTypedPlan[U : Encoder](logicalPlan: LogicalPlan): Dataset[U] = {
Dataset(sparkSession, logicalPlan)
}
/** A convenient function to wrap a set based logical plan and produce a Dataset. */
@inline private def withSetOperator[U : Encoder](logicalPlan: LogicalPlan): Dataset[U] = {
if (classTag.runtimeClass.isAssignableFrom(classOf[Row])) {
// Set operators widen types (change the schema), so we cannot reuse the row encoder.
Dataset.ofRows(sparkSession, logicalPlan).asInstanceOf[Dataset[U]]
} else {
Dataset(sparkSession, logicalPlan)
}
}
/** Convert to an RDD of serialized ArrowRecordBatches. */
private[sql] def toArrowBatchRdd(plan: SparkPlan): RDD[Array[Byte]] = {
val schemaCaptured = this.schema
val maxRecordsPerBatch = sparkSession.sessionState.conf.arrowMaxRecordsPerBatch
val timeZoneId = sparkSession.sessionState.conf.sessionLocalTimeZone
plan.execute().mapPartitionsInternal { iter =>
val context = TaskContext.get()
ArrowConverters.toBatchIterator(
iter, schemaCaptured, maxRecordsPerBatch, timeZoneId, context)
}
}
// This is only used in tests, for now.
private[sql] def toArrowBatchRdd: RDD[Array[Byte]] = {
toArrowBatchRdd(queryExecution.executedPlan)
}
}
| goldmedal/spark | sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala | Scala | apache-2.0 | 129,354 |
package com.github.mauricio.async.db.mysql
import org.specs2.mutable.Specification
import java.util.UUID
import java.nio.ByteBuffer
import io.netty.buffer.Unpooled
import io.netty.util.CharsetUtil
import com.github.mauricio.async.db.RowData
class BinaryColumnsSpec extends Specification with ConnectionHelper {
"connection" should {
"correctly load fields as byte arrays" in {
val create = """CREATE TEMPORARY TABLE t (
| id BIGINT NOT NULL AUTO_INCREMENT,
| uuid BINARY(36) NOT NULL,
| address VARBINARY(16) NOT NULL,
| PRIMARY KEY (id),
| INDEX idx_t_uuid (uuid),
| INDEX idx_t_address (address)
|);""".stripMargin
val uuid = UUID.randomUUID().toString
val host = "127.0.0.1"
val preparedInsert = "INSERT INTO t (uuid, address) VALUES (?, ?)"
val insert =
s"INSERT INTO t (uuid, address) VALUES ('${uuid}', '${host}')"
val select = "SELECT * FROM t"
withConnection { connection =>
executeQuery(connection, create)
executeQuery(connection, insert)
val result = executeQuery(connection, select).rows.get
compareBytes(result(0), "uuid", uuid)
compareBytes(result(0), "address", host)
executePreparedStatement(connection, preparedInsert, uuid, host)
val otherResult = executePreparedStatement(connection, select).rows.get
compareBytes(otherResult(1), "uuid", uuid)
compareBytes(otherResult(1), "address", host)
}
}
"support BINARY type" in {
val create =
"""CREATE TEMPORARY TABLE POSTS (
| id INT NOT NULL AUTO_INCREMENT,
| binary_column BINARY(20),
| primary key (id))
""".stripMargin
val insert = "INSERT INTO POSTS (binary_column) VALUES (?)"
val select = "SELECT * FROM POSTS"
val bytes = (1 to 10).map(_.toByte).toArray
val padding = Array.fill[Byte](10)(0)
withConnection { connection =>
executeQuery(connection, create)
executePreparedStatement(connection, insert, bytes)
val row = executeQuery(connection, select).rows.get(0)
row("id") === 1
row("binary_column") === bytes ++ padding
}
}
"support VARBINARY type" in {
val create =
"""CREATE TEMPORARY TABLE POSTS (
| id INT NOT NULL AUTO_INCREMENT,
| varbinary_column VARBINARY(20),
| primary key (id))
""".stripMargin
val insert = "INSERT INTO POSTS (varbinary_column) VALUES (?)"
val select = "SELECT * FROM POSTS"
val bytes = (1 to 10).map(_.toByte).toArray
withConnection { connection =>
executeQuery(connection, create)
executePreparedStatement(connection, insert, bytes)
val row = executeQuery(connection, select).rows.get(0)
row("id") === 1
row("varbinary_column") === bytes
}
}
"support BLOB type" in {
val bytes = (1 to 10).map(_.toByte).toArray
testBlob(bytes)
}
"support BLOB type with large values" in {
val bytes = (1 to 2100).map(_.toByte).toArray
testBlob(bytes)
}
}
def testBlob(bytes: Array[Byte]) = {
val create =
"""CREATE TEMPORARY TABLE POSTS (
| id INT NOT NULL,
| blob_column BLOB,
| primary key (id))
""".stripMargin
val insert = "INSERT INTO POSTS (id,blob_column) VALUES (?,?)"
val select = "SELECT id,blob_column FROM POSTS ORDER BY id"
withConnection { connection =>
executeQuery(connection, create)
executePreparedStatement(connection, insert, 1, Some(bytes))
executePreparedStatement(connection, insert, 2, ByteBuffer.wrap(bytes))
executePreparedStatement(
connection,
insert,
3,
Unpooled.wrappedBuffer(bytes)
)
val Some(rows) = executeQuery(connection, select).rows
rows(0)("id") === 1
rows(0)("blob_column") === bytes
rows(1)("id") === 2
rows(1)("blob_column") === bytes
rows(2)("id") === 3
rows(2)("blob_column") === bytes
rows.size === 3
}
}
def compareBytes(row: RowData, column: String, expected: String) =
row(column) === expected.getBytes(CharsetUtil.UTF_8)
}
| dripower/postgresql-async | mysql-async/src/test/scala/com/github/mauricio/async/db/mysql/BinaryColumnsSpec.scala | Scala | apache-2.0 | 4,375 |
/*
* Copyright (c) 2017-2021, Robby, Kansas State University
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.sireum
import org.sireum.test._
class CircularQueueTest extends TestSuite {
val tests = Tests {
"NoDrop" - {
def create: CircularQueue[Z] = CircularQueue.create(3, z"0", T, CircularQueue.Policy.NoDrop)
* - {
val cq3 = create
val x1: Z = Z.random
cq3.enqueue(x1)
assert(cq3.elements == MSZ(x1))
val r1: Z = cq3.dequeue()
assert(r1 == x1)
}
* - {
val cq3 = create
val x1: Z = Z.random
val x2: Z = Z.random
cq3.enqueue(x1)
assert(cq3.elements == MSZ(x1))
cq3.enqueue(x2)
assert(cq3.elements == MSZ(x1, x2))
val r1: Z = cq3.dequeue()
assert(r1 == x1 && cq3.elements == MSZ(x2))
val r2: Z = cq3.dequeue()
assert(r2 == x2 && cq3.elements == MSZ())
}
* - {
val cq3 = create
val x1: Z = Z.random
val x2: Z = Z.random
val x3: Z = Z.random
cq3.enqueue(x1)
assert(cq3.elements == MSZ(x1))
cq3.enqueue(x2)
assert(cq3.elements == MSZ(x1, x2))
cq3.enqueue(x3)
assert(cq3.elements == MSZ(x1, x2, x3))
val r1: Z = cq3.dequeue()
assert(r1 == x1 && cq3.elements == MSZ(x2, x3))
val r2: Z = cq3.dequeue()
assert(r2 == x2 && cq3.elements == MSZ(x3))
val r3: Z = cq3.dequeue()
assert(r3 == x3 && cq3.elements == MSZ())
}
}
"DropFront" - {
def create: CircularQueue[Z] = CircularQueue.create(3, z"0", T, CircularQueue.Policy.DropFront)
* - {
val cq3 = create
val x1: Z = Z.random
cq3.enqueue(x1)
assert(cq3.elements == MSZ(x1))
val r1: Z = cq3.dequeue()
assert(r1 == x1)
}
* - {
val cq3 = create
val x1: Z = Z.random
val x2: Z = Z.random
cq3.enqueue(x1)
assert(cq3.elements == MSZ(x1))
cq3.enqueue(x2)
assert(cq3.elements == MSZ(x1, x2))
val r1: Z = cq3.dequeue()
assert(r1 == x1 && cq3.elements == MSZ(x2))
val r2: Z = cq3.dequeue()
assert(r2 == x2 && cq3.elements == MSZ())
}
* - {
val cq3 = create
val x1: Z = Z.random
val x2: Z = Z.random
val x3: Z = Z.random
cq3.enqueue(x1)
assert(cq3.elements == MSZ(x1))
cq3.enqueue(x2)
assert(cq3.elements == MSZ(x1, x2))
cq3.enqueue(x3)
assert(cq3.elements == MSZ(x1, x2, x3))
val r1: Z = cq3.dequeue()
assert(r1 == x1 && cq3.elements == MSZ(x2, x3))
val r2: Z = cq3.dequeue()
assert(r2 == x2 && cq3.elements == MSZ(x3))
val r3: Z = cq3.dequeue()
assert(r3 == x3 && cq3.elements == MSZ())
}
* - {
val cq3 = create
val x1: Z = Z.random
val x2: Z = Z.random
val x3: Z = Z.random
val x4: Z = Z.random
cq3.enqueue(x1)
assert(cq3.elements == MSZ(x1))
cq3.enqueue(x2)
assert(cq3.elements == MSZ(x1, x2))
cq3.enqueue(x3)
assert(cq3.elements == MSZ(x1, x2, x3))
cq3.enqueue(x4)
assert(cq3.elements == MSZ(x2, x3, x4))
val r1: Z = cq3.dequeue()
assert(r1 == x2 && cq3.elements == MSZ(x3, x4))
val r2: Z = cq3.dequeue()
assert(r2 == x3 && cq3.elements == MSZ(x4))
val r3: Z = cq3.dequeue()
assert(r3 == x4 && cq3.elements == MSZ())
}
* - {
val cq3 = create
val x1: Z = Z.random
val x2: Z = Z.random
val x3: Z = Z.random
val x4: Z = Z.random
val x5: Z = Z.random
cq3.enqueue(x1)
assert(cq3.elements == MSZ(x1))
cq3.enqueue(x2)
assert(cq3.elements == MSZ(x1, x2))
cq3.enqueue(x3)
assert(cq3.elements == MSZ(x1, x2, x3))
cq3.enqueue(x4)
assert(cq3.elements == MSZ(x2, x3, x4))
cq3.enqueue(x5)
assert(cq3.elements == MSZ(x3, x4, x5))
val r1: Z = cq3.dequeue()
assert(r1 == x3 && cq3.elements == MSZ(x4, x5))
val r2: Z = cq3.dequeue()
assert(r2 == x4 && cq3.elements == MSZ(x5))
val r3: Z = cq3.dequeue()
assert(r3 == x5 && cq3.elements == MSZ())
}
* - {
val cq3 = create
val x1: Z = Z.random
val x2: Z = Z.random
val x3: Z = Z.random
val x4: Z = Z.random
val x5: Z = Z.random
val x6: Z = Z.random
cq3.enqueue(x1)
assert(cq3.elements == MSZ(x1))
cq3.enqueue(x2)
assert(cq3.elements == MSZ(x1, x2))
cq3.enqueue(x3)
assert(cq3.elements == MSZ(x1, x2, x3))
cq3.enqueue(x4)
assert(cq3.elements == MSZ(x2, x3, x4))
cq3.enqueue(x5)
assert(cq3.elements == MSZ(x3, x4, x5))
cq3.enqueue(x6)
assert(cq3.elements == MSZ(x4, x5, x6))
val r1: Z = cq3.dequeue()
assert(r1 == x4 && cq3.elements == MSZ(x5, x6))
val r2: Z = cq3.dequeue()
assert(r2 == x5 && cq3.elements == MSZ(x6))
val r3: Z = cq3.dequeue()
assert(r3 == x6 && cq3.elements == MSZ())
}
}
"DropRear" - {
def create: CircularQueue[Z] = CircularQueue.create(3, z"0", T, CircularQueue.Policy.DropRear)
* - {
val cq3 = create
val x1: Z = Z.random
cq3.enqueue(x1)
assert(cq3.elements == MSZ(x1))
val r1: Z = cq3.dequeue()
assert(r1 == x1)
}
* - {
val cq3 = create
val x1: Z = Z.random
val x2: Z = Z.random
cq3.enqueue(x1)
assert(cq3.elements == MSZ(x1))
cq3.enqueue(x2)
assert(cq3.elements == MSZ(x1, x2))
val r1: Z = cq3.dequeue()
assert(r1 == x1 && cq3.elements == MSZ(x2))
val r2: Z = cq3.dequeue()
assert(r2 == x2 && cq3.elements == MSZ())
}
* - {
val cq3 = create
val x1: Z = Z.random
val x2: Z = Z.random
val x3: Z = Z.random
cq3.enqueue(x1)
assert(cq3.elements == MSZ(x1))
cq3.enqueue(x2)
assert(cq3.elements == MSZ(x1, x2))
cq3.enqueue(x3)
assert(cq3.elements == MSZ(x1, x2, x3))
val r1: Z = cq3.dequeue()
assert(r1 == x1 && cq3.elements == MSZ(x2, x3))
val r2: Z = cq3.dequeue()
assert(r2 == x2 && cq3.elements == MSZ(x3))
val r3: Z = cq3.dequeue()
assert(r3 == x3 && cq3.elements == MSZ())
}
* - {
val cq3 = create
val x1: Z = Z.random
val x2: Z = Z.random
val x3: Z = Z.random
val x4: Z = Z.random
cq3.enqueue(x1)
assert(cq3.elements == MSZ(x1))
cq3.enqueue(x2)
assert(cq3.elements == MSZ(x1, x2))
cq3.enqueue(x3)
assert(cq3.elements == MSZ(x1, x2, x3))
cq3.enqueue(x4)
assert(cq3.elements == MSZ(x1, x2, x4))
val r1: Z = cq3.dequeue()
assert(r1 == x1 && cq3.elements == MSZ(x2, x4))
val r2: Z = cq3.dequeue()
assert(r2 == x2 && cq3.elements == MSZ(x4))
val r3: Z = cq3.dequeue()
assert(r3 == x4 && cq3.elements == MSZ())
}
* - {
val cq3 = create
val x1: Z = Z.random
val x2: Z = Z.random
val x3: Z = Z.random
val x4: Z = Z.random
val x5: Z = Z.random
cq3.enqueue(x1)
assert(cq3.elements == MSZ(x1))
cq3.enqueue(x2)
assert(cq3.elements == MSZ(x1, x2))
cq3.enqueue(x3)
assert(cq3.elements == MSZ(x1, x2, x3))
cq3.enqueue(x4)
assert(cq3.elements == MSZ(x1, x2, x4))
cq3.enqueue(x5)
assert(cq3.elements == MSZ(x1, x2, x5))
val r1: Z = cq3.dequeue()
assert(r1 == x1 && cq3.elements == MSZ(x2, x5))
val r2: Z = cq3.dequeue()
assert(r2 == x2 && cq3.elements == MSZ(x5))
val r3: Z = cq3.dequeue()
assert(r3 == x5 && cq3.elements == MSZ())
}
* - {
val cq3 = create
val x1: Z = Z.random
val x2: Z = Z.random
val x3: Z = Z.random
val x4: Z = Z.random
val x5: Z = Z.random
val x6: Z = Z.random
cq3.enqueue(x1)
assert(cq3.elements == MSZ(x1))
cq3.enqueue(x2)
assert(cq3.elements == MSZ(x1, x2))
cq3.enqueue(x3)
assert(cq3.elements == MSZ(x1, x2, x3))
cq3.enqueue(x4)
assert(cq3.elements == MSZ(x1, x2, x4))
cq3.enqueue(x5)
assert(cq3.elements == MSZ(x1, x2, x5))
cq3.enqueue(x6)
assert(cq3.elements == MSZ(x1, x2, x6))
val r1: Z = cq3.dequeue()
assert(r1 == x1 && cq3.elements == MSZ(x2, x6))
val r2: Z = cq3.dequeue()
assert(r2 == x2 && cq3.elements == MSZ(x6))
val r3: Z = cq3.dequeue()
assert(r3 == x6 && cq3.elements == MSZ())
}
* - {
val cq3 = create
val x1: Z = Z.random
val x2: Z = Z.random
val x3: Z = Z.random
val x4: Z = Z.random
val x5: Z = Z.random
val x6: Z = Z.random
cq3.enqueue(x1)
assert(cq3.elements == MSZ(x1))
val r1 = cq3.dequeue()
assert(cq3.elements == MSZ() && r1 == x1)
cq3.enqueue(x2)
assert(cq3.elements == MSZ(x2))
cq3.enqueue(x3)
assert(cq3.elements == MSZ(x2, x3))
cq3.enqueue(x4)
assert(cq3.elements == MSZ(x2, x3, x4))
cq3.enqueue(x5)
assert(cq3.elements == MSZ(x2, x3, x5))
cq3.enqueue(x6)
assert(cq3.elements == MSZ(x2, x3, x6))
val r2 = cq3.dequeue()
assert(cq3.elements == MSZ(x3, x6) && r2 == x2)
val r3 = cq3.dequeue()
assert(cq3.elements == MSZ(x6) && r3 == x3)
val r4 = cq3.dequeue()
assert(cq3.elements == MSZ() && r4 == x6)
}
}
}
}
| sireum/v3-logika-runtime | library/shared/src/test/scala/org/sireum/CircularQueueTest.scala | Scala | bsd-2-clause | 11,377 |
package scorex.block
import scorex.serialization.Deser
import scala.util.Try
/**
* A generic interface with functionality to convert data into a part of a block and vice versa
*/
trait BlockProcessingModule[BlockPartDataType] extends Deser[BlockField[BlockPartDataType]] {
def parseBytes(bytes: Array[Byte]): Try[BlockField[BlockPartDataType]]
def parseBlockFields(blockFields: BlockField[BlockPartDataType]): BlockPartDataType = blockFields.value
def genesisData: BlockField[BlockPartDataType]
def formBlockData(data: BlockPartDataType): BlockField[BlockPartDataType]
}
| ScorexProject/Scorex | scorex-basics/src/main/scala/scorex/block/BlockProcessingModule.scala | Scala | cc0-1.0 | 590 |
package xyz.hyperreal.prolog
import scala.math._
import xyz.hyperreal.numbers.ComplexBigInt
object MathConstants {
val pi: Number = Pi
val e: Number = E
val i: Number = ComplexBigInt.i
}
| edadma/funl | prolog/src/main/scala/xyz/hyperreal/prolog/MathConstants.scala | Scala | mit | 200 |
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
* */
package io.github.tailhq.dynaml.models.neuralnets
import breeze.linalg.{DenseMatrix, DenseVector}
import breeze.stats.distributions.Uniform
import io.github.tailhq.dynaml.graph.NeuralGraph
import io.github.tailhq.dynaml.models.ParameterizedLearner
import io.github.tailhq.dynaml.optimization.GradBasedBackPropagation
import io.github.tailhq.dynaml.pipes.DataPipe
import io.github.tailhq.dynaml.probability.RandomVariable
/**
* @author tailhq date 22/03/2017.
*
* Base member of the Neural Network API.
* */
trait NeuralNet[
Data, BaseGraph, Input, Output,
Graph <: NeuralGraph[BaseGraph, Input, Output]] extends
ParameterizedLearner[
Data, Graph, Input, Output,
Stream[(Input, Output)]] {
val transform: DataPipe[Data, Stream[(Input, Output)]]
val numPoints: Int
/**
* Predict the value of the
* target variable given a
* point.
*
**/
override def predict(point: Input) = params.forwardPass(point)
def _neuralStack: Graph = params
}
/**
* Base class for implementations of feed-forward neural network
* models.
*
* @tparam Data The type of the training data.
* @tparam LayerP The type of the layer parameters i.e. weights/connections etc.
* @tparam I The type of the input features, output features and layer activations
* */
class GenericFFNeuralNet[Data, LayerP, I](
trainingAlgorithm: GradBasedBackPropagation[LayerP, I],
data: Data, trans: DataPipe[Data, Stream[(I, I)]],
layerInitializer: RandomVariable[Seq[LayerP]]) extends NeuralNet[
Data, Seq[NeuralLayer[LayerP, I, I]],
I, I, NeuralStack[LayerP, I]] {
val stackFactory: NeuralStackFactory[LayerP, I] = trainingAlgorithm.stackFactory
protected val generator: RandomVariable[Seq[LayerP]] = layerInitializer
override protected val g: Data = data
val num_layers: Int = stackFactory.layerFactories.length + 1
val num_hidden_layers: Int = stackFactory.layerFactories.length - 1
val activations: Seq[Activation[I]] = stackFactory.layerFactories.map(_.activationFunc)
override val transform = trans
override val numPoints = transform(g).length
override protected var params: NeuralStack[LayerP, I] = initParams()
override protected val optimizer: GradBasedBackPropagation[LayerP, I] = trainingAlgorithm
override def initParams() = (generator.sample > stackFactory).run()
/**
* Learn the parameters
* of the model.
*
* */
override def learn() = {
params = optimizer.optimize(numPoints, transform(g), initParams())
}
}
object GenericFFNeuralNet {
/**
* Create a feed forward neural net
* @param trainingAlgorithm The optimization/training routine
* as a [[GradBasedBackPropagation]] instance
* @param data The training data
* @param trans A data pipeline transforming the training data from
* type [[Data]] to [[Stream]] of input patterns and targets
* @param layerInitializer A [[RandomVariable]] which generates samples for
* the layer parameters.
* */
def apply[Data, LayerP, I](
trainingAlgorithm: GradBasedBackPropagation[LayerP, I],
data: Data, trans: DataPipe[Data, Stream[(I, I)]],
layerInitializer: RandomVariable[Seq[LayerP]]) =
new GenericFFNeuralNet[Data, LayerP, I](trainingAlgorithm, data, trans, layerInitializer)
/**
* Returns a random variable which enables sampling
* of layer connection matrices, in the case of feed forward
* neural networks operating on breeze vectors.
*
* */
def getWeightInitializer(num_units_by_layer: Seq[Int])
: RandomVariable[Seq[(DenseMatrix[Double], DenseVector[Double])]] = {
val uni = new Uniform(-1.0, 1.0)
RandomVariable(
num_units_by_layer.sliding(2)
.toSeq
.map(l => (l.head, l.last))
.map((c) => RandomVariable(() => (
DenseMatrix.tabulate(c._2, c._1)((_, _) => uni.draw()),
DenseVector.tabulate(c._2)(_ => uni.draw())))
):_*
)
}
}
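// A hypothetical usage sketch (not part of the original source): building a weight
// initializer for a 4-8-1 feed-forward topology and passing it to the factory method
// above; `backprop`, `trainingData` and `pipe` are assumed to exist in scope.
//   val init = GenericFFNeuralNet.getWeightInitializer(Seq(4, 8, 1))
//   val net  = GenericFFNeuralNet(backprop, trainingData, pipe, init)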
| mandar2812/DynaML | dynaml-core/src/main/scala/io/github/tailhq/dynaml/models/neuralnets/NeuralNet.scala | Scala | apache-2.0 | 4,764 |
package com.typesafe.slick.testkit.util
import org.slf4j.MDC
import scala.language.existentials
import scala.concurrent.{Promise, ExecutionContext, Await, Future, blocking}
import scala.concurrent.duration.Duration
import scala.reflect.ClassTag
import scala.util.{Failure, Success}
import scala.util.control.NonFatal
import java.lang.reflect.Method
import java.util.concurrent.{LinkedBlockingQueue, ThreadPoolExecutor, ExecutionException, TimeUnit}
import java.util.concurrent.atomic.AtomicInteger
import slick.dbio._
import slick.jdbc.JdbcBackend
import slick.util.DumpInfo
import slick.profile.{RelationalProfile, SqlProfile, Capability}
import slick.driver.JdbcProfile
import org.junit.runner.Description
import org.junit.runner.notification.RunNotifier
import org.junit.runners.model._
import org.junit.Assert
import org.reactivestreams.{Subscription, Subscriber, Publisher}
/** JUnit runner for the Slick driver test kit. */
class Testkit(clazz: Class[_ <: DriverTest], runnerBuilder: RunnerBuilder) extends SimpleParentRunner[TestMethod](clazz) {
val driverTest = clazz.newInstance
var tdb: TestDB = driverTest.tdb
def describeChild(ch: TestMethod) = ch.desc
def getChildren = if(tdb.isEnabled) {
driverTest.tests.flatMap { t =>
val ms = t.getMethods.filter { m =>
m.getName.startsWith("test") && m.getParameterTypes.length == 0
}
ms.map { m =>
val tname = m.getName + '[' + tdb.confName + ']'
new TestMethod(tname, Description.createTestDescription(t, tname), m, t)
}
}
} else Nil
override def runChildren(notifier: RunNotifier) = if(!children.isEmpty) {
tdb.cleanUpBefore()
try {
val is = children.iterator.map(ch => (ch, ch.cl.newInstance()))
.filter{ case (_, to) => to.setTestDB(tdb) }.zipWithIndex.toIndexedSeq
val last = is.length - 1
var previousTestObject: GenericTest[_ >: Null <: TestDB] = null
for(((ch, preparedTestObject), idx) <- is) {
val desc = describeChild(ch)
notifier.fireTestStarted(desc)
try {
val testObject =
if(previousTestObject ne null) previousTestObject
else preparedTestObject
previousTestObject = null
try ch.run(testObject) finally {
val skipCleanup = idx == last || (testObject.reuseInstance && (ch.cl eq is(idx+1)._1._1.cl))
if(skipCleanup) {
if(idx == last) testObject.closeKeepAlive()
else previousTestObject = testObject
}
else testObject.cleanup()
}
} catch {
case t: Throwable => addFailure(t, notifier, desc)
} finally notifier.fireTestFinished(desc)
}
} finally tdb.cleanUpAfter()
}
}
abstract class DriverTest(val tdb: TestDB) {
def tests = tdb.testClasses
}
case class TestMethod(name: String, desc: Description, method: Method, cl: Class[_ <: GenericTest[_ >: Null <: TestDB]]) {
private[this] def await[T](f: Future[T]): T =
try Await.result(f, TestkitConfig.asyncTimeout)
catch { case ex: ExecutionException => throw ex.getCause }
def run(testObject: GenericTest[_]): Unit = {
val r = method.getReturnType
testObject match {
case testObject: TestkitTest[_] =>
if(r == Void.TYPE) method.invoke(testObject)
else throw new RuntimeException(s"Illegal return type: '${r.getName}' in test method '$name' -- TestkitTest methods must return Unit")
case testObject: AsyncTest[_] =>
if(r == classOf[Future[_]]) await(method.invoke(testObject).asInstanceOf[Future[Any]])
else if(r == classOf[DBIOAction[_, _, _]]) await(testObject.db.run(method.invoke(testObject).asInstanceOf[DBIO[Any]]))
else throw new RuntimeException(s"Illegal return type: '${r.getName}' in test method '$name' -- AsyncTest methods must return Future or Action")
}
}
}
sealed abstract class GenericTest[TDB >: Null <: TestDB](implicit TdbClass: ClassTag[TDB]) {
protected[this] var _tdb: TDB = null
private[testkit] def setTestDB(tdb: TestDB): Boolean = {
tdb match {
case TdbClass(o) =>
_tdb = o
true
case _ =>
false
}
}
lazy val tdb: TDB = _tdb
private[testkit] var keepAliveSession: tdb.profile.Backend#Session = null
private[this] var unique = new AtomicInteger
val reuseInstance = false
lazy val db = {
val db = tdb.createDB()
keepAliveSession = db.createSession()
if(!tdb.isPersistent && tdb.isShared)
keepAliveSession.force() // keep the database in memory with an extra connection
db
}
final def cleanup() = if(keepAliveSession ne null) {
try if(tdb.isPersistent) tdb.dropUserArtifacts(keepAliveSession)
finally {
try db.close() finally closeKeepAlive()
}
}
final def closeKeepAlive() = {
if(keepAliveSession ne null) keepAliveSession.close()
}
implicit class StringContextExtensionMethods(s: StringContext) {
/** Generate a unique name suitable for a database entity */
def u(args: Any*) = s.standardInterpolator(identity, args) + "_" + unique.incrementAndGet()
}
final def mark[T](id: String, f: => T): T = {
def set(id: String): Unit =
if(id eq null) MDC.remove("debugId")
else MDC.put("debugId", id)
val old = MDC.get("debugId")
try {
set(if(id eq null) id else s" [$id]")
f
} finally set(old)
}
final def mark[R, S <: NoStream, E <: Effect](id: String, f: => DBIOAction[R, S, E]): DBIOAction[R, S, E] =
mark[DBIOAction[R, S, E]](id, f.named(id))
def rcap = RelationalProfile.capabilities
def scap = SqlProfile.capabilities
def jcap = JdbcProfile.capabilities
def tcap = TestDB.capabilities
}
abstract class TestkitTest[TDB >: Null <: TestDB](implicit TdbClass: ClassTag[TDB]) extends GenericTest[TDB] {
@deprecated("Use implicitSession instead of sharedSession", "3.0")
protected final def sharedSession: tdb.profile.Backend#Session = implicitSession
protected implicit def implicitSession: tdb.profile.Backend#Session = {
db
keepAliveSession
}
def ifCap[T](caps: Capability*)(f: => T): Unit =
if(caps.forall(c => tdb.capabilities.contains(c))) f
def ifNotCap[T](caps: Capability*)(f: => T): Unit =
if(!caps.forall(c => tdb.capabilities.contains(c))) f
def assertFail(f: =>Unit) = {
var succeeded = false
try {
f
succeeded = true
} catch {
case e: Exception if !scala.util.control.Exception.shouldRethrow(e) =>
}
if(succeeded) Assert.fail("Exception expected")
}
def assertAllMatch[T](t: TraversableOnce[T])(f: PartialFunction[T, _]) = t.foreach { x =>
if(!f.isDefinedAt(x)) Assert.fail("Expected shape not matched by: "+x)
}
}
abstract class AsyncTest[TDB >: Null <: TestDB](implicit TdbClass: ClassTag[TDB]) extends GenericTest[TDB] {
final override val reuseInstance = true
protected implicit def asyncTestExecutionContext = ExecutionContext.global
/** Test Action: Get the current database session */
object GetSession extends SynchronousDatabaseAction[TDB#Driver#Backend#Session, NoStream, TDB#Driver#Backend, Effect] {
def run(context: TDB#Driver#Backend#Context) = context.session
def getDumpInfo = DumpInfo(name = "<GetSession>")
}
/** Test Action: Check if the current database session is pinned */
object IsPinned extends SynchronousDatabaseAction[Boolean, NoStream, TDB#Driver#Backend, Effect] {
def run(context: TDB#Driver#Backend#Context) = context.isPinned
def getDumpInfo = DumpInfo(name = "<IsPinned>")
}
/** Test Action: Get the current transactionality level and autoCommit flag */
object GetTransactionality extends SynchronousDatabaseAction[(Int, Boolean), NoStream, JdbcBackend, Effect] {
def run(context: JdbcBackend#Context) =
context.session.asInstanceOf[JdbcBackend#BaseSession].getTransactionality
def getDumpInfo = DumpInfo(name = "<GetTransactionality>")
}
/** Test Action: Get the current statement parameters, except for `statementInit` which is always set to null */
object GetStatementParameters extends SynchronousDatabaseAction[JdbcBackend.StatementParameters, NoStream, JdbcBackend, Effect] {
def run(context: JdbcBackend#Context) = {
val s = context.session
JdbcBackend.StatementParameters(s.resultSetType, s.resultSetConcurrency, s.resultSetHoldability, null, s.fetchSize)
}
def getDumpInfo = DumpInfo(name = "<GetStatementParameters>")
}
def ifCap[E <: Effect, R](caps: Capability*)(f: => DBIOAction[R, NoStream, E]): DBIOAction[Unit, NoStream, E] =
if(caps.forall(c => tdb.capabilities.contains(c))) f.andThen(DBIO.successful(())) else DBIO.successful(())
def ifNotCap[E <: Effect, R](caps: Capability*)(f: => DBIOAction[R, NoStream, E]): DBIOAction[Unit, NoStream, E] =
if(!caps.forall(c => tdb.capabilities.contains(c))) f.andThen(DBIO.successful(())) else DBIO.successful(())
def ifCapF[R](caps: Capability*)(f: => Future[R]): Future[Unit] =
if(caps.forall(c => tdb.capabilities.contains(c))) f.map(_ => ()) else Future.successful(())
def ifNotCapF[R](caps: Capability*)(f: => Future[R]): Future[Unit] =
if(!caps.forall(c => tdb.capabilities.contains(c))) f.map(_ => ()) else Future.successful(())
def asAction[R](f: tdb.profile.Backend#Session => R): DBIOAction[R, NoStream, Effect] =
new SynchronousDatabaseAction[R, NoStream, tdb.profile.Backend, Effect] {
def run(context: tdb.profile.Backend#Context): R = f(context.session)
def getDumpInfo = DumpInfo(name = "<asAction>")
}
def seq[E <: Effect](actions: DBIOAction[_, NoStream, E]*): DBIOAction[Unit, NoStream, E] = DBIO.seq[E](actions: _*)
/** Synchronously consume a Reactive Stream and materialize it as a Vector. */
def materialize[T](p: Publisher[T]): Future[Vector[T]] = {
val builder = Vector.newBuilder[T]
val pr = Promise[Vector[T]]()
try p.subscribe(new Subscriber[T] {
def onSubscribe(s: Subscription): Unit = s.request(Long.MaxValue)
def onComplete(): Unit = pr.success(builder.result())
def onError(t: Throwable): Unit = pr.failure(t)
def onNext(t: T): Unit = builder += t
}) catch { case NonFatal(ex) => pr.failure(ex) }
pr.future
}
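  // A hypothetical usage sketch (not part of the original source), inside an
  // AsyncTest body: materialize a streamed query result and assert on it.
  // `ts` (a TableQuery) and `expectedRows` are assumed to exist.
  //   materialize(db.stream(ts.sortBy(_.id).result)).map(_ shouldBe expectedRows)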
/** Iterate synchronously over a Reactive Stream. */
def foreach[T](p: Publisher[T])(f: T => Any): Future[Unit] = {
val pr = Promise[Unit]()
try p.subscribe(new Subscriber[T] {
def onSubscribe(s: Subscription): Unit = s.request(Long.MaxValue)
def onComplete(): Unit = pr.success(())
def onError(t: Throwable): Unit = pr.failure(t)
def onNext(t: T): Unit = f(t)
}) catch { case NonFatal(ex) => pr.failure(ex) }
pr.future
}
/** Asynchronously consume a Reactive Stream and materialize it as a Vector, requesting new
* elements one by one and transforming them after the specified delay. This ensures that the
* transformation does not run in the synchronous database context but still preserves
* proper sequencing. */
def materializeAsync[T, R](p: Publisher[T], tr: T => Future[R], delay: Duration = Duration(100L, TimeUnit.MILLISECONDS)): Future[Vector[R]] = {
val exe = new ThreadPoolExecutor(1, 1, 1L, TimeUnit.SECONDS, new LinkedBlockingQueue[Runnable]())
val ec = ExecutionContext.fromExecutor(exe)
val builder = Vector.newBuilder[R]
val pr = Promise[Vector[R]]()
var sub: Subscription = null
def async[T](thunk: => T): Future[T] = {
val f = Future {
Thread.sleep(delay.toMillis)
thunk
}(ec)
f.onFailure { case t =>
pr.tryFailure(t)
sub.cancel()
}
f
}
try p.subscribe(new Subscriber[T] {
def onSubscribe(s: Subscription): Unit = async {
sub = s
sub.request(1L)
}
def onComplete(): Unit = async(pr.trySuccess(builder.result()))
def onError(t: Throwable): Unit = async(pr.tryFailure(t))
def onNext(t: T): Unit = async {
tr(t).onComplete {
case Success(r) =>
builder += r
sub.request(1L)
case Failure(t) =>
pr.tryFailure(t)
sub.cancel()
}(ec)
}
}) catch { case NonFatal(ex) => pr.tryFailure(ex) }
val f = pr.future
f.onComplete(_ => exe.shutdown())
f
}
implicit class AssertionExtensionMethods[T](v: T) {
private[this] val cln = getClass.getName
private[this] def fixStack(f: => Unit): Unit = try f catch {
case ex: AssertionError =>
ex.setStackTrace(ex.getStackTrace.iterator.filterNot(_.getClassName.startsWith(cln)).toArray)
throw ex
}
def shouldBe(o: Any): Unit = fixStack(Assert.assertEquals(o, v))
def shouldNotBe(o: Any): Unit = fixStack(Assert.assertNotSame(o, v))
def should(f: T => Boolean): Unit = fixStack(Assert.assertTrue(f(v)))
def shouldBeA[T](implicit ct: ClassTag[T]): Unit = {
if(!ct.runtimeClass.isInstance(v))
fixStack(Assert.fail("Expected value of type " + ct.runtimeClass.getName + ", got " + v.getClass.getName))
}
}
implicit class CollectionAssertionExtensionMethods[T](v: TraversableOnce[T]) {
private[this] val cln = getClass.getName
private[this] def fixStack(f: => Unit): Unit = try f catch {
case ex: AssertionError =>
ex.setStackTrace(ex.getStackTrace.iterator.filterNot(_.getClassName.startsWith(cln)).toArray)
throw ex
}
def shouldAllMatch(f: PartialFunction[T, _]) = v.foreach { x =>
if(!f.isDefinedAt(x)) fixStack(Assert.fail("Value does not match expected shape: "+x))
}
}
}
| lukasz-golebiewski/slick | slick-testkit/src/main/scala/com/typesafe/slick/testkit/util/Testkit.scala | Scala | bsd-2-clause | 13,604 |
package nbmtools
import java.util.zip.ZipFile
object NbmUtils {
private implicit def toBiFunction[A, B, C](f: Function2[A, B, C]) = {
new java.util.function.BiFunction[A, B, C] {
override def apply(a: A, b: B): C = f(a, b)
}
}
implicit def toZipEntryList(file: ZipFile): List[ZipEntry] = {
def f(entry: ZipUtils.Entry, list: List[ZipEntry]): List[ZipEntry] = {
ZipEntry.of(entry)::list
}
ZipUtils.fold(f _, Nil, file).reverse
}
}
| tkob/nbmtools | src/main/scala/nbmtools/NbmUtils.scala | Scala | mit | 513 |
/**
* Copyright 2015 Mohiva Organisation (license at mohiva dot com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mohiva.play.silhouette.api.repositories
import com.mohiva.play.silhouette.api.{ AuthInfo, LoginInfo }
import scala.concurrent.Future
import scala.reflect.ClassTag
/**
* A trait that provides the means to persist authentication information for the Silhouette module.
*
* If the application supports the concept of "merged identities", i.e., the same user being
* able to authenticate through different providers, then make sure that the auth info for
* every linked login info gets stored separately.
*/
trait AuthInfoRepository {
/**
* Finds the auth info which is linked with the specified login info.
*
* @param loginInfo The linked login info.
* @param tag The class tag of the auth info.
* @tparam T The type of the auth info to handle.
* @return The found auth info or None if no auth info could be found for the given login info.
*/
def find[T <: AuthInfo](loginInfo: LoginInfo)(implicit tag: ClassTag[T]): Future[Option[T]]
/**
* Adds new auth info for the given login info.
*
* @param loginInfo The login info for which the auth info should be saved.
* @param authInfo The auth info to save.
* @tparam T The type of the auth info to handle.
* @return The saved auth info.
*/
def add[T <: AuthInfo](loginInfo: LoginInfo, authInfo: T): Future[T]
/**
* Updates the auth info for the given login info.
*
* @param loginInfo The login info for which the auth info should be updated.
* @param authInfo The auth info to update.
* @tparam T The type of the auth info to handle.
* @return The updated auth info.
*/
def update[T <: AuthInfo](loginInfo: LoginInfo, authInfo: T): Future[T]
/**
* Saves the auth info for the given login info.
*
* This method either adds the auth info if it doesn't exists or it updates the auth info
* if it already exists.
*
* @param loginInfo The login info for which the auth info should be saved.
* @param authInfo The auth info to save.
* @tparam T The type of the auth info to handle.
* @return The updated auth info.
*/
def save[T <: AuthInfo](loginInfo: LoginInfo, authInfo: T): Future[T]
/**
* Removes the auth info for the given login info.
*
* @param loginInfo The login info for which the auth info should be removed.
* @param tag The class tag of the auth info.
* @tparam T The type of the auth info to handle.
* @return A future to wait for the process to be completed.
*/
def remove[T <: AuthInfo](loginInfo: LoginInfo)(implicit tag: ClassTag[T]): Future[Unit]
}
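// A hypothetical usage sketch (not part of the original source): reading and saving
// password-based auth info through an injected repository instance. The names
// `authInfoRepository`, `loginInfo` and Silhouette's `PasswordInfo` type are assumed
// to be available in scope.
//   val existing: Future[Option[PasswordInfo]] =
//     authInfoRepository.find[PasswordInfo](loginInfo)
//   val saved: Future[PasswordInfo] =
//     authInfoRepository.save(loginInfo, PasswordInfo("bcrypt", "hashed-password"))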
| mohiva/play-silhouette | silhouette/app/com/mohiva/play/silhouette/api/repositories/AuthInfoRepository.scala | Scala | apache-2.0 | 3,198 |
package service
import dao._
import model._
import org.joda.time.DateTime
import router.dto.ActivityDto
import utils.DatabaseConfig._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scalaz.OptionT
import scalaz.OptionT._
import scalaz.std.scalaFuture._
/**
* Created by gneotux on 17/07/15.
*/
trait ActivityService {
def activityDao: ActivityDao
def attendeeDao: AttendeeDao
def speakerDao: SpeakerDao
def userDao: UserDao
def activityTagDao: ActivityTagDao
def tagDao: TagDao
def add(activity: ActivityDto): Future[Option[Activity]]
def addAttendee(activityId: Int, userId: Int): Future[Option[Attendee]]
def addSpeaker(activityId: Int, userId: Int): Future[Option[Speaker]]
def getAll(): Future[Seq[Activity]]
def getAllAttendees(activityId: Int): Future[Seq[User]]
def getAllSpeakers(activityId: Int): Future[Seq[User]]
def deleteAttendee(activityId: Int, userId: Int): Future[Int]
def deleteSpeaker(activityId: Int, userId: Int): Future[Int]
def get(id: Int): Future[Option[Activity]]
def delete(id: Int): Future[Int]
def update(id: Int, activityDto: ActivityDto): Future[Option[Activity]]
def getAllTags(activityId: Int): Future[Seq[Tag]]
def addTag(activityId: Int, tagId: Int): Future[Option[ActivityTag]]
def deleteTag(activityId: Int, tagId: Int): Future[Int]
def populateActivity: ActivityDto => Activity = (activityDto: ActivityDto) =>
Activity(
0,
activityDto.eventId,
activityDto.locationId,
activityDto.activityTypeId,
activityDto.title,
activityDto.description,
activityDto.objective,
activityDto.startTime,
activityDto.endTime,
activityDto.resources
)
}
object ActivityService extends ActivityService {
override val activityDao = ActivityDao
override val attendeeDao = AttendeeDao
override val speakerDao = SpeakerDao
override val userDao = UserDao
override val activityTagDao = ActivityTagDao
override val tagDao = TagDao
override def add(activity: ActivityDto): Future[Option[Activity]] = db.run {
for {
activityId <- activityDao.add(populateActivity(activity))
activity <- ActivityDao.get(activityId.getOrElse(-1))
} yield activity
}
override def addAttendee(activityId: Int, userId: Int): Future[Option[Attendee]] = {
(for {
activity <- optionT(db.run(activityDao.get(activityId)))
// _ = if (activity.isEmpty) throw new NoSuchElementException(s"Activity not found with activityId: ${activityId}")
user <- optionT(db.run(userDao.get(userId)))
// _ = if (user.isEmpty) throw new NoSuchElementException(s"Atendee with userId: ${userId} not found")
attendeeId <- optionT(db.run(attendeeDao.add(Attendee(0,userId,activityId))).map(Option.apply))
attendee <- optionT(db.run(attendeeDao.get(attendeeId)))
} yield attendee).run
}
override def addSpeaker(activityId: Int, userId: Int): Future[Option[Speaker]] = {
(for {
activity <- optionT(db.run(activityDao.get(activityId)))
// _ = if (activity.isEmpty) throw new NoSuchElementException(s"Activity not found with activityId: ${activityId}")
user <- optionT(db.run(userDao.get(userId)))
// _ = if (user.isEmpty) throw new NoSuchElementException(s"Speaker with userId: ${userId} not found")
attendeeId <- optionT(db.run(speakerDao.add(Speaker(0,userId,activityId)).map(Option.apply)))
speaker <- optionT(db.run(speakerDao.get(attendeeId)))
} yield speaker).run
}
override def deleteAttendee(activityId: Int, userId: Int): Future[Int] = db.run {
attendeeDao.deleteByUserAndActivityId(userId, activityId)
}
override def deleteSpeaker(activityId: Int, userId: Int): Future[Int] = db.run {
speakerDao.deleteByUserAndActivityId(userId, activityId)
}
override def getAll(): Future[Seq[Activity]] = db.run {
activityDao.getAll
}
override def getAllAttendees(activityId: Int): Future[Seq[User]] = db.run{
attendeeDao.getUsersByActivityId(activityId)
}
override def getAllSpeakers(activityId: Int): Future[Seq[User]] = db.run{
speakerDao.getUsersByActivityId(activityId)
}
override def get(id: Int): Future[Option[Activity]] = db.run {
activityDao.get(id)
}
override def delete(id: Int): Future[Int] = db.run {
activityDao.delete(id)
}
override def update(id: Int, activityDto: ActivityDto): Future[Option[Activity]] = {
val toUpdate = populateActivity(activityDto).copy(id = id)
val result = for {
p <- optionT(db.run(activityDao.get(id)))
resultUpdate <- optionT(db.run(activityDao.add(toUpdate)))
updated <- optionT(db.run(activityDao.get(id)))
} yield updated
result.run
}
override def getAllTags(activityId: Int): Future[Seq[Tag]] = db.run{
activityTagDao.getTagsByActivityId(activityId)
}
override def addTag(activityId: Int, tagId: Int): Future[Option[ActivityTag]] = {
(for {
activity <- optionT(db.run(activityDao.get(activityId)))
// _ = if (activity.isEmpty) throw new NoSuchElementException(s"Activity not found with activityId: ${activityId}")
tag <- optionT(db.run(tagDao.get(tagId)))
// _ = if (user.isEmpty) throw new NoSuchElementException(s"Speaker with userId: ${userId} not found")
activityTagId <- optionT(db.run(activityTagDao.add(ActivityTag(0,tagId,activityId)).map(Option.apply)))
activityTag <- optionT(db.run(activityTagDao.get(activityTagId)))
} yield activityTag).run
}
override def deleteTag(activityId: Int, tagId: Int): Future[Int] = db.run {
activityTagDao.deleteByActivityIdAndTagId(activityId, tagId)
}
}
|
Gneotux/pfc
|
src/main/scala/service/ActivityService.scala
|
Scala
|
apache-2.0
| 5,693 |
package com.yammer.metrics.scala
import collection.JavaConversions._
import java.util.concurrent.TimeUnit
import java.io.File
/**
* A Scala façade class for Timer.
*/
class Timer(metric: com.yammer.metrics.Timer) {
/**
* Runs f, recording its duration, and returns the result of f.
*/
def time[A](f: => A): A = {
val ctx = metric.time
try {
f
} finally {
ctx.stop
}
}
/**
* Adds a recorded duration.
*/
def update(duration: Long, unit: TimeUnit) {
metric.update(duration, unit)
}
/**
   * Returns a timing [[com.yammer.metrics.Timer.Context]],
* which measures an elapsed time in nanoseconds.
*/
def timerContext() = metric.time()
/**
* Returns the number of durations recorded.
*/
def count = metric.getCount
/**
* Returns a snapshot of the values in the timer's sample.
*/
def snapshot = metric.getSnapshot
/**
* Returns the fifteen-minute rate of timings.
*/
def fifteenMinuteRate = metric.getFifteenMinuteRate
/**
* Returns the five-minute rate of timings.
*/
def fiveMinuteRate = metric.getFiveMinuteRate
/**
* Returns the mean rate of timings.
*/
def meanRate = metric.getMeanRate
/**
* Returns the one-minute rate of timings.
*/
def oneMinuteRate = metric.getOneMinuteRate
}
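// Editorial addition: a hedged usage sketch of the facade above. It assumes a
// com.yammer.metrics.Timer has already been obtained from whatever registry
// the application uses; only members defined in this file are exercised.
object TimerFacadeExample {
  def timeAndReport[A](underlying: com.yammer.metrics.Timer)(work: => A): A = {
    val timer = new Timer(underlying)
    val result = timer.time(work) // records the duration of `work` and returns its result
    println(s"count=${timer.count} one-minute-rate=${timer.oneMinuteRate}")
    result
  }
}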
|
hailcode/metrics-scala
|
src/main/scala/com/yammer/metrics/scala/Timer.scala
|
Scala
|
apache-2.0
| 1,326 |
package com.theomn.cartography
import net.minecraft.entity.player.EntityPlayerMP
import net.minecraft.server.MinecraftServer
import net.minecraft.world.World
import net.minecraftforge.fml.relauncher.{SideOnly, Side}
import org.apache.logging.log4j.LogManager
import net.minecraft.util.BlockPos
import net.minecraftforge.fml.common.eventhandler.SubscribeEvent
import net.minecraftforge.fml.common.gameevent.TickEvent.ServerTickEvent
import scala.collection.mutable
case class Player(id: Int, name: String, pos: BlockPos, world: World)
object Player {
def apply(entity: EntityPlayerMP) = new Player(
entity.getEntityId,
entity.getDisplayNameString,
entity.getPosition,
entity.worldObj)
}
@SideOnly(Side.SERVER)
class CartographyEventHandler {
val MOVE_THRESHOLD = 100
private val lastPos = mutable.Map[Int, BlockPos]()
def now(): Long = MinecraftServer.getCurrentTimeMillis
val logger = LogManager.getLogger("Cartography")
private var lastTick = now()
val interval = 10000
var processing: Boolean = false
def activePlayers: Array[Player] =
MinecraftServer
.getServer
.getConfigurationManager
.playerEntityList
.toArray
.map(_.asInstanceOf[EntityPlayerMP])
.map(Player(_))
@SubscribeEvent
def tick(e: ServerTickEvent): Unit = {
val currentTime = now()
val delta = currentTime - lastTick
if (!processing && delta > interval) {
processing = true
try {
logger.info(s"Server Tick: $delta")
activePlayers.filter(isMoving).foreach(generateTile)
lastTick = currentTime
} finally processing = false
}
}
def distanceSq(a: BlockPos, b: BlockPos): Int = {
val x = a.getX - b.getX
val z = a.getZ - b.getZ
(x * x) + (z * z)
}
  /** Gross side-effect, but this method is used both to filter out the players
    * that will not produce tiles this tick and to ensure our store of last
    * positions is up to date.
*/
def isMoving(player: Player): Boolean = {
val newBlock = player.pos
val oldBlock = lastPos.get(player.id)
oldBlock match {
case None =>
lastPos.synchronized {
lastPos.update(player.id, newBlock)
}
true
case Some(block) => if(distanceSq(block, newBlock) >= MOVE_THRESHOLD) {
lastPos.synchronized {
lastPos.update(player.id, newBlock)
}
true
} else false
}
}
def generateTile(player: Player): Unit = {
var tile = MapTile(player)
logger.debug(tile.toString)
tile.save()
tile = null
}
}
|
onelson/cartography
|
src/main/scala/com/theomn/cartography/CartographyEventHandler.scala
|
Scala
|
lgpl-2.1
| 2,591 |
package myutil
/**
* Created by basil on 16/09/14.
*/
object FormattedOutput {
def byteArrayToHex(bytes: Array[Byte]): String = bytes.map{ b => String.format("%02X", new java.lang.Integer(b & 0xff)) }.mkString("'", " ", "'")
}
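// Editorial addition: a hedged usage sketch. For Array[Byte](0x00, 0x0f, -1)
// byteArrayToHex should yield the quoted string '00 0F FF'.
object FormattedOutputExample {
  def main(args: Array[String]): Unit = {
    val bytes = Array[Byte](0x00, 0x0f, 0xff.toByte)
    println(FormattedOutput.byteArrayToHex(bytes)) // prints '00 0F FF'
  }
}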
|
cn-uofbasel/nfn-scala
|
src/main/scala/myutil/FormattedOutput.scala
|
Scala
|
isc
| 234 |
// Copyright (c) 2015 Ben Zimmer. All rights reserved.
package bdzimmer.pixeleditor.model
import org.apache.commons.io.FileUtils
import java.io.File
import scala.collection.JavaConverters._
case class AssetMetadata(id: String, assetType: String, name: String, filename: String, info: String)
object AssetMetadataUtils {
val FieldSep = "\t"
// save asset metadata
def saveAssetMetadata(filename: String, items: List[AssetMetadata]): Unit = {
val pw = new java.io.PrintWriter(new File(filename))
items.foreach(x => {
// scalastyle:ignore regex
pw.println(
x.id + FieldSep
+ x.assetType + FieldSep
+ x.name + FieldSep
+ x.filename + FieldSep
+ x.info)
})
pw.close()
}
// load asset metadata
def loadAssetMetadata(filename: String): List[AssetMetadata] = {
val lines = FileUtils.readLines(new File(filename), "UTF-8").asScala
lines.map(line => {
val x = line.split("\t")
AssetMetadata(x(0), x(1), x(2), x(3), x(4))
}).toList
}
}
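// Editorial addition: a hedged round-trip sketch for the utilities above. The
// temporary file and sample records are illustrative assumptions.
object AssetMetadataExample {
  def main(args: Array[String]): Unit = {
    val path = java.io.File.createTempFile("assets", ".tsv").getAbsolutePath
    val items = List(
      AssetMetadata("1", "tileset", "terrain", "terrain.png", "16x16"),
      AssetMetadata("2", "sprite", "hero", "hero.png", "32x32"))
    AssetMetadataUtils.saveAssetMetadata(path, items)
    assert(AssetMetadataUtils.loadAssetMetadata(path) == items) // round-trip preserves the records
  }
}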
|
bdzimmer/pixel-editor
|
src/main/scala/bdzimmer/pixeleditor/model/AssetMetadata.scala
|
Scala
|
bsd-3-clause
| 1,049 |
package com.github.mrpowers.spark.daria.utils
import org.apache.spark.sql.{DataFrame, SparkSession}
case class DariaSchemaMismatchError(smth: String) extends Exception(smth)
object SchemaSafeWriter {
// writes to a Parquet data lake if the schema matches the existing schema
// throws an error if the schemas don't match
def parquetAppend(path: String, df: DataFrame): Unit = {
append(path, df, "parquet")
}
def deltaAppend(path: String, df: DataFrame): Unit = {
append(path, df, "delta")
}
def append(path: String, df: DataFrame, fileFormat: String): Unit = {
val spark = SparkSession.getActiveSession.get
    val existingDF = spark.read.format(fileFormat).load(path) // read with the declared format so Delta paths are handled correctly
val existingSchema = existingDF.schema
if (existingSchema.equals(df.schema)) {
df.write.format(fileFormat).mode("append").save(path)
} else {
println("Existing schema:")
existingDF.printSchema()
println("New schema:")
df.printSchema()
throw DariaSchemaMismatchError(s"The new schema doesn't match the existing schema")
}
}
}
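// Editorial addition: a hedged usage sketch. The session, path and column
// names are illustrative assumptions; it relies on the imports at the top of
// this file and on `spark` being the active session.
object SchemaSafeWriterExample {
  def appendEvents(spark: SparkSession, path: String): Unit = {
    import spark.implicits._
    val newEvents = Seq((1L, "click"), (2L, "view")).toDF("id", "action")
    // Throws DariaSchemaMismatchError if `path` already holds a different schema.
    SchemaSafeWriter.parquetAppend(path, newEvents)
  }
}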
|
MrPowers/spark-daria
|
src/main/scala/com/github/mrpowers/spark/daria/utils/SchemaSafeWriter.scala
|
Scala
|
mit
| 1,078 |
/*
* Copyright (c) 2011-14 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shapeless
import org.junit.Test
import org.junit.Assert._
import poly._
import ops.hlist.Mapper
import test._
import testutil._
/** Polymorphic singleton function. */
object singleton extends (Id ~> Set) {
def apply[T](t : T) = Set(t)
}
/** Polymorphic function selecting an arbitrary element from a non-empty `Set`. */
object choose extends (Set ~> Option) {
def apply[T](s : Set[T]) = s.headOption
}
/** Polymorphic function creating singleton `List`s. */
object list extends (Id ~> List) {
def apply[T](t : T) = List(t)
}
/** Polymorphic function returning the head of a `List`. */
object headOption extends (List ~> Option) {
def apply[T](l : List[T]) = l.headOption
}
/** Polymorphic function which injects a value into an `Option`. */
object option extends (Id ~> Option) {
def apply[T](t : T) = Option(t)
}
/** Polymorphic function testing whether or not an `Option` is defined. */
object isDefined extends (Option ~>> Boolean) {
def apply[T](o : Option[T]) = o.isDefined
}
/** Polymorphic function which opens an `Option`. */
object get extends (Option ~> Id) {
def apply[T](o : Option[T]) = o.get
}
/** Polymorphic addition with type specific cases. */
object plus extends Poly2 {
implicit val caseInt = at[Int, Int](_ + _)
implicit val caseDouble = at[Double, Double](_ + _)
implicit val caseString = at[String, String](_ + _)
implicit def caseList[T] = at[List[T], List[T]](_ ::: _)
}
/** Polymorphic zero with type specific cases. */
object zero extends Poly0 {
implicit val zeroInt = at(0)
implicit val zeroDouble = at(0.0)
implicit val zeroString = at("")
implicit def zeroList[T] = at[List[T]](Nil)
}
class PolyTests {
object toInt extends (Id ~>> Int) {
def apply[T](t : T) = t.toString.toInt
}
object size extends Poly1 {
implicit def default[T] = at[T](_ => 1)
implicit def caseInt = at[Int](_ => 1)
implicit def caseString = at[String](_.length)
implicit def caseList[T] = at[List[T]](_.length)
implicit def caseOption[T](implicit st : Case.Aux[T, Int]) = at[Option[T]](t => 1+(t map size).getOrElse(0))
implicit def caseTuple[T, U](implicit st : Case.Aux[T, Int], su : Case.Aux[U, Int]) = at[(T, U)]{ case (t, u) => size(t)+size(u) }
}
@Test
def testHRFn {
implicitly[choose.Case[Set[Int]]]
implicitly[size.Case[Int]]
implicitly[option.Case[Int]]
implicitly[singleton.Case[Int]]
val si = size(23)
assertEquals(1, si)
val ss = size("foo")
assertEquals(3, ss)
val sl = size(List(1, 2, 3))
assertEquals(3, sl)
val so = size(Option(23))
assertEquals(2, so)
val st = size((23, "foo"))
assertEquals(4, st)
val ls = List("foo", "bar", "baz")
val lss = ls map size
typed[List[Int]](lss)
assertEquals(List(3, 3, 3), lss)
val lsi = ls map identity
typed[List[String]](lsi)
assertEquals(ls, lsi)
val is = identity("foo")
typed[String](is)
assertEquals("foo", is)
// Direct application
val s1 = singleton(23)
typed[Set[Int]](s1)
assertEquals(Set(23), s1)
val s2 = singleton("foo")
typed[Set[String]](s2)
assertEquals(Set("foo"), s2)
def app[G[_]](f : Int => G[Int]) = f(23)
val as = app(singleton)
typed[Set[Int]](as)
assertEquals(Set(23), as)
val al = app(list)
typed[List[Int]](al)
assertEquals(List(23), al)
// Implicit conversion to monomorphic function values
val l1 = List(1, 2, 3) map singleton
typed[List[Set[Int]]](l1)
assertEquals(List(Set(1), Set(2), Set(3)), l1)
val l2 = List("foo", "bar", "baz") map list
typed[List[List[String]]](l2)
assertEquals(List(List("foo"), List("bar"), List("baz")), l2)
val l3 = List(List(1), List(2), List(4)) map headOption
typed[List[Option[Int]]](l3)
assertEquals(List(Option(1), Option(2), Option(4)), l3)
// Use as polymorphic function values
def pairApply[G[_]](f : Id ~> G) = (f(23), f("foo"))
val a1 = pairApply(singleton)
typed[(Set[Int], Set[String])](a1)
assertEquals((Set(23), Set("foo")), a1)
val a2 = pairApply(list)
typed[(List[Int], List[String])](a2)
assertEquals((List(23), List("foo")), a2)
// Use as polymorphic function values with type specific cases
def pairApply2(f : Poly1)(implicit ci : f.Case[Int], cs : f.Case[String]) = (f(23), f("foo"))
val a4 = pairApply2(singleton)
typed[(Set[Int], Set[String])](a4)
assertEquals((Set(23), Set("foo")), a4)
val a5 = pairApply2(list)
typed[(List[Int], List[String])](a5)
assertEquals((List(23), List("foo")), a5)
val a6 = pairApply2(size)
typed[(Int, Int)](a6)
assertEquals((1, 3), a6)
def pairMap[G[_]](f : Id ~> G) = (List(1, 2, 3) map f, List("foo", "bar", "baz") map f)
val m1 = pairMap(singleton)
typed[(List[Set[Int]], List[Set[String]])](m1)
assertEquals((List(Set(1), Set(2), Set(3)), List(Set("foo"), Set("bar"), Set("baz"))), m1)
val m2 = pairMap(list)
typed[(List[List[Int]], List[List[String]])](m2)
assertEquals((List(List(1), List(2), List(3)), List(List("foo"), List("bar"), List("baz"))), m2)
val l5 = List(1, 2, 3)
val l6 = l5 map option
typed[List[Option[Int]]](l6)
assertEquals(List(Option(1), Option(2), Option(3)), l6)
val l7 = l6 map isDefined
typed[List[Boolean]](l7)
assertEquals(List(true, true, true), l7)
val lsi2 = List(Set(1), Set(2), Set(3))
val loi2 = lsi2 map choose
typed[List[Option[Int]]](loi2)
assertEquals(List(Option(1), Option(2), Option(3)), loi2)
val l8 = 23 :: "foo" :: List(1, 2, 3, 4) :: Option("bar") :: (23, "foo") :: 2.0 :: HNil
val l9 = l8 map size
typed[Int :: Int :: Int :: Int :: Int :: Int :: HNil](l9)
assertEquals(1 :: 3 :: 4 :: 4 :: 4 :: 1 :: HNil, l9)
def hlistMap(f : Poly)(implicit mapper : Mapper[f.type, Int :: String :: HNil]) =
(23 :: "foo" :: HNil) map f
val hm1 = hlistMap(singleton)
typed[Set[Int] :: Set[String] :: HNil](hm1)
assertEquals(Set(23) :: Set("foo") :: HNil, hm1)
val hm2 = hlistMap(list)
typed[List[Int] :: List[String] :: HNil](hm2)
assertEquals(List(23) :: List("foo") :: HNil, hm2)
}
@Test
def testCompose {
val so = singleton compose option
val sos = so("foo")
typed[Set[Option[String]]](sos)
assertEquals(Set(Option("foo")), sos)
val soi = so(23)
typed[Set[Option[Int]]](soi)
assertEquals(Set(Option(23)), soi)
}
@Test
def testPolyVal {
val i1 = zero[Int]
typed[Int](i1)
assertEquals(0, i1)
val i2 = 23+zero[Int]
typed[Int](i2)
assertEquals(23, i2)
val s1 = zero[String]
typed[String](s1)
assertEquals("", s1)
val s2 = "foo"+zero[String]
typed[String](s2)
assertEquals("foo", s2)
val l1 = zero[List[Int]]
typed[List[Int]](l1)
assertEquals(Nil, l1)
val l2 = List(23)++zero[List[Int]]
typed[List[Int]](l2)
assertEquals(List(23), l2)
}
  // Polymorphic function value with type-specific cases for two
  // argument types. The result type depends on the argument type.
object bidi extends Poly1 {
implicit val caseInt = at[Int](_.toString)
implicit val caseString = at[String](_.toInt)
}
@Test
def testBinary {
val bi = bidi(23)
typed[String](bi)
assertEquals("23", bi)
val bs = bidi("23")
typed[Int](bs)
assertEquals(23, bs)
val lis = 1 :: "2" :: 3 :: "4" :: HNil
val blis = lis map bidi
typed[String :: Int :: String :: Int :: HNil](blis)
assertEquals("1" :: 2 :: "3" :: 4 :: HNil, blis)
}
@Test
def testRotateLeft {
object isd extends Poly3 {
implicit val default = at[Int, String, Double] {
case (i, s, d) => s"i: $i, s: $s, d: $d"
}
}
val r1 = isd(1, "foo", 2.0)
assertTypedEquals[String]("i: 1, s: foo, d: 2.0", r1)
val sdi = isd.rotateLeft[Nat._1]
val r2 = sdi("foo", 2.0, 1)
assertTypedEquals[String]("i: 1, s: foo, d: 2.0", r2)
val dis = isd.rotateLeft[Nat._2]
val r3 = dis(2.0, 1, "foo")
assertTypedEquals[String]("i: 1, s: foo, d: 2.0", r3)
object isdc extends Poly4 {
implicit val default = at[Int, String, Double, Char] {
case (i, s, d, c) => s"i: $i, s: $s, d: $d, c: $c"
}
}
val r4 = isdc(1, "foo", 2.0, 'a')
assertTypedEquals[String]("i: 1, s: foo, d: 2.0, c: a", r4)
val sdci = isdc.rotateLeft[Nat._1]
val r5 = sdci("foo", 2.0, 'a', 1)
assertTypedEquals[String]("i: 1, s: foo, d: 2.0, c: a", r5)
val dcis = isdc.rotateLeft[Nat._2]
val r6 = dcis(2.0, 'a', 1, "foo")
assertTypedEquals[String]("i: 1, s: foo, d: 2.0, c: a", r6)
}
@Test
def testRotateRight {
object isd extends Poly3 {
implicit val default = at[Int, String, Double] {
case (i, s, d) => s"i: $i, s: $s, d: $d"
}
}
val r1 = isd(1, "foo", 2.0)
assertTypedEquals[String]("i: 1, s: foo, d: 2.0", r1)
val dis = isd.rotateRight[Nat._1]
val r2 = dis(2.0, 1, "foo")
assertTypedEquals[String]("i: 1, s: foo, d: 2.0", r2)
val sdi = isd.rotateRight[Nat._2]
val r3 = sdi("foo", 2.0, 1)
assertTypedEquals[String]("i: 1, s: foo, d: 2.0", r3)
object isdc extends Poly4 {
implicit val default = at[Int, String, Double, Char] {
case (i, s, d, c) => s"i: $i, s: $s, d: $d, c: $c"
}
}
val r4 = isdc(1, "foo", 2.0, 'a')
assertTypedEquals[String]("i: 1, s: foo, d: 2.0, c: a", r4)
val cisd = isdc.rotateRight[Nat._1]
val r5 = cisd('a', 1, "foo", 2.0)
assertTypedEquals[String]("i: 1, s: foo, d: 2.0, c: a", r5)
val dcis = isdc.rotateRight[Nat._2]
val r6 = dcis(2.0, 'a', 1, "foo")
assertTypedEquals[String]("i: 1, s: foo, d: 2.0, c: a", r6)
}
}
|
TomasMikula/shapeless
|
core/src/test/scala/shapeless/poly.scala
|
Scala
|
apache-2.0
| 10,515 |
package comms
import org.scalajs.dom
import scala.concurrent.Future
import scalajs.concurrent.JSExecutionContext.Implicits.runNow
import upickle.default._
import upickle.Js
import autowire._
object Client extends autowire.Client[Js.Value, Reader, Writer]{
override def doCall(req: Request): Future[Js.Value] = {
dom.ext.Ajax.post(
url = "http://localhost:9000/api/" + req.path.mkString("/"),
data = upickle.json.write(Js.Obj(req.args.toSeq:_*))
).map(_.responseText)
.map(upickle.json.read)
}
def read[Result: Reader](p: Js.Value) = readJs[Result](p)
def write[Result: Writer](r: Result) = writeJs(r)
}
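// Editorial addition: a hedged sketch of calling a shared autowire API trait
// through the client above. `ExampleApi` and `hello` are hypothetical names;
// the project's real shared trait lives elsewhere.
object ClientUsageExample {
  trait ExampleApi {
    def hello(name: String): String
  }
  def greet(): Future[String] =
    Client[ExampleApi].hello("world").call() // POSTs to /api/... and unpickles the result
}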
|
aholland/play-scalajs-workbench-example
|
workbench/src/main/scala/comms/Client.scala
|
Scala
|
mit
| 639 |
package io.circe
import cats.data.Xor
trait Parser extends Serializable {
def parse(input: String): Xor[ParsingFailure, Json]
def decode[A](input: String)(implicit d: Decoder[A]): Xor[Error, A] =
parse(input).flatMap { json => d(Cursor(json).hcursor) }
}
|
beni55/circe
|
core/shared/src/main/scala/io/circe/Parser.scala
|
Scala
|
apache-2.0
| 266 |
package com.alanjz.microstrike.weapon
trait MachineGun {
}
|
spacenut/microstrike
|
src/com/alanjz/microstrike/weapon/MachineGun.scala
|
Scala
|
gpl-2.0
| 61 |
package x7c1.linen.modern.init.inspector
import x7c1.linen.glue.res.layout.{SourceSearchRow, SourceSearchRowClientError, SourceSearchRowFooter, SourceSearchRowLabel, SourceSearchRowOriginError, SourceSearchRowSourceItem, SourceSearchRowSourceNotFound}
import x7c1.linen.repository.inspector.{ClientLoadingError, DiscoveredSource, DiscoveredSourceLabel, Footer, NoSourceFound, OriginLoadingError, SourceSearchReportRow}
import x7c1.wheat.ancient.resource.ViewHolderProvider
import x7c1.wheat.lore.resource.ProviderSelectable
import x7c1.wheat.modern.resource.ViewHolderProviders
import x7c1.wheat.modern.sequence.Sequence
object SearchReportRowProviders {
implicit def selectable[A <: Sequence[SourceSearchReportRow]]: ProviderSelectable[A, SearchReportRowProviders] =
new ProviderSelectable[A, SearchReportRowProviders] {
override def selectProvider(position: Int, sequence: A, providers: SearchReportRowProviders) = {
sequence findAt position match {
case Some(x: DiscoveredSourceLabel) => providers.forLabel
case Some(x: DiscoveredSource) => providers.forSourceItem
case Some(x: OriginLoadingError) => providers.forErrorItem
case Some(x: NoSourceFound) => providers.forNoSource
case Some(x: ClientLoadingError) => providers.forClientError
case Some(x: Footer) => providers.forFooter
case _ => ???
}
}
}
}
class SearchReportRowProviders (
val forLabel: ViewHolderProvider[SourceSearchRowLabel],
val forSourceItem: ViewHolderProvider[SourceSearchRowSourceItem],
val forErrorItem: ViewHolderProvider[SourceSearchRowOriginError],
val forNoSource: ViewHolderProvider[SourceSearchRowSourceNotFound],
val forClientError: ViewHolderProvider[SourceSearchRowClientError],
val forFooter: ViewHolderProvider[SourceSearchRowFooter]
) extends ViewHolderProviders[SourceSearchRow]{
override protected def all = Seq(
forLabel,
forSourceItem,
forErrorItem,
forNoSource,
forClientError,
forFooter
)
}
|
x7c1/Linen
|
linen-modern/src/main/scala/x7c1/linen/modern/init/inspector/SearchReportRowProviders.scala
|
Scala
|
mit
| 2,037 |
/*
* Copyright (C) 2009-2018 Lightbend Inc. <https://www.lightbend.com>
*/
package scalaguide.i18n
// #inject-langs
import javax.inject.Inject
import play.api.i18n.{Lang, Langs}
import play.api.mvc.{BaseController, ControllerComponents}
class ScalaI18nService @Inject()(langs: Langs) {
val availableLangs: Seq[Lang] = langs.availables
}
// #inject-langs
import play.api.i18n.{ Messages, MessagesImpl, MessagesProvider }
class ScalaLangsOperations @Inject()(langs: Langs, messagesApi: play.api.i18n.MessagesApi) {
val lang = langs.availables.head
// #lang-to-locale
val locale: java.util.Locale = lang.toLocale
// #lang-to-locale
// #using-messages-impl
val messages: Messages = MessagesImpl(lang, messagesApi)
val title: String = messages("home.title")
// #using-messages-impl
{
// #using-implicit-messages-provider
implicit val messagesProvider: MessagesProvider = {
MessagesImpl(lang, messagesApi)
}
// uses implicit messages
val title2 = Messages("home.title")
// #using-implicit-messages-provider
}
{
// #custom-message-provider
implicit val customMessagesProvider: MessagesProvider = new MessagesProvider {
// resolve messages at runtime
// ###replace: override def messages: Messages = { ... }
override def messages: Messages = ???
}
// uses implicit messages
val title3: String = Messages("home.title")
// #custom-message-provider
}
}
// #inject-messages-api
import play.api.i18n.MessagesApi
class MyService @Inject()(langs: Langs, messagesApi: MessagesApi) {
val lang: Lang = langs.availables.head
val title: String = messagesApi("home.title")(lang)
}
// #inject-messages-api
// #use-implicit-lang
class MyOtherService @Inject()(langs: Langs, messagesApi: MessagesApi) {
implicit val lang: Lang = langs.availables.head
lazy val title: String = messagesApi("home.title")
}
// #use-implicit-lang
import play.api.i18n.I18nSupport
class MyController @Inject()(val controllerComponents: ControllerComponents) extends BaseController with I18nSupport {
// #lang-cookies
def homePageInFrench = Action {
Redirect("/user/home").withLang(Lang("fr"))
}
def homePageWithDefaultLang = Action {
Redirect("/user/home").clearingLang
}
// #lang-cookies
// #http-supported-langs
def index = Action { request =>
Ok("Languages: " + request.acceptLanguages.map(_.code).mkString(", "))
}
// #http-supported-langs
}
// #using-lang-implicits-trait
import play.api.i18n.LangImplicits
class MyClass @Inject()(val messagesApi: MessagesApi) extends LangImplicits {
def convertToMessage: Unit = {
implicit val lang = Lang("en")
val messages: Messages = lang2Messages // implicit conversion
}
}
// #using-lang-implicits-trait
|
Shenker93/playframework
|
documentation/manual/working/scalaGuide/main/i18n/code/scalaguide/i18n/ScalaI18nService.scala
|
Scala
|
apache-2.0
| 2,779 |
package com.lyrx.text
import java.io.File
import com.lyrx.html.DivBasedTagCollector
import com.lyrx.text.TextTypes.{Par, Pars}
import scala.collection.immutable.HashMap
import scala.collection.immutable.HashMap.HashMap1
import scala.util.matching.Regex
/**
* Created by alex on 17.10.16.
*/
trait ParBasedGenerator[S] {
import TextTypes._
val parData:ParData[S]
def normalPar(aPar:Par):GenBase[S]
def parFromSeq(aSeq:Seq[S]):GenBase[S]
def italicParFromString(s:String):GenBase[S]
def italicPar(aPar:Par):GenBase[S]
def emphasized(s:String):GenBase[S]
def title(s:String):GenBase[S]
def subtitle(s:String):GenBase[S]
def separatorElement():GenBase[S]
def footNote(s:String):GenBase[S]
def itemizeStrings(ss:Seq[String]):GenBase[S]
def listifyStrings(ss:Seq[String]):GenBase[S]
def tableOfContents():GenBase[S]
def code(aString: String):GenBase[S]
def cite(s:String):GenBase[S]
def wordCount(): Int
def convertToString()(implicit coll:Collector[S],ctx:Context):String
def generateTags()(implicit coll:Collector[S],ctx:Context): S
def contents()(implicit coll:Collector[S],ctx:Context):S
def writeOut()(implicit ctx:Context,coll:Collector[S]): Either[File,String]
def codeFile(aFile: String)(implicit ctx:Context,coll: Collector[S]):ParBasedGenerator[S]
def codeExample(aString: String)(implicit coll: Collector[S]):ParBasedGenerator[S]
def annotatedPar(annotation:Option[String])(implicit tagCollector:Collector[S]):ParBasedGenerator[S]
def footNoteFile(fileName:String)(implicit coll:Collector[S],ctx:Context):ParBasedGenerator[S]
def bibliography()(implicit coll:Collector[S]):ParBasedGenerator[S]
def toc()(implicit coll:Collector[S]):ParBasedGenerator[S]
def itemize()(implicit tagCollector:Collector[S]):ParBasedGenerator[S]
def itemize(count:Int)(implicit tagCollector:Collector[S]):ParBasedGenerator[S]
def enumerate()(implicit tagCollector:Collector[S]):ParBasedGenerator[S]
def enumerate(count:Int)(implicit tagCollector:Collector[S]):ParBasedGenerator[S]
def withParagraphs(aName:String)(implicit ctx:Context, tagCollector:Collector[S]):ParBasedGenerator[S]
def normal(aTake: Int)(implicit coll: Collector[S]): ParBasedGenerator[S]
def drop(aDrop:Int):ParBasedGenerator[S]
def all()(implicit coll: Collector[S]): ParBasedGenerator[S]
def normalItalic(takes: Int*)(implicit ctx: Context,coll: Collector[S]): ParBasedGenerator[S]
def italicNormal(takes: Int*)(implicit ctx: Context, coll: Collector[S]): ParBasedGenerator[S]
def italic()(implicit coll: Collector[S]): ParBasedGenerator[S]
def italic(aTake: Int)(implicit coll: Collector[S]):ParBasedGenerator[S]
def newSection(title: String)(implicit coll: Collector[S]): ParBasedGenerator[S]
def newSubSection(s: String)(implicit coll: Collector[S]): ParBasedGenerator[S]
def newSection()(implicit coll: Collector[S]): ParBasedGenerator[S]
def newSubSection()(implicit coll: Collector[S]): ParBasedGenerator[S]
def separator()(implicit coll: Collector[S]): ParBasedGenerator[S]
def trim()(implicit coll: Collector[S]): ParBasedGenerator[S]
def collapse(eitherTagSeq:EitherTagSeq[S]):Seq[S]
/*
implicit class SToEitherTagSeq(s:S){
def mySToEitherTagSeq():EitherTagSeq = Seq[EitherTag](Right(s))
}
*/
}
|
lyrx/lyrxgenerator
|
src/main/scala/com/lyrx/text/ParBasedGenerator.scala
|
Scala
|
gpl-3.0
| 3,290 |
val primerArgumento = if (args.length > 0) args(0) else ""
// similar to a switch, but it needs no breaks and directly yields a value whose type is inferred
val ayuda = primerArgumento match { // ayuda will be of type String, inferred by the compiler
case "l" => "formato largo"
case "f" => "formato impresion"
case _ => "por defecto"
}
println(ayuda)
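// Editorial addition (hedged sketch): match is an expression, so it can also
// destructure tuples and use guards; the cases below are illustrative only.
val descripcion = (primerArgumento, args.length) match {
  case ("l", n) if n > 1 => "long format with extra arguments"
  case ("l", _) => "long format"
  case (_, 0) => "no arguments"
  case _ => "unrecognized"
}
println(descripcion)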
|
romanarranz/NTP
|
S5/estructuraMatch.scala
|
Scala
|
mit
| 353 |
package frameless
import org.scalacheck.{Arbitrary, Prop}
import org.scalacheck.Prop._
import scala.reflect.ClassTag
import shapeless.test.illTyped
import org.scalatest.matchers.should.Matchers
class CreateTests extends TypedDatasetSuite with Matchers {
import TypedEncoder.usingInjection
test("creation using X4 derived DataFrames") {
def prop[
A: TypedEncoder,
B: TypedEncoder,
C: TypedEncoder,
D: TypedEncoder](data: Vector[X4[A, B, C, D]]): Prop = {
val ds = TypedDataset.create(data)
TypedDataset.createUnsafe[X4[A, B, C, D]](ds.toDF()).collect().run() ?= data
}
check(forAll(prop[Int, Char, X2[Option[Country], Country], Int] _))
check(forAll(prop[X2[Int, Int], Int, Boolean, Vector[Food]] _))
check(forAll(prop[String, Food, X3[Food, Country, Boolean], Int] _))
check(forAll(prop[String, Food, X3U[Food, Country, Boolean], Int] _))
check(forAll(prop[
Option[Vector[Food]],
Vector[Vector[X2[Vector[(Person, X1[Char])], Country]]],
X3[Food, Country, String],
Vector[(Food, Country)]] _))
}
test("array fields") {
def prop[T: Arbitrary: TypedEncoder: ClassTag] = forAll {
(d1: Array[T], d2: Array[Option[T]], d3: Array[X1[T]], d4: Array[X1[Option[T]]],
d5: X1[Array[T]]) =>
TypedDataset.create(Seq(d1)).collect().run().head.sameElements(d1) &&
TypedDataset.create(Seq(d2)).collect().run().head.sameElements(d2) &&
TypedDataset.create(Seq(d3)).collect().run().head.sameElements(d3) &&
TypedDataset.create(Seq(d4)).collect().run().head.sameElements(d4) &&
TypedDataset.create(Seq(d5)).collect().run().head.a.sameElements(d5.a)
}
check(prop[Boolean])
check(prop[Byte])
check(prop[Short])
check(prop[Int])
check(prop[Long])
check(prop[Float])
check(prop[Double])
check(prop[String])
}
test("vector fields") {
def prop[T: Arbitrary: TypedEncoder] = forAll {
(d1: Vector[T], d2: Vector[Option[T]], d3: Vector[X1[T]], d4: Vector[X1[Option[T]]],
d5: X1[Vector[T]]) =>
(TypedDataset.create(Seq(d1)).collect().run().head ?= d1) &&
(TypedDataset.create(Seq(d2)).collect().run().head ?= d2) &&
(TypedDataset.create(Seq(d3)).collect().run().head ?= d3) &&
(TypedDataset.create(Seq(d4)).collect().run().head ?= d4) &&
(TypedDataset.create(Seq(d5)).collect().run().head ?= d5)
}
check(prop[Boolean])
check(prop[Byte])
check(prop[Char])
check(prop[Short])
check(prop[Int])
check(prop[Long])
check(prop[Float])
check(prop[Double])
check(prop[String])
}
test("list fields") {
def prop[T: Arbitrary: TypedEncoder] = forAll {
(d1: List[T], d2: List[Option[T]], d3: List[X1[T]], d4: List[X1[Option[T]]],
d5: X1[List[T]]) =>
(TypedDataset.create(Seq(d1)).collect().run().head ?= d1) &&
(TypedDataset.create(Seq(d2)).collect().run().head ?= d2) &&
(TypedDataset.create(Seq(d3)).collect().run().head ?= d3) &&
(TypedDataset.create(Seq(d4)).collect().run().head ?= d4) &&
(TypedDataset.create(Seq(d5)).collect().run().head ?= d5)
}
check(prop[Boolean])
check(prop[Byte])
check(prop[Char])
check(prop[Short])
check(prop[Int])
check(prop[Long])
check(prop[Float])
check(prop[Double])
check(prop[String])
}
test("map fields (scala.Predef.Map / scala.collection.immutable.Map)") {
def prop[A: Arbitrary: TypedEncoder, B: Arbitrary: TypedEncoder] = forAll {
(d1: Map[A, B], d2: Map[B, A], d3: Map[A, Option[B]],
d4: Map[A, X1[B]], d5: Map[X1[A], B], d6: Map[X1[A], X1[B]]) =>
(TypedDataset.create(Seq(d1)).collect().run().head ?= d1) &&
(TypedDataset.create(Seq(d2)).collect().run().head ?= d2) &&
(TypedDataset.create(Seq(d3)).collect().run().head ?= d3) &&
(TypedDataset.create(Seq(d4)).collect().run().head ?= d4) &&
(TypedDataset.create(Seq(d5)).collect().run().head ?= d5) &&
(TypedDataset.create(Seq(d6)).collect().run().head ?= d6)
}
check(prop[String, String])
check(prop[String, Boolean])
check(prop[String, Byte])
check(prop[String, Char])
check(prop[String, Short])
check(prop[String, Int])
check(prop[String, Long])
check(prop[String, Float])
check(prop[String, Double])
}
test("maps with Option keys should not resolve the TypedEncoder") {
val data: Seq[Map[Option[Int], Int]] = Seq(Map(Some(5) -> 5))
illTyped("TypedDataset.create(data)", ".*could not find implicit value for parameter encoder.*")
}
test("not aligned columns should throw an exception") {
val v = Vector(X2(1,2))
val df = TypedDataset.create(v).dataset.toDF()
a [IllegalStateException] should be thrownBy {
TypedDataset.createUnsafe[X1[Int]](df).show().run()
}
}
test("dataset with different column order") {
// e.g. when loading data from partitioned dataset
// the partition columns get appended to the end of the underlying relation
def prop[A: Arbitrary: TypedEncoder, B: Arbitrary: TypedEncoder] = forAll {
(a1: A, b1: B) => {
val ds = TypedDataset.create(
Vector((b1, a1))
).dataset.toDF("b", "a").as[X2[A, B]](TypedExpressionEncoder[X2[A, B]])
TypedDataset.create(ds).collect().run().head ?= X2(a1, b1)
}
}
check(prop[X1[Double], X1[X1[SQLDate]]])
check(prop[String, Int])
}
}
|
imarios/frameless
|
dataset/src/test/scala/frameless/CreateTests.scala
|
Scala
|
apache-2.0
| 5,445 |
package eu.pepot.eu.spark.inputsplitter.helper
import java.io.File
import eu.pepot.eu.spark.inputsplitter.common.file.FileDetails
import Helper._
object TestsHelper {
def resourcesBaseDir(subDir: String) = {
toStringPath("src/test/resources/eu/pepot/eu/spark/inputsplitter/samples", subDir)
}
def resourcesBaseDirWithAbsolutePath(subDir: String) = {
new File("src/test/resources/eu/pepot/eu/spark/inputsplitter/samples", subDir).getAbsolutePath
}
def toFDs(s: String): FileDetails = {
FileDetails(s, new File(s).length())
}
}
|
mauriciojost/spark-input-splitter
|
src/test/scala/eu/pepot/eu/spark/inputsplitter/helper/TestsHelper.scala
|
Scala
|
apache-2.0
| 558 |
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.cli
import scopt.{ OptionDef, OptionParser, Read }
private[gatling] class GatlingOptionParser(programName: String) extends OptionParser[Unit](programName) {
def help(constant: CommandLineConstant): OptionDef[Unit, Unit] =
help(constant.full).abbr(constant.abbr)
def opt[A: Read](constant: CommandLineConstant): OptionDef[A, Unit] =
opt[A](constant.full).abbr(constant.abbr)
override def errorOnUnknownArgument: Boolean = false
}
|
MykolaB/gatling
|
gatling-core/src/main/scala/io/gatling/core/cli/GatlingOptionParser.scala
|
Scala
|
apache-2.0
| 1,089 |
package io.github.edadma.numbers
import java.math.{RoundingMode, MathContext}
import math._
class BigDecimalMath(val mc: MathContext) {
def this(precision: Int) =
this(new MathContext(precision, RoundingMode.HALF_EVEN))
class Const(compute: => BigDecimal) {
private var _value: BigDecimal = null
def v = {
if (_value == null || _value.mc != mc)
_value = compute
_value
}
}
class ComplexBigDecimalConst(compute: => ComplexBigDecimal) {
private var _value: ComplexBigDecimal = null
def v: ComplexBigDecimal = {
if (_value == null || _value.bdmath.mc != mc)
_value = compute
_value
}
}
class QuaternionBigDecimalConst(compute: => QuaternionBigDecimal) {
private var _value: QuaternionBigDecimal = null
def v: QuaternionBigDecimal = {
if (_value == null || _value.bdmath.mc != mc)
_value = compute
_value
}
}
class IntConst(a: Int) extends Const(bigDecimal(a))
class DoubleConst(a: Double) extends Const(bigDecimal(a))
protected val ZERO = new IntConst(0)
protected val QUARTER = new DoubleConst(.25)
protected val ONE = new IntConst(1)
protected val TWO = new IntConst(2)
protected val THREE = new IntConst(3)
protected val FOUR = new IntConst(4)
val Pi = new Const(compute_pi)
val E = new Const(compute_e)
val LN2 = new Const(compute_ln2)
val IC = new ComplexBigDecimalConst(ComplexBigDecimal(0, 1)(this))
val IQ = new QuaternionBigDecimalConst(QuaternionBigDecimal(0, 1, 0, 0)(this))
val JQ = new QuaternionBigDecimalConst(QuaternionBigDecimal(0, 0, 1, 0)(this))
val KQ = new QuaternionBigDecimalConst(QuaternionBigDecimal(0, 0, 0, 1)(this))
private[numbers] val ZEROC = new ComplexBigDecimalConst(ComplexBigDecimal(0, 0)(this))
private[numbers] val ONEC = new ComplexBigDecimalConst(ComplexBigDecimal(1, 0)(this))
private[numbers] val ZEROQ = new QuaternionBigDecimalConst(QuaternionBigDecimal(0, 0, 0, 0)(this))
private[numbers] val ONEQ = new QuaternionBigDecimalConst(QuaternionBigDecimal(1, 0, 0, 0)(this))
private[numbers] def bigDecimal(n: Int): BigDecimal = BigDecimal(n, mc)
private[numbers] def bigDecimal(n: Double): BigDecimal = BigDecimal(n, mc)
private[numbers] def inv(x: BigDecimal) = ONE.v / x
private[numbers] def xx(x: BigDecimal) = x * x
private def compute_ln2 = {
var res = ZERO.v
var p3 = THREE.v
var p4 = FOUR.v
var term = 1.0 / p3 + 1.0 / p4
var k = 1
while (term.scale < term.precision * 2) {
res += term
p3 *= 3
p4 *= 4
k += 1
term = (1.0 / p3 + 1.0 / p4) / k
}
res.round(mc)
}
private def compute_pi = {
var a = ONE.v
var b = inv(BigDecimalMath.sqrt(TWO.v)(this))
var t = QUARTER.v
var x = 1
while (a != b) {
val y = a
a = (a + b) / TWO.v
b = BigDecimalMath.sqrt(b * y)(this)
t = t - BigDecimal(x) * xx(y - a)
x <<= 1
}
xx(a + b) / (FOUR.v * t)
}
private def compute_e = {
var result = ZERO.v
var term = TWO.v
var d = ONE.v
var i = TWO.v
while (term.scale < term.precision * 2) {
result += term
d *= i
i += 1
term = inv(d)
}
result.round(mc)
}
}
object BigDecimalMath {
private val LN2D = math.log(2)
object decimal128 {
implicit val bdmath = new BigDecimalMath(MathContext.DECIMAL128)
}
def ln(x: BigDecimal)(implicit bdmath: BigDecimalMath) = {
val p = bdmath.mc.getPrecision * math.log(10) / LN2D
val m = ceil(p / 2 - math.log(x.toDouble) / LN2D).toInt
val s = x * bdmath.TWO.v.pow(m)
bdmath.Pi.v / (bdmath.TWO.v * agm(bdmath.ONE.v, bdmath.FOUR.v / s)) - m * bdmath.LN2.v
}
def agm(x: BigDecimal, y: BigDecimal)(implicit bdmath: BigDecimalMath) = {
def am(a: BigDecimal, b: BigDecimal) = (a + b) / 2
def gm(a: BigDecimal, b: BigDecimal) = sqrt(a * b)
def recur(an: BigDecimal, gn: BigDecimal): BigDecimal = {
val anp1 = am(an, gn)
val gnp1 = gm(an, gn)
if ((anp1 - gnp1).abs <= an.ulp)
anp1
else
recur(anp1, gnp1)
}
recur(am(x, y), gm(x, y))
}
def sqrt(x: BigDecimal)(implicit bdmath: BigDecimalMath) = {
var new_guess = x / bdmath.TWO.v
var current_guess = x
while (current_guess != new_guess) {
current_guess = new_guess
new_guess = (current_guess + x / current_guess) / bdmath.TWO.v
}
new_guess
}
def exp(a: BigDecimal)(implicit bdmath: BigDecimalMath) = {
val x_ = a
var result = x_ + bdmath.ONE.v
var n = x_
var d = bdmath.ONE.v
var term = x_
var i = 2
while (term.scale < term.precision * 2) {
n *= x_
d *= bdmath.bigDecimal(i)
term = n / d
result += term
i += 1
}
result.round(bdmath.mc)
}
def log(b: BigDecimal, x: BigDecimal)(implicit bdmath: BigDecimalMath) =
ln(x) / ln(b)
def pow(x: BigDecimal, y: BigDecimal)(implicit bdmath: BigDecimalMath) =
exp(y * ln(x))
def pow(x: BigDecimal, y: Double)(implicit bdmath: BigDecimalMath) =
exp(bdmath.bigDecimal(y) * ln(x))
def sin(a: BigDecimal)(implicit bdmath: BigDecimalMath) = {
var term = a
val x2 = bdmath.xx(a)
var n = term
var d = BigInt(1)
var result = bdmath.ZERO.v
var i = 3
while (term.scale < term.precision * 2) {
if ((i & 2) == 0)
result -= term
else
result += term
n *= x2
d *= BigInt((i - 1) * i)
term = n / BigDecimal(d)
i += 2
}
// if (result.compareTo( ONE ) > 0)
// return ONE;
// else if (result.compareTo( NEG_ONE ) < 0)
// return NEG_ONE;
result.round(bdmath.mc)
}
def cos(a: BigDecimal)(implicit bdmath: BigDecimalMath) = {
var term = bdmath.ONE.v
val x2 = bdmath.xx(a)
var n = term
var d = BigInt(1)
var result = bdmath.ZERO.v
var i = 2
while (term.scale < term.precision * 2) {
if ((i & 2) == 0)
result -= term
else
result += term
n *= x2
d *= BigInt((i - 1) * i)
term = n / BigDecimal(d)
i += 2
}
// if (result.compareTo( ONE ) > 0)
// return ONE;
// else if (result.compareTo( NEG_ONE ) < 0)
// return NEG_ONE;
result.round(bdmath.mc)
}
def acos(a: BigDecimal)(implicit bdmath: BigDecimalMath) = {
var a_ = bdmath.ZERO.v
var x1 = a
var halves = bdmath.ONE.v
require(a.abs <= bdmath.ONE.v, "acos() argument may not exceed one")
while ({ halves /= bdmath.TWO.v; halves.scale < halves.precision * 2 }) if (x1.signum < 0) {
x1 = bdmath.ONE.v - bdmath.TWO.v * bdmath.xx(x1)
a_ += halves
} else
x1 = bdmath.TWO.v * bdmath.xx(x1) - bdmath.ONE.v
(bdmath.Pi.v * a_).round(bdmath.mc)
}
def asin(a: BigDecimal)(implicit bdmath: BigDecimalMath) = {
require(a.abs <= bdmath.ONE.v, "asin() argument may not exceed one")
bdmath.Pi.v / 2 - acos(a)
}
def atan(a: BigDecimal)(implicit bdmath: BigDecimalMath) =
a.signum * acos(bdmath.inv(sqrt(bdmath.xx(a) + bdmath.ONE.v)))
def atan2(y: BigDecimal, x: BigDecimal)(implicit bdmath: BigDecimalMath) =
if (x > 0)
atan(y / x)
else if (y >= 0 && x < 0)
atan(y / x) + bdmath.Pi.v
else if (y < 0 && x < 0)
atan(y / x) - bdmath.Pi.v
else if (y > 0 && x == 0)
bdmath.Pi.v / 2
else if (y < 0 && x == 0)
-bdmath.Pi.v / 2
else
bdmath.bigDecimal(0)
def atanh(x: BigDecimal)(implicit bdmath: BigDecimalMath) =
(ln(bdmath.ONE.v + x) - ln(bdmath.ONE.v - x)) / bdmath.TWO.v
}
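// Editorial addition: a hedged usage sketch evaluating a few of the functions
// above at DECIMAL128 (34-digit) precision via the implicit context provided
// by BigDecimalMath.decimal128.
object BigDecimalMathExample {
  def main(args: Array[String]): Unit = {
    import BigDecimalMath.decimal128.bdmath
    println(bdmath.Pi.v) // 3.14159..., to 34 significant digits
    println(BigDecimalMath.sqrt(BigDecimal(2, bdmath.mc))) // 1.41421356237309...
    println(BigDecimalMath.exp(BigDecimal(1, bdmath.mc))) // should agree with bdmath.E.v
  }
}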
|
edadma/numbers
|
shared/src/main/scala/io/github/edadma/numbers/BigDecimalMath.scala
|
Scala
|
mit
| 7,568 |
package com.twitter.finagle.filter
import com.twitter.finagle._
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.finagle.tracing.Annotation.BinaryAnnotation
import com.twitter.finagle.tracing.ForwardAnnotation
import com.twitter.finagle.util.Rng
import com.twitter.logging.HasLogLevel
import com.twitter.logging.Level
import com.twitter.logging.Logger
import com.twitter.util.Future
/**
* Forwards dark traffic to the given service when the given function returns true for a request.
*
* @param darkService Service to take dark traffic
 * @param enableSampling if this function returns true for a request, the request is forwarded to the dark service
* @param statsReceiver keeps stats for requests forwarded, skipped and failed.
* @param forwardAfterService forward the dark request after the service has processed the request
* instead of concurrently.
*/
class DarkTrafficFilter[Req, Rep](
darkService: Service[Req, Rep],
enableSampling: Req => Boolean,
override val statsReceiver: StatsReceiver,
forwardAfterService: Boolean)
extends SimpleFilter[Req, Rep]
with AbstractDarkTrafficFilter {
import DarkTrafficFilter._
def this(
darkService: Service[Req, Rep],
enableSampling: Req => Boolean,
statsReceiver: StatsReceiver
) = this(darkService, enableSampling, statsReceiver, false)
def apply(request: Req, service: Service[Req, Rep]): Future[Rep] = {
val invokeDarkTraffic = enableSampling(request)
val isDarkRequestAnnotation = if (invokeDarkTraffic) DarkRequestTrue else DarkRequestFalse
// Set an identifier so both light service and dark service can
// query the annotation from tracing or from Finagle Local
// context, the same request going through both services should
// have the same dark request key.
ForwardAnnotation.let(Seq(newKeyAnnotation(), isDarkRequestAnnotation)) {
if (forwardAfterService) {
service(request).ensure {
sendDarkRequest(request)(invokeDarkTraffic, darkService)
}
} else {
serviceConcurrently(service, request)(invokeDarkTraffic, darkService)
}
}
}
protected def handleFailedInvocation[R](request: R, t: Throwable): Unit = {
val level = t match {
case hll: HasLogLevel => hll.logLevel
case _ => Level.WARNING
}
log.log(level, t, s"DarkTrafficFilter Failed invocation: ${t.getMessage}")
}
}
object DarkTrafficFilter {
val log: Logger = Logger.get("DarkTrafficFilter")
// the presence of clnt/is_dark_request indicates that the span is associated with a dark request
val DarkRequestAnnotation = BinaryAnnotation("clnt/is_dark_request", true)
// the value of clnt/has_dark_request indicates whether or not the request contains a span that is forwarded to dark service
def DarkRequestTrue = BinaryAnnotation("clnt/has_dark_request", true)
def DarkRequestFalse = BinaryAnnotation("clnt/has_dark_request", false)
def newKeyAnnotation() =
BinaryAnnotation("clnt/dark_request_key", Rng.threadLocal.nextLong(Long.MaxValue))
}
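// Editorial addition: a hedged wiring sketch. The light/dark services and the
// stats receiver are illustrative assumptions; scala.util.Random stands in for
// whatever sampling policy a real deployment would use.
object DarkTrafficFilterExample {
  def tee[Req, Rep](
    light: Service[Req, Rep],
    dark: Service[Req, Rep],
    stats: StatsReceiver
  ): Service[Req, Rep] = {
    val filter = new DarkTrafficFilter[Req, Rep](
      dark,
      _ => scala.util.Random.nextDouble() < 0.1, // forward roughly 10% of requests
      stats)
    filter.andThen(light) // dark traffic is sent concurrently with the light call
  }
}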
|
twitter/finagle
|
finagle-core/src/main/scala/com/twitter/finagle/filter/DarkTrafficFilter.scala
|
Scala
|
apache-2.0
| 3,021 |
package chandu0101.scalajs.react.components
package materialui
import chandu0101.macros.tojs.JSMacro
import japgolly.scalajs.react._
import japgolly.scalajs.react.raw.React
import japgolly.scalajs.react.vdom.VdomNode
import org.scalajs.dom
import scala.scalajs.js
import scala.scalajs.js.`|`
/**
* This file is generated - submit issues instead of PR against it
*/
case class MuiTableHeaderColumn(key: js.UndefOr[String] = js.undefined,
ref: js.UndefOr[String] = js.undefined,
/* The css class name of the root element. */
className: js.UndefOr[String] = js.undefined,
/* Number to identify the header row. This property
is automatically populated when used with TableHeader. */
columnNumber: js.UndefOr[Int] = js.undefined,
/* Not used here but we need to remove it from the root element. */
hoverable: js.UndefOr[Boolean] = js.undefined,
onClick: js.UndefOr[(ReactMouseEvent, ColumnId) => Callback] =
js.undefined,
/* Not used here but we need to remove it from the root element. */
onHover: js.UndefOr[Callback] = js.undefined,
/* Not used here but we need to remove it from the root element. */
onHoverExit: js.UndefOr[Callback] = js.undefined,
/* Override the inline-styles of the root element. */
style: js.UndefOr[CssProperties] = js.undefined,
/* The string to supply to the tooltip. If not
string is supplied no tooltip will be shown. */
tooltip: js.UndefOr[String] = js.undefined,
/* Additional styling that can be applied to the tooltip. */
tooltipStyle: js.UndefOr[CssProperties] = js.undefined) {
def apply(children: VdomNode*) = {
val props = JSMacro[MuiTableHeaderColumn](this)
val component = JsComponent[js.Object, Children.Varargs, Null](Mui.TableHeaderColumn)
component(props)(children: _*)
}
}
|
rleibman/scalajs-react-components
|
core/src/main/scala/chandu0101/scalajs/react/components/materialui/MuiTableHeaderColumn.scala
|
Scala
|
apache-2.0
| 2,363 |
/*
* Copyright 2015 Nicolas Rinaudo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kantan.csv
package laws
package discipline
import org.scalacheck.Prop, Prop._
trait VersionSpecificReaderEngineTests { self: ReaderEngineTests =>
def versionSpecificProps: Seq[(String, Prop)] = Seq(
"withFilter" -> forAll(laws.withFilter _),
"toStream" -> forAll(laws.toStream _),
"toTraversable" -> forAll(laws.toTraversable _),
"toIterator" -> forAll(laws.toIterator _)
)
}
|
nrinaudo/scala-csv
|
laws/shared/src/main/scala-2.12/kantan/csv/laws/discipline/VersionSpecificReaderEngineTests.scala
|
Scala
|
mit
| 1,018 |
package colang.ast.raw
import colang.Strategy.Result
import colang.Strategy.Result.{Malformed, NoMatch, Success}
import colang._
import colang.issues._
import colang.tokens._
import scala.annotation.tailrec
/**
* Represents a compiler components that groups tokens into higher-level syntax tree nodes representing relationships
* between them.
*/
trait Parser {
/**
* Constructs an abstract syntax tree (AST) from a sequence of terminal nodes (tokens).
* @param tokens terminal nodes
* @return (root AST node, encountered issues)
*/
def parse(tokens: Seq[Token]): (TranslationUnit, Seq[Issue])
}
/**
* Actual parser implementation.
*/
class ParserImpl extends Parser {
def parse(tokens: Seq[Token]): (TranslationUnit, Seq[Issue]) = {
val tokenStream = TokenStream(tokens.toList)
TranslationUnit.strategy(tokenStream) match {
case Success(translationUnit, issues, _) => (translationUnit, issues)
}
}
}
object ParserImpl {
type Strategy[+N <: Node] = colang.Strategy[TokenStream, N]
/**
* A strategy template for parsing tokens (terminal nodes) as-is, skipping whitespace before them.
* @param tokenType token type
*/
class SingleTokenStrategy[T <: Token](tokenType: Class[T]) extends Strategy[T] {
def apply(stream: TokenStream): Result[TokenStream, T] = {
if (stream.nonEmpty) {
val (token, streamAfterToken) = stream.readNonWhitespace
if (token.getClass == tokenType) {
Success(token.asInstanceOf[T], Seq.empty, streamAfterToken)
} else NoMatch()
} else NoMatch()
}
}
object SingleTokenStrategy {
def apply[T <: Token](tokenType: Class[T]) = new SingleTokenStrategy(tokenType)
}
/**
* A specialized SingleTokenStrategy for parsing identifiers. Produces better error messages when keywords are used
* instead of identifiers.
*/
val identifierStrategy = new Strategy[Identifier] {
def apply(stream: TokenStream): Result[TokenStream, Identifier] = {
if (stream.nonEmpty) {
val (token, streamAfterToken) = stream.readNonWhitespace
token match {
case id: Identifier => Success(id, Seq.empty, streamAfterToken)
case kw: Keyword =>
val issue = Issues.KeywordAsIdentifier(kw.source, kw.text)
Success(Identifier(kw.text, kw.source), Seq(issue), streamAfterToken)
case _ => NoMatch()
}
} else NoMatch()
}
}
/**
* A generic method for parsing sequences of nodes of the same type, possibly separated by a mandatory separator.
* @param stream source token stream
* @param elementStrategy strategy for parsing a single element of the sequence
* @param elementDescription a term describing a single element of the sequence
* @param mandatorySeparator specify Some(Class[Separator]) if sequence elements must be separated by some token
* @param separatorDescription Some(term describing the separator), if a separator was specified
* @param greedy the default behavior (when 'greedy' is false) is to treat unknown tokens as the end of the sequence,
* leaving them in the stream. If this parameter is set to true, this function will read the whole
* stream (which is not always the whole file, see LimitedTokenStream), treating unknown tokens as
* errors.
* @param recoveryStopHints additional stop hints passed to recover() function
* @tparam N sequence element type
* @tparam Separator element separator type
* @return (sequence elements, encountered issues, stream after the sequence)
*/
def parseSequence[N <: Node, Separator <: Token](stream: TokenStream,
elementStrategy: Strategy[N],
elementDescription: Term,
mandatorySeparator: Option[Class[Separator]] = None,
separatorDescription: Option[Term] = None,
greedy: Boolean = false,
recoveryStopHints: Seq[Class[_ <: Token]] = Seq.empty)
: (Seq[N], Seq[Issue], TokenStream) = {
@tailrec
def parseWithoutSeparator(stream: TokenStream,
collectedElements: Vector[N] = Vector.empty,
collectedIssues: Vector[Issue] = Vector.empty): (Vector[N], Vector[Issue], TokenStream) = {
elementStrategy(stream) match {
case Success(element, issues, streamAfterElement) =>
parseWithoutSeparator(streamAfterElement, collectedElements :+ element, collectedIssues ++ issues)
case Malformed(issues, streamAfterElement) =>
parseWithoutSeparator(streamAfterElement, collectedElements, collectedIssues ++ issues)
case NoMatch() =>
if (greedy && stream.nonEmpty) {
val (invalidSource, streamAfterInvalidTokens) = recover(stream, stopHints = recoveryStopHints)
val issue = Issues.MalformedNode(invalidSource, elementDescription)
parseWithoutSeparator(streamAfterInvalidTokens, collectedElements, collectedIssues :+ issue)
} else {
(collectedElements, collectedIssues, stream)
}
}
}
@tailrec
def parseWithSeparator(stream: TokenStream,
separatorType: Class[Separator],
collectedElements: Vector[N] = Vector.empty,
collectedIssues: Vector[Issue] = Vector.empty): (Vector[N], Vector[Issue], TokenStream) = {
val separatorStrategy = SingleTokenStrategy(separatorType)
val (newElements, elementIssues, streamAfterElement) = elementStrategy(stream) match {
case Success(element, issues, streamAfter) => (Seq(element), issues, streamAfter)
case Malformed(issues, streamAfter) => (Seq.empty, issues, streamAfter)
case NoMatch() =>
if (greedy && stream.nonEmpty) {
val (invalidSource, streamAfterInvalidTokens) =
recover(stream, stopHints = recoveryStopHints :+ separatorType)
val issue = Issues.MalformedNode(invalidSource, elementDescription)
(Seq.empty, Seq(issue), streamAfterInvalidTokens)
} else {
(Seq.empty, Seq.empty, stream)
}
}
separatorStrategy(streamAfterElement) match {
case Success(_, separatorIssues, streamAfterSeparator) =>
parseWithSeparator(
streamAfterSeparator,
separatorType,
collectedElements ++ newElements,
collectedIssues ++ elementIssues ++ separatorIssues)
case Malformed(separatorIssues, streamAfterSeparator) =>
          // Same as above; Scala doesn't allow such alternatives in pattern matching.
parseWithSeparator(
streamAfterSeparator,
separatorType,
collectedElements ++ newElements,
collectedIssues ++ elementIssues ++ separatorIssues)
case NoMatch() =>
if (greedy && streamAfterElement.nonEmpty) {
val separatorTerm = Adjectives.Separating applyTo separatorDescription.get
val issue = Issues.MissingNode(streamAfterElement.beforeNext, separatorTerm)
parseWithSeparator(
streamAfterElement,
separatorType,
collectedElements ++ newElements,
collectedIssues ++ elementIssues :+ issue)
} else {
(collectedElements ++ newElements, collectedIssues ++ elementIssues, streamAfterElement)
}
}
}
mandatorySeparator match {
case Some(separatorType) => parseWithSeparator(stream, separatorType)
case None => parseWithoutSeparator(stream)
}
}
/**
* A generic method for parsing sequences of nodes of the same type enclosed in some kind of delimiting tokens
* (parentheses, braces, etc.), possibly separated by a mandatory separator.
* @param stream source token stream
* @param sequenceDescription a term describing the sequence as a whole
* @param elementStrategy strategy for parsing a single element of the sequence
* @param elementDescription a term describing a single element of the sequence
* @param openingElement opening token type
* @param closingElement closing token type
* @param closingElementDescription a term describing the closing token
* @param mandatorySeparator specify Some(Class[Separator]) if sequence elements must be separated by some token
* @param separatorDescription Some(term describing the separator), if a separator was specified
* @param recoveryStopHints additional stop hints passed to recover() function
* @tparam N sequence element type
* @tparam Open opening token type
* @tparam Close closing token type
* @tparam Separator element separator type
* @return if opening token was found, Some(opening token, sequence elements, closing token (if it was found),
* encountered issues, stream after the sequence). If it wasn't, None.
*/
def parseEnclosedSequence[N <: Node,
Open <: Token,
Close <: Token,
Separator <: Token](stream: TokenStream,
sequenceDescription: Term,
elementStrategy: Strategy[N],
elementDescription: Term,
openingElement: Class[Open],
closingElement: Class[Close],
closingElementDescription: Term,
mandatorySeparator: Option[Class[Separator]] = None,
separatorDescription: Option[Term] = None,
recoveryStopHints: Seq[Class[_ <: Token]] = Seq.empty)
: Option[(Open, Seq[N], Option[Close], Seq[Issue], TokenStream)] = {
val openingElementStrategy = SingleTokenStrategy(openingElement)
val closingElementStrategy = SingleTokenStrategy(closingElement)
openingElementStrategy(stream) match {
case Success(open, openIssues, streamAfterOpen) =>
val limitedStream = new LimitedTokenStream(streamAfterOpen, openingElement, closingElement, 1)
val (elements, elementIssues, limitedStreamOnEnd) = parseSequence(
limitedStream,
elementStrategy,
elementDescription,
mandatorySeparator,
separatorDescription,
greedy = true,
recoveryStopHints = recoveryStopHints)
val streamOnClose = limitedStreamOnEnd.asInstanceOf[LimitedTokenStream[Open, Close]].breakOut
val (close, closeIssues, streamAfterClose) = closingElementStrategy(streamOnClose) match {
case Success(c, ci, s) => (Some(c), ci, s)
case Malformed(ci, s) => (None, ci, s)
case NoMatch() =>
val position = if (elements.nonEmpty) elements.last.source.after else open.source.after
val issue = Issues.MissingSequenceClosingElement(position, (sequenceDescription, closingElementDescription))
(None, Seq(issue), streamOnClose)
}
Some(open, elements, close, openIssues ++ elementIssues ++ closeIssues, streamAfterClose)
case Malformed(_, _) | NoMatch() => None
}
}
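  /*
   * Illustrative call (a sketch only: the strategies, terms and token classes named
   * below are placeholders for whatever the caller actually parses, not definitions
   * from this file):
   *
   *   parseEnclosedSequence(
   *     stream,
   *     sequenceDescription = argListDescription,          // some Term
   *     elementStrategy = argStrategy,                      // some Strategy[Argument]
   *     elementDescription = argDescription,                // some Term
   *     openingElement = classOf[LeftParen],
   *     closingElement = classOf[RightParen],
   *     closingElementDescription = rightParenDescription,
   *     mandatorySeparator = Some(classOf[Comma]),
   *     separatorDescription = Some(commaDescription)) match {
   *     case Some((open, args, closeOption, issues, streamAfter)) => // sequence parsed
   *     case None => // no opening token found, the sequence is not present here
   *   }
   */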
/**
* Skips a number of tokens in an attempt to recover the stream to a fresh state. This method is used by greedy
* sequence parsers when the element strategy can't match the tokens in the stream. It tries a few common strategies:
* skipping to the closing brace when the opening couldn't be parsed, skipping to the next delimiter token (';' by
* default, others can be specified in 'stopHints' parameter), and skipping to the next line. If none of these was
* possible, all the tokens are discarded, skipping to the end of the stream.
* @param stream source token stream to recover (can't be empty)
* @param stopHints additional 'delimiter' tokens used as recovery stop hints
* @return (skipped source code, stream after recovery)
*/
private def recover(stream: TokenStream,
stopHints: Seq[Class[_ <: Token]] = Seq.empty): (SourceCode, TokenStream) = {
//1: Code block recovery
def recoverCodeBlock: Option[(SourceCode, TokenStream)] = {
@tailrec
def readBlock(stream: TokenStream, level: Int, collectedSource: SourceCode): (SourceCode, TokenStream) = {
if (level > 0) {
val (_, streamOnBrace) = stream.skipUntilOneOf(classOf[LeftBrace], classOf[RightBrace])
if (streamOnBrace.nonEmpty) {
val (brace, streamAfterBrace) = streamOnBrace.read
brace match {
case LeftBrace(source) => readBlock(streamAfterBrace, level + 1, collectedSource + source)
case RightBrace(source) => readBlock(streamAfterBrace, level - 1, collectedSource + source)
}
} else {
(collectedSource + streamOnBrace.beforeNext, streamOnBrace)
}
} else {
(collectedSource, stream)
}
}
if (stream.nonEmpty) {
val (nextToken, restStream) = stream.readNonWhitespace
nextToken match {
case LeftBrace(source) => Some(readBlock(restStream, 1, source))
case _ => None
}
} else None
}
//2: Same line recovery
def recoverSameLine: Option[(SourceCode, TokenStream)] = {
val (droppedSourceOption, streamOnHint) = stream.skipUntilOneOf(stopHints :+ classOf[LeftBrace] :_*)
if (streamOnHint.nonEmpty) {
droppedSourceOption match {
case Some(droppedSource) =>
if (droppedSource.startLine == droppedSource.endLine) {
Some((droppedSource, streamOnHint))
} else None
case None => None
}
} else None
}
//3: Next line recovery
def recoverNextLine: Option[(SourceCode, TokenStream)] = {
val (droppedSourceOption, streamOnNextToken) = stream.skipLine
if (streamOnNextToken.nonEmpty) {
droppedSourceOption match {
case Some(droppedSource) => Some(droppedSource, streamOnNextToken)
case None => Some(recover(streamOnNextToken, stopHints))
}
} else None
}
recoverCodeBlock orElse recoverSameLine orElse recoverNextLine getOrElse {
//Couldn't do anything, discard rest of stream.
if (stream.nonEmpty) {
val (invalidSourceOption, emptyStream) = stream.skipAll
(invalidSourceOption.get, emptyStream)
} else throw new IllegalArgumentException("can't perform recovery on empty stream")
}
}
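  /*
   * Worked example (informal, with a hypothetical token class): recovering the
   * fragment "= ; foo()" with stop hint classOf[Semicolon] fails code block recovery
   * (no leading '{'), then succeeds with same-line recovery by dropping the "=" and
   * stopping on the ';'; the line- and whole-stream fallbacks are never reached.
   */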
/**
* Provides a builder interface for parsing groups of nodes, possibly of different types. This could be implemented
* in a much better way if Scala had variadic templates, but we have to work with what we have.
* See the GroupParseBuilder and GroupParseResult for detailed explanation and every second non-trivial node class
* for usage examples.
* @return a GroupParseBuilder object
*/
def parseGroup() = new GroupParseBuilder(Vector.empty)
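  /*
   * Illustrative usage (a sketch: the strategies, descriptions and node types are
   * placeholders, not definitions from this file):
   *
   *   val (keywordOpt, nameOpt, bodyOpt, issues, streamAfter) =
   *     parseGroup()
   *       .definingElement(keywordStrategy)          // group parsing aborts if this is absent
   *       .element(nameStrategy, nameDescription)    // an issue is reported if this is absent
   *       .optionalElement(bodyStrategy)             // may be silently absent
   *       .parse(stream)
   *       .as[KeywordNode, NameNode, BodyNode]
   */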
private case class GroupElement(strategy: Strategy[Node],
description: Option[Term],
stopIfAbsent: Boolean = false,
optional: Boolean = false)
class GroupParseBuilder private[ParserImpl] (elements: Vector[GroupElement]) {
/**
* Adds a new node parsing strategy to the end of the group.
* @param strategy strategy for parsing the node
* @param description a term describing the node
* @return a GroupParseBuilder object
*/
def element(strategy: Strategy[Node],
description: Term) = {
new GroupParseBuilder(elements :+ GroupElement(strategy, Some(description)))
}
/**
* Adds a new node parsing strategy to the end of the group. If the node can't be parsed, the group parsing is
* aborted and all consequent elements are assumed Absent().
* @param strategy strategy for parsing the node.
* @return a GroupParseBuilder object
*/
def definingElement(strategy: Strategy[Node]) = {
new GroupParseBuilder(elements :+ GroupElement(strategy, None, stopIfAbsent = true))
}
/**
* Adds a new node parsing strategy to the end of the group. Even if the node can't be parsed, no errors are
* generated.
* @param strategy strategy for parsing the node.
* @return a GroupParseBuilder object
*/
def optionalElement(strategy: Strategy[Node]) = {
new GroupParseBuilder(elements :+ GroupElement(strategy, None, optional = true))
}
/**
* Actually performs the parsing.
* @param stream source token stream
* @return a GroupParseResult object
*/
def parse(stream: TokenStream): GroupParseResult = {
@tailrec
def doIt(elements: Vector[GroupElement],
stream: TokenStream,
collectedNodes: Vector[NodeOption[_]] = Vector.empty,
collectedIssues: Vector[Issue] = Vector.empty): (Vector[NodeOption[_]], Vector[Issue], TokenStream) = {
elements match {
case element +: tail =>
element.strategy(stream) match {
case Success(elementNode, elementIssues, streamAfterElement) =>
doIt(tail, streamAfterElement, collectedNodes :+ Present(elementNode), collectedIssues ++ elementIssues)
case Malformed(elementIssues, streamAfterElement) =>
doIt(tail, streamAfterElement, collectedNodes :+ Invalid(), collectedIssues ++ elementIssues)
case NoMatch() if !element.stopIfAbsent =>
if (element.optional) {
doIt(tail, stream, collectedNodes :+ Absent(), collectedIssues)
} else {
val issue = Issues.MissingNode(stream.beforeNext, element.description.get)
doIt(tail, stream, collectedNodes :+ Absent(), collectedIssues :+ issue)
}
case _ =>
val nones = Vector.fill(elements.size)(Absent())
(collectedNodes ++ nones, collectedIssues, stream)
}
case _ =>
(collectedNodes, collectedIssues, stream)
}
}
val (nodes, issues, streamAfterGroup) = doIt(elements, stream)
new GroupParseResult(nodes, issues, streamAfterGroup)
}
}
/**
* Represents a possibly present and valid node. Unlike Strategy.Result, subclasses don't provide encountered issues
* and the new stream. This trait is only used as a return value from group parsing.
* @tparam N node type
*/
sealed trait NodeOption[+N <: Node] {
def toOption: Option[N] = {
this match {
case Present(node) => Some(node)
case _ => None
}
}
}
/**
* Represents a successfully matched node.
*/
case class Present[N <: Node](node: N) extends NodeOption[N]
/**
* Represents a malformed but present node.
*/
case class Invalid[N <: Node]() extends NodeOption[N]
/**
* Represents an absent node.
*/
case class Absent[N <: Node]() extends NodeOption[N]
/**
   * A totally unsafe class that exists because variadic templates don't. Use the 'as' method with explicitly specified
* node types to extract NodeOptions for individual nodes, encountered issues and the stream after the group.
*/
class GroupParseResult private [ParserImpl] (nodes: Seq[NodeOption[_]], issues: Vector[Issue], stream: TokenStream) {
def as[N1 <: Node] = nodes match {
case e1 +: _ =>
(e1.asInstanceOf[NodeOption[N1]], issues, stream)
}
def as[N1 <: Node, N2 <: Node] = nodes match {
case e1 +: e2 +: _ =>
(e1.asInstanceOf[NodeOption[N1]], e2.asInstanceOf[NodeOption[N2]], issues, stream)
}
def as[N1 <: Node, N2 <: Node, N3 <: Node] = nodes match {
case e1 +: e2 +: e3 +: _ =>
(e1.asInstanceOf[NodeOption[N1]], e2.asInstanceOf[NodeOption[N2]], e3.asInstanceOf[NodeOption[N3]], issues, stream)
}
def as[N1 <: Node, N2 <: Node, N3 <: Node, N4 <: Node] = nodes match {
case e1 +: e2 +: e3 +: e4 +: _ =>
(e1.asInstanceOf[NodeOption[N1]], e2.asInstanceOf[NodeOption[N2]], e3.asInstanceOf[NodeOption[N3]],
e4.asInstanceOf[NodeOption[N4]], issues, stream)
}
def as[N1 <: Node, N2 <: Node, N3 <: Node, N4 <: Node, N5 <: Node] = nodes match {
case e1 +: e2 +: e3 +: e4 +: e5 +: _ =>
(e1.asInstanceOf[NodeOption[N1]], e2.asInstanceOf[NodeOption[N2]], e3.asInstanceOf[NodeOption[N3]],
e4.asInstanceOf[NodeOption[N4]], e5.asInstanceOf[NodeOption[N5]], issues, stream)
}
def as[N1 <: Node, N2 <: Node, N3 <: Node, N4 <: Node, N5 <: Node, N6 <: Node] = nodes match {
case e1 +: e2 +: e3 +: e4 +: e5 +: e6 +: _ =>
(e1.asInstanceOf[NodeOption[N1]], e2.asInstanceOf[NodeOption[N2]], e3.asInstanceOf[NodeOption[N3]],
e4.asInstanceOf[NodeOption[N4]], e5.asInstanceOf[NodeOption[N5]], e6.asInstanceOf[NodeOption[N6]], issues, stream)
}
}
}
|
merkispavel/colang
|
src/main/scala/colang/ast/raw/Parser.scala
|
Scala
|
mit
| 21,505 |
package org.ai4fm.proofprocess.ui.graph
import scala.collection.JavaConversions._
import org.ai4fm.proofprocess.{Attempt, Proof, ProofElem, ProofEntry, ProofParallel, ProofSeq, ProofStore}
import org.ai4fm.proofprocess.core.graph.EmfPProcessTree
import org.eclipse.emf.ecore.EObject
/**
* Calculates ProofProcess graph structure for different elements.
*
* The graph is represented as a map of elements to their children.
*
* @author Andrius Velykis
*/
object PProcessGraph {
def proofTreeGraph(elem: ProofElem): Map[ProofElem, List[ProofElem]] =
proofTreeGraphEntries(elem)
/**
* Creates a graph based on the actual links between ProofEntry elements.
*/
def proofTreeGraphEntries(elem: ProofElem): Map[ProofElem, List[ProofElem]] = {
val ppGraph = EmfPProcessTree.graphConverter.toGraph(elem)
val graph = ppGraph.graph
val nodes = graph.nodes
(nodes foldLeft Map[ProofElem, List[ProofElem]]())((m, n) =>
m + (n.value -> (n.diSuccessors.toList map (_.value))))
}
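  /*
   * The result is a plain adjacency map; e.g. (with hypothetical entries e1..e3):
   *   Map(root -> List(e1, e2), e1 -> List(e3), e2 -> List(e3), e3 -> Nil)
   */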
/**
* Creates a graph based on PP EMF tree element decomposition.
*/
def proofTreeGraphEMF(elem: ProofElem): Map[ProofElem, List[ProofElem]] = {
val emptyGraph = Map[ProofElem, List[ProofElem]]()
def graphEntries(entries: List[ProofElem], cont: List[ProofElem]): (List[ProofElem], Map[ProofElem, List[ProofElem]]) = {
entries.foldRight(cont, emptyGraph) {
case (entry, (cont, graph)) => {
val (entryCont, entryGraph) = proofGraph(entry, cont)
(entryCont, graph ++ entryGraph)
}
}
}
    def children(elem: ProofElem): List[ProofElem] = elem match {
      case seq: ProofSeq => seq.getEntries.toList
      case par: ProofParallel => par.getEntries.toList
      case _ => Nil // leaf elements such as ProofEntry have no children
    }
def proofGraph(elem: ProofElem, cont: List[ProofElem]): (List[ProofElem], Map[ProofElem, List[ProofElem]]) = elem match {
// TODO comments
case entry: ProofEntry => (List(entry), Map(entry -> cont))
case seq: ProofSeq => {
val (entriesCont, entriesGraph) = seq.getEntries.toList.foldRight(cont, emptyGraph) {
case (entry, (cont, graph)) => {
val (entryCont, entryGraph) = proofGraph(entry, cont)
(entryCont, graph ++ entryGraph)
}
}
(seq :: entriesCont, entriesGraph + (seq -> (entriesCont ::: cont)))
}
case par: ProofParallel => {
val branchInfos = par.getEntries.toList.map(e => proofGraph(e, cont))
val branchCont = branchInfos.map(_._1).flatten
val branchGraph = branchInfos.map(_._2).foldRight(emptyGraph)(_ ++ _)
(elem :: branchCont, branchGraph + (elem -> (branchCont ::: cont)))
}
}
proofGraph(elem, Nil)._2
}
def attemptGraph(attempt: Attempt): Map[EObject, List[EObject]] = {
val proofOpt = Option(attempt.getProof)
val proofMap = proofOpt.map(proofTreeGraph).getOrElse(Map())
proofMap ++ Map(attempt -> proofOpt.toList)
}
def proofGraph(proof: Proof): Map[EObject, List[EObject]] = {
val attempts = proof.getAttempts.toList
val attemptsGraph = attempts.foldRight[Map[EObject, List[EObject]]](Map()) {
case (attempt, graph) => graph ++ attemptGraph(attempt)
}
attemptsGraph + (proof -> attempts)
}
def storeGraph(proofStore: ProofStore): Map[EObject, List[EObject]] = {
val proofs = proofStore.getProofs.toList
val proofsGraph = proofs.foldRight[Map[EObject, List[EObject]]](Map()) {
case (proof, graph) => graph ++ proofGraph(proof)
}
proofsGraph + (proofStore -> proofs)
}
}
|
andriusvelykis/proofprocess
|
org.ai4fm.proofprocess.ui/src/org/ai4fm/proofprocess/ui/graph/PProcessGraph.scala
|
Scala
|
epl-1.0
| 3,593 |
package monocle.std
import monocle.MonocleSuite
import monocle.law.discipline.LensTests
import monocle.law.discipline.function.{Cons1Tests, EachTests, ReverseTests, Snoc1Tests}
class Tuple2Spec extends MonocleSuite {
checkAll("first tuple2", LensTests(first[(Int, Char), Int]))
checkAll("second tuple2", LensTests(second[(Int, Char), Char]))
checkAll("each tuple2", EachTests[(Int, Int), Int])
checkAll("reverse tuple2", ReverseTests[(Int, Char), (Char, Int)])
checkAll("cons1 tuple2", Cons1Tests[(Int, Char), Int, Char])
checkAll("snoc1 tuple2", Snoc1Tests[(Int, Char), Int, Char])
}
|
NightRa/Monocle
|
test/src/test/scala/monocle/std/Tuple2Spec.scala
|
Scala
|
mit
| 599 |
package com.sksamuel.elastic4s.search.aggs
import com.sksamuel.elastic4s.http.search.DateRangeBucket
import com.sksamuel.elastic4s.testkit.DockerTests
import com.sksamuel.elastic4s.{ElasticDate, ElasticDateMath, Years}
import org.scalatest.{FreeSpec, Matchers}
import scala.util.Try
class KeyedDateRangeAggregationHttpTest extends FreeSpec with DockerTests with Matchers {
Try {
client.execute {
deleteIndex("daterangeaggs")
}.await
}
client.execute {
createIndex("daterangeaggs") mappings {
mapping("tv") fields(
textField("name").fielddata(true),
dateField("premiere_date").format("dd/MM/yyyy")
)
}
}.await
client.execute(
bulk(
indexInto("daterangeaggs/tv").fields("name" -> "Breaking Bad", "premiere_date" -> "20/01/2008"),
indexInto("daterangeaggs/tv").fields("name" -> "Better Call Saul", "premiere_date" -> "15/01/2014"),
indexInto("daterangeaggs/tv").fields("name" -> "Star Trek Discovery", "premiere_date" -> "27/06/2017"),
indexInto("daterangeaggs/tv").fields("name" -> "Game of Thrones", "premiere_date" -> "01/06/2010"),
indexInto("daterangeaggs/tv").fields("name" -> "Designated Survivor", "premiere_date" -> "12/03/2016"),
indexInto("daterangeaggs/tv").fields("name" -> "Walking Dead", "premiere_date" -> "19/01/2011")
).refreshImmediately
).await
"keyed date range agg" - {
"should support elastic dates" in {
val resp = client.execute {
search("daterangeaggs").matchAllQuery().aggs {
dateRangeAgg("agg1", "premiere_date")
.range("old", ElasticDateMath("15/12/2017").minus(10, Years), ElasticDate("15/12/2017").minus(5, Years))
.range("new", ElasticDateMath("15/12/2017").minus(5, Years), ElasticDate("15/12/2017"))
.keyed(true)
}
}.await.result
resp.totalHits shouldBe 6
val agg = resp.aggs.keyedDateRange("agg1")
agg.buckets.mapValues(_.copy(data = Map.empty)) shouldBe Map(
"old" -> DateRangeBucket(Some("1.1976768E12"), Some("15/12/2007"), Some("1.3555296E12"), Some("15/12/2012"), None, 3, Map.empty),
"new" -> DateRangeBucket(Some("1.3555296E12"), Some("15/12/2012"), Some("1.513296E12"), Some("15/12/2017"), None, 3, Map.empty)
)
}
"should support string dates" in {
val resp = client.execute {
search("daterangeaggs").matchAllQuery().aggs {
dateRangeAgg("agg1", "premiere_date")
.range("old", "15/12/2017||-10y", "15/12/2017||-5y")
.range("new", "15/12/2017||-5y", "15/12/2017||")
.keyed(true)
}
}.await.result
resp.totalHits shouldBe 6
val agg = resp.aggs.keyedDateRange("agg1")
agg.buckets.mapValues(_.copy(data = Map.empty)) shouldBe Map(
"old" -> DateRangeBucket(Some("1.1976768E12"), Some("15/12/2007"), Some("1.3555296E12"), Some("15/12/2012"), None, 3, Map.empty),
"new" -> DateRangeBucket(Some("1.3555296E12"), Some("15/12/2012"), Some("1.513296E12"), Some("15/12/2017"), None, 3, Map.empty)
)
}
}
}
|
Tecsisa/elastic4s
|
elastic4s-tests/src/test/scala/com/sksamuel/elastic4s/search/aggs/KeyedDateRangeAggregationHttpTest.scala
|
Scala
|
apache-2.0
| 3,098 |
package org.scalafmt
import scala.meta.Case
import scala.meta.Tree
import scala.reflect.ClassTag
import scala.reflect.classTag
import java.nio.file.Path
import scala.meta.inputs.Position
import org.scalafmt.internal.Decision
import org.scalafmt.internal.FormatToken
import org.scalafmt.internal.State
import org.scalafmt.util.LoggerOps
import scala.meta.internal.inputs._
import scala.util.control.NoStackTrace
sealed abstract class Error(msg: String) extends Exception(msg)
object Error {
import LoggerOps._
def reportIssue: String =
"Please file an issue on https://github.com/scalameta/scalafmt/issues"
case object UnableToParseCliOptions
extends Error("Failed to parse CLI options")
case class Incomplete(formattedCode: String)
extends Error("Unable to format file due to bug in scalafmt")
case class PreciseIncomplete(pos: Position, formattedCode: String)
extends Error(
pos.formatMessage(
"error",
"Unable to format file due to bug in scalafmt"
)
)
case class CantFindDefnToken(what: String, tree: Tree)
extends Error(
s"Expected keyword of type $what in tree $tree"
)
case class CaseMissingArrow(tree: Case)
extends Error(s"Missing => in case: \n$tree")
case class FormatterChangedAST(diff: String, output: String)
extends Error(s"""Formatter changed AST
|=====================
|$diff
|=====================
|${output.linesIterator.toVector.take(10).mkString("\n")}
|=====================
|Formatter changed AST
""".stripMargin)
case class FormatterOutputDoesNotParse(msg: String, line: Int)
extends Error("Formatter output does not parse:\n" + msg)
case class UnexpectedTree[Expected <: Tree: ClassTag](obtained: Tree)
extends Error(s"""Expected: ${classTag[Expected].runtimeClass.getClass}
|Obtained: ${log(obtained)}""".stripMargin)
case class CantFormatFile(msg: String)
extends Error("scalafmt cannot format this file:\n" + msg)
case class NoopDefaultPolicyApplied(decision: Decision)
extends Error(s"Default policy run on $decision")
case class UnknownStyle(style: String)
extends Error(s"Don't understand style $style")
case class UnableToFindStyle(filename: String, e: Throwable)
extends Error(s"Unable to find style for file $filename. $e")
case class MisformattedFile(file: Path, customMessage: String)
extends Error(s"$file is mis-formatted. $customMessage")
case class SearchStateExploded(
deepestState: State,
partialOutput: String,
ft: FormatToken
) extends Error({
val tok = LoggerOps.log2(ft)
val line = ft.left.pos.endLine
s"Search state exploded on '$tok', line $line"
}) {
def line: Int = ft.left.pos.endLine
}
case class InvalidScalafmtConfiguration(throwable: Throwable)
extends Error(s"Failed to read configuration: $throwable")
case object NoMatchingFiles
extends Error(
"No files formatted/tested. " +
"Verify include/exclude filters and command line arguments."
)
with NoStackTrace
case class InvalidOption(option: String)
extends Error(s"Invalid option $option")
case class FailedToParseOption(path: String, error: Throwable)
extends Error(s"Failed to read option $path, error: $error")
case class IdempotencyViolated(msg: String) extends Error(msg)
case object MegaTestFailed extends Error("Mega test failed.")
}
|
scalameta/scalafmt
|
scalafmt-core/shared/src/main/scala/org/scalafmt/Error.scala
|
Scala
|
apache-2.0
| 3,524 |
package gsd.linux.tools
import util.logging.ConsoleLogger
import java.io.PrintStream
import gsd.linux.cnf.DimacsReader.{DimacsHeader, DimacsProblem}
import org.clapper.argot._
import gsd.linux.cnf.{DimacsReader, ImplBuilder, SATBuilder}
import java.util.Scanner
object ImplGraphMain extends ArgotUtil with ConsoleLogger {
val name = "ImplGraphMain"
import ArgotConverters._
val inParam = parser.parameter[String]("in-file",
"input file containing CNF in dimacs format, stdin if not specified", true)
val outParam = parser.parameter[String]("out-file",
"output file for the implication graph, stdout if not specified", true)
val genFlag = parser.flag[Boolean](List("g"),
"do NOT consider variables that end with '_m' as generated")
def main(args: Array[String]) {
try {
parser.parse(args)
val (header, problem): (DimacsHeader, DimacsProblem) =
(pOpt.value, inParam.value) match {
case (Some(_), Some(_)) =>
parser.usage("Either a project (-p) is specified or input & output parameters are used.")
case (Some(p), None) => (p.header, p.dimacs)
case (None, Some(f)) =>
(DimacsReader.readHeaderFile(f), DimacsReader.readFile(f))
case (None, None) =>
log("Using stdin as input...")
log("Warning: dimacs parsing from stdin is experimental!")
val scanner = new Scanner(System.in)
val header = DimacsReader.readHeader(scanner)
val dimacs = DimacsReader.read(scanner)
(header, dimacs)
}
val output =
(pOpt.value, outParam.value) match {
case (Some(p), None) => new PrintStream(p.implgFile.get)
case (None, Some(f)) => new PrintStream(f)
case _ => System.out
}
execute(header, problem, output)
}
catch {
case e: ArgotUsageException => println(e.message)
}
}
def execute(header: DimacsHeader, dimacs: DimacsProblem,
out: PrintStream) {
log("[INFO] all variables past the first generated variable (%d) are ignored!".format(header.firstGen))
log("Initializing SAT solver...")
val sat = new SATBuilder(dimacs.cnf, dimacs.numVars, header.generated, header.firstGen)
with ImplBuilder with ConsoleLogger
log("Building implication graph...")
val additional = if (!(genFlag.value.getOrElse(false))) {
log("[INFO] Considering features that end with _m as generated...")
header.varMap filter {
case (k,v) => !header.generated.contains(k) && v.endsWith("_m")
} map { _._1 }
} else Nil
val startTime = System.currentTimeMillis()
val g = sat.mkImplicationGraph(header.varMap, additional)
val endTime = System.currentTimeMillis()
log("Implication Graph Computation Time: %d seconds".format((endTime - startTime) / 1000))
out.println(g.toParseString)
}
}
|
jacksonpradolima/linux-variability-analysis-tools.fm-translation
|
src/main/scala/gsd/linux/tools/ImplGraphMain.scala
|
Scala
|
lgpl-3.0
| 2,934 |
package com.sksamuel.scoverage.samples
import akka.actor.{ActorRef, Actor}
import scala.util.Random
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.language.postfixOps
/** @author Stephen Samuel */
class ClientActor(priceEngine: ActorRef, orderEngine: ActorRef) extends Actor {
val MinPrice = BigDecimal.valueOf(50)
// testing for custom setters and getters
private var _clientName: String = "DoIHaveAName?"
def clientName = _clientName
def clientName_=(n: String): Unit = _clientName = n
clientName = "sammy"
clientName = if (System.currentTimeMillis() < 0) "charlie" else "bobby"
def receive = {
case quote: SpotQuote =>
if (quote.ask < MinPrice) {
println("Sending order request")
orderEngine ! MarketOrderRequest(quote.instrument, BigDecimal.valueOf(Random.nextInt(100)))
}
case r: MarketOrderReject =>
println("Order was rejected :(" + r)
case a: MarketOrderAccept =>
println("Order was accepted :)" + a)
}
override def preStart(): Unit = {
// ask for a quote every second for a random equity
context.system.scheduler.schedule(100 milliseconds, 1000 milliseconds) {
priceEngine ! RequestForQuote(InstrumentLoader.randomInstrument)
}
}
}
|
scoverage/sbt-scoverage-samples
|
src/main/scala/com/sksamuel/scoverage/samples/ClientActor.scala
|
Scala
|
apache-2.0
| 1,298 |
//package tests
//
//
//import org.scalatest.FunSuite
//import org.junit.runner.RunWith
//import org.scalatest.junit.JUnitRunner
//import ch.usi.inf.l3.piuma.transform.dsl.ParseTransformerDSL
//
//@RunWith(classOf[JUnitRunner])
//class Test extends FunSuite {
// import ParseTransformerDSL._
// test("should not parse") {
// val text = """|name := "plugin"
// |runsAfter := List("another", "no")
// |runsRightAfter := "previous"
// |runsBefore := List("Next")
// |transform = {
// |tree match {
// |{
// |val d = 3
// |%>
// |}
// |}
// |}""".stripMargin
// assert(parse(text) === false)
// }
//
// test("should parse") {
// val text = """|name := "plugin"
// |runsAfter := List("another", "no")
// |runsBefore := List("Next")
// |transform = {
// |<--%
// |val b = 3
// |%-->
// |tree match <--%
// |val d = 3
// |%-->
// |}""".stripMargin
// assert(parse(text) === true)
// }
//}
|
amanjpro/piuma
|
simple/src/main/scala/tests/Test.scala
|
Scala
|
bsd-3-clause
| 1,006 |
package io.mpjsons.impl.deserializer
import io.mpjsons.JsonTypeDeserializer
import io.mpjsons.impl.StringIterator
class PostTransformDeserializer[T](internalDeserializer: JsonTypeDeserializer[_ <: Any], transform: T => T) extends JsonTypeDeserializer[T]{
override def deserialize(jsonIterator: StringIterator): T = {
transform(internalDeserializer.deserialize(jsonIterator).asInstanceOf[T])
}
}
|
marpiec/mpjsons
|
src/main/scala/io/mpjsons/impl/deserializer/PostTransformDeserializer.scala
|
Scala
|
apache-2.0
| 407 |
/*
* Originally (c) 2014 Dmitry Leskov, http://www.dmitryleskov.com
* Released into the public domain under the Unlicense, http://unlicense.org
*/
package streamhygiene
package part2
import Test._
object Part2 extends AutoConfig {
def input = Stream.continually(1) take problemSize
/*
* It looks like the imperative version of sum() does not leak memory
*/
test("imperative sum(input)"){
def sum(xs: Stream[Int]): Int = {
var scan = xs
var res = 0
while (!scan.isEmpty) {
res += scan.head
scan = scan.tail
}
res
}
sum(input)
}
/*
* In fact, Stream.sum() also stops leaking memory after HotSpot kicks in
*/
test("input.sum"){input.sum}
test("for (i <- 1 to 10000){(input take 10).sum}"){
for (i <- 1 to 10000) {
(input take 10).sum
}
}
test("input.sum"){input.sum}
/*
   * But Rule #1 holds, so Rule #4 holds too, albeit in the form
* "When pattern matching on streams, make sure to not use pattern variables
* after the call of a stream consuming function."
*/
test("tailAvg(input)"){
def tailAvg(xs: Stream[Int]): Option[Int] = {
xs match {
case Stream.Empty => None
case y #:: Stream.Empty => None
case y #:: ys => Some(ys.sum / ys.length)
}
}
tailAvg(input)
}
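  /*
   * A possible fix (an illustrative sketch, not part of the original experiments):
   * consume the tail exactly once, so no pattern variable is used after a
   * stream-consuming call.
   *
   *   def tailAvgSinglePass(xs: Stream[Int]): Option[Int] = xs match {
   *     case Stream.Empty => None
   *     case _ #:: Stream.Empty => None
   *     case _ #:: ys =>
   *       val (sum, len) = ys.foldLeft((0, 0)) { case ((s, n), x) => (s + x, n + 1) }
   *       Some(sum / len)
   *   }
   */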
}
|
dmitryleskov/stream-hygiene
|
src/streamhygiene/part2/Part2.scala
|
Scala
|
unlicense
| 1,330 |
package example
case class Box[T](var element: T) {
def get(): T = element
def set(newElement: T): Unit = {
element = newElement
}
}
object Collection {
def test0(): Unit = {
val box1 = new Box[Int](10)
println(s"box1 => $box1")
println(s"box1.get() => ${box1.get}")
box1.set(0)
println(s"box1.get() => ${box1.get}")
val box2 = new Box[Animal](Cat)
println(s"box2 => $box2")
println(s"box2.get() => ${box2.get}")
box2.set(Dog)
println(s"box2.get() => ${box2.get}")
val box3 = new Box(Cat)
println(s"box3 => $box3") // box3 is Box[Cat]
// println(s"box3.get() => ${box3.get}")
// box3.set(Dog) // type mismatch
// println(s"box3.get() => ${box3.get}")
}
def toFizzBuzz(numbers: List[Int]): List[String] = numbers.map((i: Int) => i match {
case x if x % 15 == 0 => "FizzBuzz"
case x if x % 3 == 0 => "Fizz"
case x if x % 5 == 0 => "Buzz"
case x => x.toString
})
def test1(): Unit = {
val numbers = (1 to 15).toList
val fizzBuzzList = toFizzBuzz(numbers)
fizzBuzzList.foreach(println)
}
def mapTR[T, R](t: T, r: R): (T, R) = (t, r)
def toTupleOfTR[T, R](list: List[T], r: R): List[Tuple2[T, R]] = {
val f: (T, R) => Tuple2[T, R] = mapTR[T, R]
list.map(t => f(t, r))
}
def test2(): Unit = {
val numbers = (1 to 3).toList
val r = "ABCDE"
val tupleList = toTupleOfTR(numbers, r)
tupleList.foreach((t: (Int, String)) => println(t))
}
def test3(): Unit = {
"hello".foreach(println)
}
def test4(): Unit = {
val arr = Array("hello", "world")
println(arr(0))
arr(1) = "scala"
println(arr(1))
}
def threeTimesThree(list: List[Int]): List[Int] = list match {
case head :: tail if head % 3 == 0 =>
(head * 3) :: threeTimesThree(tail)
case head :: tail =>
head :: threeTimesThree(tail)
case Nil => Nil
}
def test5(): Unit = {
val list1 = List("hello", "world")
println(s"list1 => $list1")
val list2 = 1 :: 2 :: 3 :: Nil // Nil.::(1).::(2).::(3)
println(s"list2 => $list2")
val list3 = 0 :: list2
println(s"list3 => $list3")
val list4 = threeTimesThree(list3)
println(s"list4 => $list4")
}
def test6(): Unit = {
val set1 = Set(3, 2, 1, 3)
println(s"set1 => $set1")
val set2 = List(3, 2, 1, 3).toSet
println(s"set2 => $set2")
}
def test7(): Unit = {
val map1 = Map((1, "a"), (2, "b"))
println(s"map1 => $map1")
val map2 = Map(1 -> "a", 2 -> "b")
println(s"map2 => $map2")
val map3 = List(1 -> "a", 2 -> "b").toMap
println(s"map3 => $map3")
println(s"map3.get(1) =>${map3.get(1)}")
println(s"map3.get(10) =>${map3.get(10)}")
}
def test8(): Unit = {
val list1 = List("a", "b", "c")
list1.zipWithIndex.foreach(pair => println(s"pair => ${pair._1}, ${pair._2}"))
val list2 = list1.filter(c => c != "b")
println(s"list2 => $list2")
val count = list1.count(c => c != "b")
println(s"count => $count")
val contains = list1.contains("b")
println(s"contains => $contains")
val list3 = list1 ++ list2
println(s"list3 => $list3")
val list4: List[Any] = list1 ++ List(1, 2, 3)
println(s"list4 => $list4")
val mkString = list4.mkString("[[", ",, ", "]]")
println(s"mkString => $mkString")
}
}
|
ohtomi/sandbox
|
scala-start/src/main/scala/example/Collection.scala
|
Scala
|
mit
| 3,336 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.tools.stats
import com.beust.jcommander.{Parameter, ParameterException}
import org.geotools.data.DataStore
import org.locationtech.geomesa.index.stats.HasGeoMesaStats
import org.locationtech.geomesa.tools.stats.StatsTopKCommand.StatsTopKParams
import org.locationtech.geomesa.tools.{Command, DataStoreCommand}
import org.locationtech.geomesa.utils.stats.{Stat, TopK}
import org.opengis.filter.Filter
trait StatsTopKCommand[DS <: DataStore with HasGeoMesaStats] extends DataStoreCommand[DS] {
override val name = "stats-top-k"
override val params: StatsTopKParams
override def execute(): Unit = withDataStore(topK)
protected def topK(ds: DS): Unit = {
val sft = ds.getSchema(params.featureName)
if (sft == null) {
throw new ParameterException(s"Schema '${params.featureName}' does not exist")
}
val attributes = getAttributesFromParams(sft, params)
val filter = Option(params.cqlFilter).getOrElse(Filter.INCLUDE)
val k = Option(params.k).map(_.intValue)
if (params.exact) {
Command.user.info("Running stat query...")
} else if (filter != Filter.INCLUDE) {
Command.user.warn("Non-exact stat queries may not fully account for the specified CQL filter")
}
val results = ds.stats.getSeqStat[TopK[Any]](sft, attributes.map(Stat.TopK), filter, params.exact)
attributes.foreach { attribute =>
Command.output.info(s"Top values for '$attribute':")
val stat = results.find(_.property == attribute)
stat match {
case None => Command.output.info(" unavailable")
case Some(s) =>
val stringify = Stat.stringifier(sft.getDescriptor(attribute).getType.getBinding)
s.topK(k.getOrElse(s.size)).foreach { case (value, count) =>
Command.output.info(s" ${stringify(value)} ($count)")
}
}
}
}
}
object StatsTopKCommand {
// @Parameters(commandDescription = "Enumerate the most frequent values in a GeoMesa feature type")
trait StatsTopKParams extends StatsParams with AttributeStatsParams {
@Parameter(names = Array("-k"), description = "Number of top values to show")
var k: Integer = _
}
}
|
elahrvivaz/geomesa
|
geomesa-tools/src/main/scala/org/locationtech/geomesa/tools/stats/StatsTopKCommand.scala
|
Scala
|
apache-2.0
| 2,651 |
package com.twitter.finagle.server
import com.twitter.concurrent.AsyncSemaphore
import com.twitter.finagle.filter.{MaskCancelFilter, RequestSemaphoreFilter}
import com.twitter.finagle.service.TimeoutFilter
import com.twitter.finagle.stats.{StatsReceiver, ServerStatsReceiver}
import com.twitter.finagle.tracing._
import com.twitter.finagle.transport.Transport
import com.twitter.finagle.util._
import com.twitter.finagle.{param, Stack, Stackable}
import com.twitter.finagle.{Server, Service, ServiceFactory, ListeningServer}
import com.twitter.util._
import java.net.SocketAddress
/**
 * The default Server implementation. It is given a Listener (e.g.
 * [[com.twitter.finagle.netty3.Netty3Listener]]) and a function,
 * serviceTransport, that binds a transport and a service. It will then
* dispatch requests onto a standard service stack parameterized as
* described below.
*
* @param listener The Listener from which to accept new typed
* Transports.
*
* @param serviceTransport The function used to bind an accepted
* Transport with a Service. Requests read from the transport are
* dispatched onto the Service, with replies written back.
*
* @param requestTimeout The maximum amount of time the server is
* allowed to handle a request. If the timeout expires, the server
* will cancel the future and terminate the client connection.
*
* @param maxConcurrentRequests The maximum number of concurrent
* requests the server is willing to handle.
*
 * @param cancelOnHangup Enabled by default. If disabled, cancellations
 * triggered by the transport (e.g. the client hanging up) are masked and
 * do not propagate to the service.
*
* @param prepare Prepare the given `ServiceFactory` before use.
*/
case class DefaultServer[Req, Rep, In, Out](
name: String,
listener: Listener[In, Out],
serviceTransport: (Transport[In, Out], Service[Req, Rep]) => Closable,
requestTimeout: Duration = Duration.Top,
maxConcurrentRequests: Int = Int.MaxValue,
cancelOnHangup: Boolean = true,
prepare: ServiceFactory[Req, Rep] => ServiceFactory[Req, Rep] =
(sf: ServiceFactory[Req, Rep]) => sf,
timer: Timer = DefaultTimer.twitter,
monitor: Monitor = DefaultMonitor,
logger: java.util.logging.Logger = DefaultLogger,
statsReceiver: StatsReceiver = ServerStatsReceiver,
tracer: Tracer = DefaultTracer,
reporter: ReporterFactory = LoadedReporterFactory,
newTraceInitializer: Stackable[ServiceFactory[Req, Rep]] = TraceInitializerFilter.serverModule[Req, Rep]
) extends Server[Req, Rep] {
val stack = StackServer.newStack[Req, Rep]
.replace(StackServer.Role.preparer, prepare)
.replace(TraceInitializerFilter.role, newTraceInitializer)
private type _In = In
private type _Out = Out
private case class Server(
stack: Stack[ServiceFactory[Req, Rep]] = stack,
params: Stack.Params = Stack.Params.empty
) extends StdStackServer[Req, Rep, Server] {
protected def copy1(
stack: Stack[ServiceFactory[Req, Rep]] = this.stack,
params: Stack.Params = this.params
) = copy(stack, params)
protected type In = _In
protected type Out = _Out
protected def newListener() = listener
protected def newDispatcher(transport: Transport[In, Out], service: Service[Req, Rep]) =
serviceTransport(transport, service)
}
val underlying: StackServer[Req, Rep] = Server()
private[this] val sem =
if (maxConcurrentRequests == Int.MaxValue) None
else Some(new AsyncSemaphore(maxConcurrentRequests, 0))
val configured = underlying
.configured(param.Label(name))
.configured(param.Timer(timer))
.configured(param.Monitor(monitor))
.configured(param.Logger(logger))
.configured(param.Stats(statsReceiver))
.configured(param.Tracer(tracer))
.configured(param.Reporter(reporter))
.configured(MaskCancelFilter.Param(!cancelOnHangup))
.configured(TimeoutFilter.Param(requestTimeout))
.configured(RequestSemaphoreFilter.Param(sem))
def serve(addr: SocketAddress, factory: ServiceFactory[Req, Rep]): ListeningServer =
configured.serve(addr, factory)
}
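/*
 * Illustrative construction (a sketch only: `myListener`, `myDispatcher`,
 * `myServiceFactory` and the Req/Rep/In/Out types stand in for concrete choices,
 * e.g. a Netty-based Listener and a serial dispatcher):
 *
 *   val server = DefaultServer[Req, Rep, In, Out](
 *     name = "myserver",
 *     listener = myListener,
 *     serviceTransport = myDispatcher,
 *     requestTimeout = Duration.fromSeconds(5),
 *     maxConcurrentRequests = 100)
 *   val listening = server.serve(new InetSocketAddress(8080), myServiceFactory)
 */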
|
a-manumohan/finagle
|
finagle-core/src/main/scala/com/twitter/finagle/server/DefaultServer.scala
|
Scala
|
apache-2.0
| 4,023 |
/*
Copyright 2014 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.serialization.macros.impl
import scala.reflect.macros.Context
import com.twitter.scalding.serialization.OrderedSerialization
import com.twitter.scalding.serialization.macros.impl.ordered_serialization._
import com.twitter.scalding.serialization.macros.impl.ordered_serialization.providers._
object OrderedSerializationProviderImpl {
def normalizedDispatcher(c: Context)(
buildDispatcher: => PartialFunction[c.Type, TreeOrderedBuf[c.type]]): PartialFunction[c.Type, TreeOrderedBuf[c.type]] = {
case tpe if !(tpe.normalize == tpe) => buildDispatcher(tpe.normalize)
}
def scaldingBasicDispatchers(c: Context)(
buildDispatcher: => PartialFunction[c.Type, TreeOrderedBuf[c.type]]): PartialFunction[c.Type, TreeOrderedBuf[c.type]] = {
val primitiveDispatcher = PrimitiveOrderedBuf.dispatch(c)
val optionDispatcher = OptionOrderedBuf.dispatch(c)(buildDispatcher)
val eitherDispatcher = EitherOrderedBuf.dispatch(c)(buildDispatcher)
val caseClassDispatcher = CaseClassOrderedBuf.dispatch(c)(buildDispatcher)
val caseObjectDispatcher = CaseObjectOrderedBuf.dispatch(c)
val productDispatcher = ProductOrderedBuf.dispatch(c)(buildDispatcher)
val stringDispatcher = StringOrderedBuf.dispatch(c)
val traversablesDispatcher = TraversablesOrderedBuf.dispatch(c)(buildDispatcher)
val unitDispatcher = UnitOrderedBuf.dispatch(c)
val byteBufferDispatcher = ByteBufferOrderedBuf.dispatch(c)
val sealedTraitDispatcher = SealedTraitOrderedBuf.dispatch(c)(buildDispatcher)
OrderedSerializationProviderImpl
.normalizedDispatcher(c)(buildDispatcher)
.orElse(primitiveDispatcher)
.orElse(unitDispatcher)
.orElse(optionDispatcher)
.orElse(eitherDispatcher)
.orElse(stringDispatcher)
.orElse(byteBufferDispatcher)
.orElse(traversablesDispatcher)
.orElse(caseClassDispatcher)
.orElse(caseObjectDispatcher)
.orElse(productDispatcher)
.orElse(sealedTraitDispatcher)
}
def fallbackImplicitDispatcher(c: Context): PartialFunction[c.Type, TreeOrderedBuf[c.type]] =
ImplicitOrderedBuf.dispatch(c)
  // Outer dispatcher: do not do an implicit lookup at the outermost level; it makes no sense there and should just fail.
private def outerDispatcher(c: Context): PartialFunction[c.Type, TreeOrderedBuf[c.type]] = {
import c.universe._
scaldingBasicDispatchers(c)(OrderedSerializationProviderImpl.innerDispatcher(c)).orElse {
case tpe: Type =>
c.abort(c.enclosingPosition, s"""Unable to find OrderedSerialization for type ${tpe}""")
}
}
// Same as the outer dispatcher but we allow an implicit fallback for fields.
// So in essence it never fails to do a lookup
private def innerDispatcher(c: Context): PartialFunction[c.Type, TreeOrderedBuf[c.type]] = {
val innerF = scaldingBasicDispatchers(c)(OrderedSerializationProviderImpl.innerDispatcher(c))
val f: PartialFunction[c.Type, TreeOrderedBuf[c.type]] = {
case tpe if innerF.isDefinedAt(tpe) =>
scala.util.Try(innerF(tpe)).getOrElse(fallbackImplicitDispatcher(c)(tpe))
case tpe => fallbackImplicitDispatcher(c)(tpe)
}
f
}
def apply[T](c: Context)(implicit T: c.WeakTypeTag[T]): c.Expr[OrderedSerialization[T]] = {
val b: TreeOrderedBuf[c.type] = outerDispatcher(c)(T.tpe)
val res = TreeOrderedBuf.toOrderedSerialization[T](c)(b)
// println(res)
res
}
}
|
jzmq/scalding
|
scalding-serialization/src/main/scala/com/twitter/scalding/serialization/macros/impl/OrderedBufferableProviderImpl.scala
|
Scala
|
apache-2.0
| 3,999 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.util.concurrent.atomic.AtomicInteger
import javax.annotation.concurrent.GuardedBy
import scala.collection.mutable.{ArrayBuffer, ListBuffer}
import scala.util.control.NonFatal
import org.apache.spark.internal.Logging
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.encoders.encoderFor
import org.apache.spark.sql.catalyst.expressions.{Attribute, UnsafeRow}
import org.apache.spark.sql.catalyst.plans.logical.{LeafNode, LogicalPlan, Statistics}
import org.apache.spark.sql.catalyst.plans.logical.statsEstimation.EstimationUtils
import org.apache.spark.sql.catalyst.streaming.InternalOutputModes._
import org.apache.spark.sql.catalyst.util.truncatedString
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources.v2._
import org.apache.spark.sql.sources.v2.reader._
import org.apache.spark.sql.sources.v2.reader.streaming.{ContinuousStream, MicroBatchStream, Offset => OffsetV2}
import org.apache.spark.sql.streaming.OutputMode
import org.apache.spark.sql.types.StructType
object MemoryStream {
protected val currentBlockId = new AtomicInteger(0)
protected val memoryStreamId = new AtomicInteger(0)
def apply[A : Encoder](implicit sqlContext: SQLContext): MemoryStream[A] =
new MemoryStream[A](memoryStreamId.getAndIncrement(), sqlContext)
}
/**
* A base class for memory stream implementations. Supports adding data and resetting.
*/
abstract class MemoryStreamBase[A : Encoder](sqlContext: SQLContext) extends BaseStreamingSource {
val encoder = encoderFor[A]
protected val attributes = encoder.schema.toAttributes
def toDS(): Dataset[A] = {
Dataset[A](sqlContext.sparkSession, logicalPlan)
}
def toDF(): DataFrame = {
Dataset.ofRows(sqlContext.sparkSession, logicalPlan)
}
def addData(data: A*): Offset = {
addData(data.toTraversable)
}
def fullSchema(): StructType = encoder.schema
protected val logicalPlan: LogicalPlan = {
StreamingRelationV2(
MemoryStreamTableProvider,
"memory",
new MemoryStreamTable(this),
Map.empty,
attributes,
None)(sqlContext.sparkSession)
}
def addData(data: TraversableOnce[A]): Offset
}
// This class is used to indicate the memory stream data source. We don't actually use it, as
// memory stream is for test only and we never look it up by name.
object MemoryStreamTableProvider extends TableProvider {
override def getTable(options: DataSourceOptions): Table = {
throw new IllegalStateException("MemoryStreamTableProvider should not be used.")
}
}
class MemoryStreamTable(val stream: MemoryStreamBase[_]) extends Table
with SupportsMicroBatchRead with SupportsContinuousRead {
override def name(): String = "MemoryStreamDataSource"
override def schema(): StructType = stream.fullSchema()
override def newScanBuilder(options: DataSourceOptions): ScanBuilder = {
new MemoryStreamScanBuilder(stream)
}
}
class MemoryStreamScanBuilder(stream: MemoryStreamBase[_]) extends ScanBuilder with Scan {
override def build(): Scan = this
override def description(): String = "MemoryStreamDataSource"
override def readSchema(): StructType = stream.fullSchema()
override def toMicroBatchStream(checkpointLocation: String): MicroBatchStream = {
stream.asInstanceOf[MicroBatchStream]
}
override def toContinuousStream(checkpointLocation: String): ContinuousStream = {
stream.asInstanceOf[ContinuousStream]
}
}
/**
 * A [[Source]] that produces values stored in memory as they are added by the user. This [[Source]]
* is intended for use in unit tests as it can only replay data when the object is still
* available.
*/
case class MemoryStream[A : Encoder](id: Int, sqlContext: SQLContext)
extends MemoryStreamBase[A](sqlContext) with MicroBatchStream with Logging {
protected val output = logicalPlan.output
/**
* All batches from `lastCommittedOffset + 1` to `currentOffset`, inclusive.
* Stored in a ListBuffer to facilitate removing committed batches.
*/
@GuardedBy("this")
protected val batches = new ListBuffer[Array[UnsafeRow]]
@GuardedBy("this")
protected var currentOffset: LongOffset = new LongOffset(-1)
@GuardedBy("this")
protected var startOffset = new LongOffset(-1)
@GuardedBy("this")
private var endOffset = new LongOffset(-1)
/**
* Last offset that was discarded, or -1 if no commits have occurred. Note that the value
* -1 is used in calculations below and isn't just an arbitrary constant.
*/
@GuardedBy("this")
protected var lastOffsetCommitted : LongOffset = new LongOffset(-1)
def addData(data: TraversableOnce[A]): Offset = {
val objects = data.toSeq
val rows = objects.iterator.map(d => encoder.toRow(d).copy().asInstanceOf[UnsafeRow]).toArray
logDebug(s"Adding: $objects")
this.synchronized {
currentOffset = currentOffset + 1
batches += rows
currentOffset
}
}
override def toString: String = {
s"MemoryStream[${truncatedString(output, ",", SQLConf.get.maxToStringFields)}]"
}
override def deserializeOffset(json: String): OffsetV2 = LongOffset(json.toLong)
override def initialOffset: OffsetV2 = LongOffset(-1)
override def latestOffset(): OffsetV2 = {
if (currentOffset.offset == -1) null else currentOffset
}
override def planInputPartitions(start: OffsetV2, end: OffsetV2): Array[InputPartition] = {
val startOffset = start.asInstanceOf[LongOffset]
val endOffset = end.asInstanceOf[LongOffset]
synchronized {
// Compute the internal batch numbers to fetch: [startOrdinal, endOrdinal)
val startOrdinal = startOffset.offset.toInt + 1
val endOrdinal = endOffset.offset.toInt + 1
// Internal buffer only holds the batches after lastCommittedOffset.
val newBlocks = synchronized {
val sliceStart = startOrdinal - lastOffsetCommitted.offset.toInt - 1
val sliceEnd = endOrdinal - lastOffsetCommitted.offset.toInt - 1
assert(sliceStart <= sliceEnd, s"sliceStart: $sliceStart sliceEnd: $sliceEnd")
batches.slice(sliceStart, sliceEnd)
}
logDebug(generateDebugString(newBlocks.flatten, startOrdinal, endOrdinal))
newBlocks.map { block =>
new MemoryStreamInputPartition(block)
}.toArray
}
}
override def createReaderFactory(): PartitionReaderFactory = {
MemoryStreamReaderFactory
}
private def generateDebugString(
rows: Seq[UnsafeRow],
startOrdinal: Int,
endOrdinal: Int): String = {
val fromRow = encoder.resolveAndBind().fromRow _
s"MemoryBatch [$startOrdinal, $endOrdinal]: " +
s"${rows.map(row => fromRow(row)).mkString(", ")}"
}
override def commit(end: OffsetV2): Unit = synchronized {
def check(newOffset: LongOffset): Unit = {
val offsetDiff = (newOffset.offset - lastOffsetCommitted.offset).toInt
if (offsetDiff < 0) {
sys.error(s"Offsets committed out of order: $lastOffsetCommitted followed by $end")
}
batches.trimStart(offsetDiff)
lastOffsetCommitted = newOffset
}
LongOffset.convert(end) match {
case Some(lo) => check(lo)
case None => sys.error(s"MemoryStream.commit() received an offset ($end) " +
"that did not originate with an instance of this class")
}
}
override def stop() {}
def reset(): Unit = synchronized {
batches.clear()
startOffset = LongOffset(-1)
endOffset = LongOffset(-1)
currentOffset = new LongOffset(-1)
lastOffsetCommitted = new LongOffset(-1)
}
}
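/*
 * Typical test usage (a sketch; assumes an implicit SQLContext is in scope and the
 * query/table names are arbitrary):
 *
 *   implicit val sqlCtx = spark.sqlContext
 *   val input = MemoryStream[Int]
 *   val query = input.toDS().writeStream.format("memory").queryName("t").start()
 *   input.addData(1, 2, 3)        // returns the offset of the newly added batch
 *   query.processAllAvailable()
 *   spark.table("t").collect()    // the three rows written so far
 */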
class MemoryStreamInputPartition(val records: Array[UnsafeRow]) extends InputPartition
object MemoryStreamReaderFactory extends PartitionReaderFactory {
override def createReader(partition: InputPartition): PartitionReader[InternalRow] = {
val records = partition.asInstanceOf[MemoryStreamInputPartition].records
new PartitionReader[InternalRow] {
private var currentIndex = -1
override def next(): Boolean = {
// Return true as long as the new index is in the array.
currentIndex += 1
currentIndex < records.length
}
override def get(): UnsafeRow = records(currentIndex)
override def close(): Unit = {}
}
}
}
/** A common trait for MemorySinks with methods used for testing */
trait MemorySinkBase extends BaseStreamingSink {
def allData: Seq[Row]
def latestBatchData: Seq[Row]
def dataSinceBatch(sinceBatchId: Long): Seq[Row]
def latestBatchId: Option[Long]
}
/**
* A sink that stores the results in memory. This [[Sink]] is primarily intended for use in unit
* tests and does not provide durability.
*/
class MemorySink(val schema: StructType, outputMode: OutputMode) extends Sink
with MemorySinkBase with Logging {
private case class AddedData(batchId: Long, data: Array[Row])
  /** An ordered list of batches that have been written to this [[Sink]]. */
@GuardedBy("this")
private val batches = new ArrayBuffer[AddedData]()
/** Returns all rows that are stored in this [[Sink]]. */
def allData: Seq[Row] = synchronized {
batches.flatMap(_.data)
}
def latestBatchId: Option[Long] = synchronized {
batches.lastOption.map(_.batchId)
}
def latestBatchData: Seq[Row] = synchronized { batches.lastOption.toSeq.flatten(_.data) }
def dataSinceBatch(sinceBatchId: Long): Seq[Row] = synchronized {
batches.filter(_.batchId > sinceBatchId).flatMap(_.data)
}
def toDebugString: String = synchronized {
batches.map { case AddedData(batchId, data) =>
val dataStr = try data.mkString(" ") catch {
case NonFatal(e) => "[Error converting to string]"
}
s"$batchId: $dataStr"
}.mkString("\\n")
}
override def addBatch(batchId: Long, data: DataFrame): Unit = {
val notCommitted = synchronized {
latestBatchId.isEmpty || batchId > latestBatchId.get
}
if (notCommitted) {
logDebug(s"Committing batch $batchId to $this")
outputMode match {
case Append | Update =>
val rows = AddedData(batchId, data.collect())
synchronized { batches += rows }
case Complete =>
val rows = AddedData(batchId, data.collect())
synchronized {
batches.clear()
batches += rows
}
case _ =>
throw new IllegalArgumentException(
s"Output mode $outputMode is not supported by MemorySink")
}
} else {
logDebug(s"Skipping already committed batch: $batchId")
}
}
def clear(): Unit = synchronized {
batches.clear()
}
override def toString(): String = "MemorySink"
}
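/*
 * Reading results back in a test (a sketch; the sink is normally created by the
 * "memory" format rather than instantiated directly, and `df` is hypothetical):
 *
 *   val sink = new MemorySink(df.schema, OutputMode.Append())
 *   // ... attach it to a query and process a few batches ...
 *   sink.latestBatchId      // e.g. Some(2)
 *   sink.latestBatchData    // rows of the most recent batch only
 *   sink.allData            // rows accumulated across batches (Append/Update)
 */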
/**
* Used to query the data that has been written into a [[MemorySink]].
*/
case class MemoryPlan(sink: MemorySink, output: Seq[Attribute]) extends LeafNode {
def this(sink: MemorySink) = this(sink, sink.schema.toAttributes)
private val sizePerRow = EstimationUtils.getSizePerRow(sink.schema.toAttributes)
override def computeStats(): Statistics = Statistics(sizePerRow * sink.allData.size)
}
|
WindCanDie/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/memory.scala
|
Scala
|
apache-2.0
| 11,905 |
import org.broadinstitute.gatk.queue.QScript
import org.broadinstitute.gatk.queue.extensions.gatk._
class callVariants extends QScript {
def script() {
val hc = new HaplotypeCaller
hc.reference_sequence = new File ("hg42.fa")
hc.standard_min_confidence_threshold_for_emitting = 10
hc.standard_min_confidence_threshold_for_calling = 30
hc.input_file :+= new File ("bogusbams/bogus1.bam")
hc.out = new File ("bogusbams/bogus1.bam.vcf")
hc.scatterCount = 20
hc.memoryLimit = 2
add(hc)
}
}
|
michael-weinstein/GATKbyDirectory
|
bogus1.bam.scatter.scala
|
Scala
|
gpl-3.0
| 506 |
import java.io._
import Implicits._
import akka.event.LoggingAdapter
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.concurrent.{Await, Future, Promise}
import scala.sys.process.{ProcessIO, _}
/**
* Created by johan on 11.09.2016.
*/
object Implicits {
implicit class Expiry(val d: Deadline) extends AnyVal {
def expiring(f: => Unit) =
Future(Await.ready(Promise().future, d.timeLeft)) onComplete (_ => f)
}
}
trait processCom extends types {
def logger: LoggingAdapter
var outputStream: OutputStream = new OutputStream {
override def write(b: Int): Unit = ???
}
var inputStream: InputStream= new InputStream {
override def read(): Int = ???
}
def initFaceDetection(): Unit = {
Seq("th", "/mnt/vggface/demo.lua") run new ProcessIO(writeJob, readJob, errJob)
2 seconds fromNow expiring {
writeToStream("/mnt/vggface/ak.png")
}
}
def writeToStream(input: String): Vector = {
logger.info("issues another command"+input)
logger.info("bla")
outputStream.write((input+"\\n").getBytes())
outputStream.flush()
val inputStreamReader = new InputStreamReader(inputStream)
val bufferedReader = new BufferedReader(inputStreamReader)
var count = 0
val res = Iterator continually bufferedReader.readLine() takeWhile{
byte: String => {
inputStream.available() > 0
}
}
val res2 = res.toList.dropRight(1).map(_.toDouble)
logger.info("no more blocking"+res2.length.toString)
res2
}
def readJob(in: InputStream) {
inputStream = in
    // keep a handle on the process stdout so writeToStream can read replies
}
def writeJob(out: OutputStream) {
outputStream = out
}
def errJob(err: InputStream) {
    // stderr is currently ignored
}
}
|
Sukram21/FlixFace
|
src/main/scala/processCom.scala
|
Scala
|
mit
| 1,783 |
package com.twitter.diffy.proxy
import javax.inject.Singleton
import com.google.inject.Provides
import com.twitter.diffy.analysis._
import com.twitter.diffy.lifter.{FieldMap, Message}
import com.twitter.finagle._
import com.twitter.inject.TwitterModule
import com.twitter.util._
import org.apache.log4j
object DifferenceProxyModule extends TwitterModule {
@Provides
@Singleton
def providesDifferenceProxy(
settings: Settings,
collector: InMemoryDifferenceCollector,
joinedDifferences: JoinedDifferences,
analyzer: DifferenceAnalyzer
): DifferenceProxy =
settings.protocol match {
case "thrift" => ThriftDifferenceProxy(settings, collector, joinedDifferences, analyzer)
case "http" => SimpleHttpDifferenceProxy(settings, collector, joinedDifferences, analyzer)
case "https" => SimpleHttpsDifferenceProxy(settings, collector, joinedDifferences, analyzer)
}
}
object DifferenceProxy {
object NoResponseException extends Exception("No responses provided by diffy")
val NoResponseExceptionFuture = Future.exception(NoResponseException)
val log = log4j.Logger.getLogger(classOf[DifferenceProxy])
}
trait DifferenceProxy {
import DifferenceProxy._
type Req
type Rep
type Srv <: ClientService[Req, Rep]
val server: ListeningServer
val settings: Settings
var lastReset: Time = Time.now
def serviceFactory(serverset: String, label: String): Srv
def liftRequest(req: Req): Future[Message]
def liftResponse(rep: Try[Rep]): Future[Message]
// Clients for services
val candidate = serviceFactory(settings.candidate.path, "candidate")
val primary = serviceFactory(settings.primary.path, "primary")
val secondary = serviceFactory(settings.secondary.path, "secondary")
val collector: InMemoryDifferenceCollector
val joinedDifferences: JoinedDifferences
val analyzer: DifferenceAnalyzer
private[this] lazy val multicastHandler =
new SequentialMulticastService(Seq(primary.client, candidate.client, secondary.client))
def proxy = new Service[Req, Rep] {
override def apply(req: Req): Future[Rep] = {
log.info(s"Proxy request: $req")
val rawResponses =
multicastHandler(req) respond {
case Return(_) => log.info("success networking")
case Throw(t) => log.error("error networking", t)
}
val responses: Future[Seq[Message]] =
rawResponses flatMap { reps =>
Future.collect(reps map liftResponse) respond {
case Return(rs) =>
log.info(s"success lifting ${rs.head.endpoint}")
case Throw(t) => log.error(s"error lifting req: $req", t)
}
}
responses.rescue {
case ex: Throwable =>
          // Fall back to a generic difference when one or more services are down or otherwise fail
Future.const(Try(Seq[Message](
Message(Some("200"), FieldMap(Map())),
Message(Some("404"), FieldMap(Map())),
Message(Some("200"), FieldMap(Map()))
)))
} foreach {
case Seq(primaryResponse, candidateResponse, secondaryResponse) =>
liftRequest(req) respond {
case Return(m) =>
log.info(s"success lifting request for ${m.endpoint}")
case Throw(t) => log.error("error lifting request", t)
} foreach { req =>
analyzer(req, candidateResponse, primaryResponse, secondaryResponse)
}
}
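      // Diffy never returns a real upstream response to the caller: once the comparison has
      // been kicked off, every proxied request is completed with NoResponseException.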
NoResponseExceptionFuture
}
}
def clear() = {
lastReset = Time.now
analyzer.clear()
}
}
|
SDUUitgevers/diffy
|
src/main/scala/com/twitter/diffy/proxy/DifferenceProxy.scala
|
Scala
|
apache-2.0
| 3,547 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst
import org.apache.spark.sql.catalyst.analysis.{GetColumnByOrdinal, UnresolvedAttribute, UnresolvedExtractValue}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.objects._
import org.apache.spark.sql.catalyst.util.{DateTimeUtils, GenericArrayData}
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.{CalendarInterval, UTF8String}
/**
* A helper trait to create [[org.apache.spark.sql.catalyst.encoders.ExpressionEncoder]]s
* for classes whose fields are entirely defined by constructor params but should not be
* case classes.
*/
trait DefinedByConstructorParams
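// Minimal usage sketch (the class name is hypothetical): a plain class whose fields are all
// constructor parameters can opt into encoder/schema derivation by mixing this trait in, e.g.
//
//   class Point(val x: Int, val y: Int) extends DefinedByConstructorParams
//
// schemaFor would then derive StructType(StructField("x", IntegerType, false) ::
// StructField("y", IntegerType, false) :: Nil).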
/**
* A default version of ScalaReflection that uses the runtime universe.
*/
object ScalaReflection extends ScalaReflection {
val universe: scala.reflect.runtime.universe.type = scala.reflect.runtime.universe
  // Since we create a runtime mirror using the class loader of the current thread,
  // this must be a def: every call to mirror then picks up the class loader of the
  // calling thread.
override def mirror: universe.Mirror = {
universe.runtimeMirror(Thread.currentThread().getContextClassLoader)
}
import universe._
// The Predef.Map is scala.collection.immutable.Map.
  // Since the map values can be mutable, we explicitly import scala.collection.Map here.
import scala.collection.Map
/**
* Returns the Spark SQL DataType for a given scala type. Where this is not an exact mapping
* to a native type, an ObjectType is returned. Special handling is also used for Arrays including
* those that hold primitive types.
*
* Unlike `schemaFor`, this function doesn't do any massaging of types into the Spark SQL type
* system. As a result, ObjectType will be returned for things like boxed Integers
*/
def dataTypeFor[T : TypeTag]: DataType = dataTypeFor(localTypeOf[T])
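  // For example (illustrative, following the match below): dataTypeFor[Int] yields IntegerType,
  // dataTypeFor[Array[Int]] yields ObjectType(classOf[Array[Int]]), and dataTypeFor[java.lang.Integer]
  // falls through to ObjectType(classOf[java.lang.Integer]) because boxed types are not massaged here.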
private def dataTypeFor(tpe: `Type`): DataType = {
tpe.dealias match {
case t if t <:< definitions.IntTpe => IntegerType
case t if t <:< definitions.LongTpe => LongType
case t if t <:< definitions.DoubleTpe => DoubleType
case t if t <:< definitions.FloatTpe => FloatType
case t if t <:< definitions.ShortTpe => ShortType
case t if t <:< definitions.ByteTpe => ByteType
case t if t <:< definitions.BooleanTpe => BooleanType
case t if t <:< localTypeOf[Array[Byte]] => BinaryType
case t if t <:< localTypeOf[CalendarInterval] => CalendarIntervalType
case t if t <:< localTypeOf[Decimal] => DecimalType.SYSTEM_DEFAULT
case _ =>
val className = getClassNameFromType(tpe)
className match {
case "scala.Array" =>
val TypeRef(_, _, Seq(elementType)) = tpe
arrayClassFor(elementType)
case other =>
val clazz = getClassFromType(tpe)
ObjectType(clazz)
}
}
}
/**
* Given a type `T` this function constructs `ObjectType` that holds a class of type
* `Array[T]`.
*
* Special handling is performed for primitive types to map them back to their raw
* JVM form instead of the Scala Array that handles auto boxing.
*/
private def arrayClassFor(tpe: `Type`): ObjectType = {
val cls = tpe.dealias match {
case t if t <:< definitions.IntTpe => classOf[Array[Int]]
case t if t <:< definitions.LongTpe => classOf[Array[Long]]
case t if t <:< definitions.DoubleTpe => classOf[Array[Double]]
case t if t <:< definitions.FloatTpe => classOf[Array[Float]]
case t if t <:< definitions.ShortTpe => classOf[Array[Short]]
case t if t <:< definitions.ByteTpe => classOf[Array[Byte]]
case t if t <:< definitions.BooleanTpe => classOf[Array[Boolean]]
case other =>
// There is probably a better way to do this, but I couldn't find it...
val elementType = dataTypeFor(other).asInstanceOf[ObjectType].cls
java.lang.reflect.Array.newInstance(elementType, 1).getClass
}
ObjectType(cls)
}
/**
   * Returns true if the value of this data type is the same between the internal and external representations.
*/
def isNativeType(dt: DataType): Boolean = dt match {
case NullType | BooleanType | ByteType | ShortType | IntegerType | LongType |
FloatType | DoubleType | BinaryType | CalendarIntervalType => true
case _ => false
}
/**
* Returns an expression that can be used to deserialize an input row to an object of type `T`
* with a compatible schema. Fields of the row will be extracted using UnresolvedAttributes
* of the same name as the constructor arguments. Nested classes will have their fields accessed
* using UnresolvedExtractValue.
*
* When used on a primitive type, the constructor will instead default to extracting the value
* from ordinal 0 (since there are no names to map to). The actual location can be moved by
* calling resolve/bind with a new schema.
*/
def deserializerFor[T : TypeTag]: Expression = {
val tpe = localTypeOf[T]
val clsName = getClassNameFromType(tpe)
val walkedTypePath = s"""- root class: "$clsName"""" :: Nil
deserializerFor(tpe, None, walkedTypePath)
}
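  // Illustrative sketch (the class name is hypothetical): for `case class Data(a: Int, b: String)`,
  // `deserializerFor[Data]` builds a NewInstance of Data whose arguments are extracted via
  // UnresolvedAttribute("a") and UnresolvedAttribute("b"), each wrapped in an UpCast recording the
  // expected Catalyst type (IntegerType and StringType); the Int field additionally gets an
  // AssertNotNull, and the String field an Invoke of toString.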
private def deserializerFor(
tpe: `Type`,
path: Option[Expression],
walkedTypePath: Seq[String]): Expression = {
/** Returns the current path with a sub-field extracted. */
def addToPath(part: String, dataType: DataType, walkedTypePath: Seq[String]): Expression = {
val newPath = path
.map(p => UnresolvedExtractValue(p, expressions.Literal(part)))
.getOrElse(UnresolvedAttribute(part))
upCastToExpectedType(newPath, dataType, walkedTypePath)
}
/** Returns the current path with a field at ordinal extracted. */
def addToPathOrdinal(
ordinal: Int,
dataType: DataType,
walkedTypePath: Seq[String]): Expression = {
val newPath = path
.map(p => GetStructField(p, ordinal))
.getOrElse(GetColumnByOrdinal(ordinal, dataType))
upCastToExpectedType(newPath, dataType, walkedTypePath)
}
/** Returns the current path or `GetColumnByOrdinal`. */
def getPath: Expression = {
val dataType = schemaFor(tpe).dataType
if (path.isDefined) {
path.get
} else {
upCastToExpectedType(GetColumnByOrdinal(0, dataType), dataType, walkedTypePath)
}
}
    /**
     * When we build the `deserializer` for an encoder, we set up a lot of "unresolved" stuff
     * and lose the required data type, which may lead to a runtime error if the real type
     * doesn't match the encoder's schema.
     * For example, if we build an encoder for `case class Data(a: Int, b: String)` and the real
     * type is [a: int, b: long], then we will hit a runtime error saying that we can't construct
     * class `Data` with an int and a long, because we lost the information that `b` should be a
     * string.
     *
     * This method helps us "remember" the required data type by adding an `UpCast`. Note that we
     * only need to do this for leaf nodes.
     */
def upCastToExpectedType(
expr: Expression,
expected: DataType,
walkedTypePath: Seq[String]): Expression = expected match {
case _: StructType => expr
case _: ArrayType => expr
// TODO: ideally we should also skip MapType, but nested StructType inside MapType is rare and
// it's not trivial to support by-name resolution for StructType inside MapType.
case _ => UpCast(expr, expected, walkedTypePath)
}
tpe.dealias match {
case t if !dataTypeFor(t).isInstanceOf[ObjectType] => getPath
case t if t <:< localTypeOf[Option[_]] =>
val TypeRef(_, _, Seq(optType)) = t
val className = getClassNameFromType(optType)
val newTypePath = s"""- option value class: "$className"""" +: walkedTypePath
WrapOption(deserializerFor(optType, path, newTypePath), dataTypeFor(optType))
case t if t <:< localTypeOf[java.lang.Integer] =>
val boxedType = classOf[java.lang.Integer]
val objectType = ObjectType(boxedType)
StaticInvoke(boxedType, objectType, "valueOf", getPath :: Nil, returnNullable = false)
case t if t <:< localTypeOf[java.lang.Long] =>
val boxedType = classOf[java.lang.Long]
val objectType = ObjectType(boxedType)
StaticInvoke(boxedType, objectType, "valueOf", getPath :: Nil, returnNullable = false)
case t if t <:< localTypeOf[java.lang.Double] =>
val boxedType = classOf[java.lang.Double]
val objectType = ObjectType(boxedType)
StaticInvoke(boxedType, objectType, "valueOf", getPath :: Nil, returnNullable = false)
case t if t <:< localTypeOf[java.lang.Float] =>
val boxedType = classOf[java.lang.Float]
val objectType = ObjectType(boxedType)
StaticInvoke(boxedType, objectType, "valueOf", getPath :: Nil, returnNullable = false)
case t if t <:< localTypeOf[java.lang.Short] =>
val boxedType = classOf[java.lang.Short]
val objectType = ObjectType(boxedType)
StaticInvoke(boxedType, objectType, "valueOf", getPath :: Nil, returnNullable = false)
case t if t <:< localTypeOf[java.lang.Byte] =>
val boxedType = classOf[java.lang.Byte]
val objectType = ObjectType(boxedType)
StaticInvoke(boxedType, objectType, "valueOf", getPath :: Nil, returnNullable = false)
case t if t <:< localTypeOf[java.lang.Boolean] =>
val boxedType = classOf[java.lang.Boolean]
val objectType = ObjectType(boxedType)
StaticInvoke(boxedType, objectType, "valueOf", getPath :: Nil, returnNullable = false)
case t if t <:< localTypeOf[java.sql.Date] =>
StaticInvoke(
DateTimeUtils.getClass,
ObjectType(classOf[java.sql.Date]),
"toJavaDate",
getPath :: Nil,
returnNullable = false)
case t if t <:< localTypeOf[java.sql.Timestamp] =>
StaticInvoke(
DateTimeUtils.getClass,
ObjectType(classOf[java.sql.Timestamp]),
"toJavaTimestamp",
getPath :: Nil,
returnNullable = false)
case t if t <:< localTypeOf[java.lang.String] =>
Invoke(getPath, "toString", ObjectType(classOf[String]), returnNullable = false)
case t if t <:< localTypeOf[java.math.BigDecimal] =>
Invoke(getPath, "toJavaBigDecimal", ObjectType(classOf[java.math.BigDecimal]),
returnNullable = false)
case t if t <:< localTypeOf[BigDecimal] =>
Invoke(getPath, "toBigDecimal", ObjectType(classOf[BigDecimal]), returnNullable = false)
case t if t <:< localTypeOf[java.math.BigInteger] =>
Invoke(getPath, "toJavaBigInteger", ObjectType(classOf[java.math.BigInteger]),
returnNullable = false)
case t if t <:< localTypeOf[scala.math.BigInt] =>
Invoke(getPath, "toScalaBigInt", ObjectType(classOf[scala.math.BigInt]),
returnNullable = false)
case t if t <:< localTypeOf[Array[_]] =>
val TypeRef(_, _, Seq(elementType)) = t
val Schema(dataType, elementNullable) = schemaFor(elementType)
val className = getClassNameFromType(elementType)
val newTypePath = s"""- array element class: "$className"""" +: walkedTypePath
val mapFunction: Expression => Expression = element => {
// upcast the array element to the data type the encoder expected.
val casted = upCastToExpectedType(element, dataType, newTypePath)
val converter = deserializerFor(elementType, Some(casted), newTypePath)
if (elementNullable) {
converter
} else {
AssertNotNull(converter, newTypePath)
}
}
val arrayData = UnresolvedMapObjects(mapFunction, getPath)
val arrayCls = arrayClassFor(elementType)
if (elementNullable) {
Invoke(arrayData, "array", arrayCls, returnNullable = false)
} else {
val primitiveMethod = elementType match {
case t if t <:< definitions.IntTpe => "toIntArray"
case t if t <:< definitions.LongTpe => "toLongArray"
case t if t <:< definitions.DoubleTpe => "toDoubleArray"
case t if t <:< definitions.FloatTpe => "toFloatArray"
case t if t <:< definitions.ShortTpe => "toShortArray"
case t if t <:< definitions.ByteTpe => "toByteArray"
case t if t <:< definitions.BooleanTpe => "toBooleanArray"
case other => throw new IllegalStateException("expect primitive array element type " +
"but got " + other)
}
Invoke(arrayData, primitiveMethod, arrayCls, returnNullable = false)
}
// We serialize a `Set` to Catalyst array. When we deserialize a Catalyst array
// to a `Set`, if there are duplicated elements, the elements will be de-duplicated.
case t if t <:< localTypeOf[Seq[_]] ||
t <:< localTypeOf[scala.collection.Set[_]] =>
val TypeRef(_, _, Seq(elementType)) = t
val Schema(dataType, elementNullable) = schemaFor(elementType)
val className = getClassNameFromType(elementType)
val newTypePath = s"""- array element class: "$className"""" +: walkedTypePath
val mapFunction: Expression => Expression = element => {
// upcast the array element to the data type the encoder expected.
val casted = upCastToExpectedType(element, dataType, newTypePath)
val converter = deserializerFor(elementType, Some(casted), newTypePath)
if (elementNullable) {
converter
} else {
AssertNotNull(converter, newTypePath)
}
}
val companion = t.dealias.typeSymbol.companion.typeSignature
val cls = companion.member(TermName("newBuilder")) match {
case NoSymbol if t <:< localTypeOf[Seq[_]] => classOf[Seq[_]]
case NoSymbol if t <:< localTypeOf[scala.collection.Set[_]] =>
classOf[scala.collection.Set[_]]
case _ => mirror.runtimeClass(t.typeSymbol.asClass)
}
UnresolvedMapObjects(mapFunction, getPath, Some(cls))
case t if t <:< localTypeOf[Map[_, _]] =>
// TODO: add walked type path for map
val TypeRef(_, _, Seq(keyType, valueType)) = t
CatalystToExternalMap(
p => deserializerFor(keyType, Some(p), walkedTypePath),
p => deserializerFor(valueType, Some(p), walkedTypePath),
getPath,
mirror.runtimeClass(t.typeSymbol.asClass)
)
case t if t.typeSymbol.annotations.exists(_.tree.tpe =:= typeOf[SQLUserDefinedType]) =>
val udt = getClassFromType(t).getAnnotation(classOf[SQLUserDefinedType]).udt().newInstance()
val obj = NewInstance(
udt.userClass.getAnnotation(classOf[SQLUserDefinedType]).udt(),
Nil,
dataType = ObjectType(udt.userClass.getAnnotation(classOf[SQLUserDefinedType]).udt()))
Invoke(obj, "deserialize", ObjectType(udt.userClass), getPath :: Nil)
case t if UDTRegistration.exists(getClassNameFromType(t)) =>
val udt = UDTRegistration.getUDTFor(getClassNameFromType(t)).get.newInstance()
.asInstanceOf[UserDefinedType[_]]
val obj = NewInstance(
udt.getClass,
Nil,
dataType = ObjectType(udt.getClass))
Invoke(obj, "deserialize", ObjectType(udt.userClass), getPath :: Nil)
case t if definedByConstructorParams(t) =>
val params = getConstructorParameters(t)
val cls = getClassFromType(tpe)
val arguments = params.zipWithIndex.map { case ((fieldName, fieldType), i) =>
val Schema(dataType, nullable) = schemaFor(fieldType)
val clsName = getClassNameFromType(fieldType)
val newTypePath = s"""- field (class: "$clsName", name: "$fieldName")""" +: walkedTypePath
        // For tuples, we grab the inner fields by ordinal instead of by name.
if (cls.getName startsWith "scala.Tuple") {
deserializerFor(
fieldType,
Some(addToPathOrdinal(i, dataType, newTypePath)),
newTypePath)
} else {
val constructor = deserializerFor(
fieldType,
Some(addToPath(fieldName, dataType, newTypePath)),
newTypePath)
if (!nullable) {
AssertNotNull(constructor, newTypePath)
} else {
constructor
}
}
}
val newInstance = NewInstance(cls, arguments, ObjectType(cls), propagateNull = false)
if (path.nonEmpty) {
expressions.If(
IsNull(getPath),
expressions.Literal.create(null, ObjectType(cls)),
newInstance
)
} else {
newInstance
}
}
}
/**
* Returns an expression for serializing an object of type T to an internal row.
*
* If the given type is not supported, i.e. there is no encoder can be built for this type,
* an [[UnsupportedOperationException]] will be thrown with detailed error message to explain
* the type path walked so far and which class we are not supporting.
* There are 4 kinds of type path:
* * the root type: `root class: "abc.xyz.MyClass"`
* * the value type of [[Option]]: `option value class: "abc.xyz.MyClass"`
* * the element type of [[Array]] or [[Seq]]: `array element class: "abc.xyz.MyClass"`
* * the field of [[Product]]: `field (class: "abc.xyz.MyClass", name: "myField")`
*/
def serializerFor[T : TypeTag](inputObject: Expression): CreateNamedStruct = {
val tpe = localTypeOf[T]
val clsName = getClassNameFromType(tpe)
val walkedTypePath = s"""- root class: "$clsName"""" :: Nil
serializerFor(inputObject, tpe, walkedTypePath) match {
case expressions.If(_, _, s: CreateNamedStruct) if definedByConstructorParams(tpe) => s
case other => CreateNamedStruct(expressions.Literal("value") :: other :: Nil)
}
}
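  // Illustrative sketch (the class name is hypothetical): for `case class Data(a: Int, b: String)`,
  // `serializerFor[Data](input)` yields a CreateNamedStruct pairing "a" with an Invoke of the `a`
  // accessor (IntegerType) and "b" with a StaticInvoke of UTF8String.fromString over the `b`
  // accessor; the null-guarding If produced by the private helper is stripped at this top level.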
/** Helper for extracting internal fields from a case class. */
private def serializerFor(
inputObject: Expression,
tpe: `Type`,
walkedTypePath: Seq[String],
seenTypeSet: Set[`Type`] = Set.empty): Expression = {
def toCatalystArray(input: Expression, elementType: `Type`): Expression = {
dataTypeFor(elementType) match {
case dt: ObjectType =>
val clsName = getClassNameFromType(elementType)
val newPath = s"""- array element class: "$clsName"""" +: walkedTypePath
MapObjects(serializerFor(_, elementType, newPath, seenTypeSet), input, dt)
case dt @ (BooleanType | ByteType | ShortType | IntegerType | LongType |
FloatType | DoubleType) =>
val cls = input.dataType.asInstanceOf[ObjectType].cls
if (cls.isArray && cls.getComponentType.isPrimitive) {
StaticInvoke(
classOf[UnsafeArrayData],
ArrayType(dt, false),
"fromPrimitiveArray",
input :: Nil,
returnNullable = false)
} else {
NewInstance(
classOf[GenericArrayData],
input :: Nil,
dataType = ArrayType(dt, schemaFor(elementType).nullable))
}
case dt =>
NewInstance(
classOf[GenericArrayData],
input :: Nil,
dataType = ArrayType(dt, schemaFor(elementType).nullable))
}
}
tpe.dealias match {
case _ if !inputObject.dataType.isInstanceOf[ObjectType] => inputObject
case t if t <:< localTypeOf[Option[_]] =>
val TypeRef(_, _, Seq(optType)) = t
val className = getClassNameFromType(optType)
val newPath = s"""- option value class: "$className"""" +: walkedTypePath
val unwrapped = UnwrapOption(dataTypeFor(optType), inputObject)
serializerFor(unwrapped, optType, newPath, seenTypeSet)
// Since List[_] also belongs to localTypeOf[Product], we put this case before
// "case t if definedByConstructorParams(t)" to make sure it will match to the
// case "localTypeOf[Seq[_]]"
case t if t <:< localTypeOf[Seq[_]] =>
val TypeRef(_, _, Seq(elementType)) = t
toCatalystArray(inputObject, elementType)
case t if t <:< localTypeOf[Array[_]] =>
val TypeRef(_, _, Seq(elementType)) = t
toCatalystArray(inputObject, elementType)
case t if t <:< localTypeOf[Map[_, _]] =>
val TypeRef(_, _, Seq(keyType, valueType)) = t
val keyClsName = getClassNameFromType(keyType)
val valueClsName = getClassNameFromType(valueType)
val keyPath = s"""- map key class: "$keyClsName"""" +: walkedTypePath
val valuePath = s"""- map value class: "$valueClsName"""" +: walkedTypePath
ExternalMapToCatalyst(
inputObject,
dataTypeFor(keyType),
serializerFor(_, keyType, keyPath, seenTypeSet),
keyNullable = !keyType.typeSymbol.asClass.isPrimitive,
dataTypeFor(valueType),
serializerFor(_, valueType, valuePath, seenTypeSet),
valueNullable = !valueType.typeSymbol.asClass.isPrimitive)
case t if t <:< localTypeOf[scala.collection.Set[_]] =>
val TypeRef(_, _, Seq(elementType)) = t
// There's no corresponding Catalyst type for `Set`, we serialize a `Set` to Catalyst array.
// Note that the property of `Set` is only kept when manipulating the data as domain object.
val newInput =
Invoke(
inputObject,
"toSeq",
ObjectType(classOf[Seq[_]]))
toCatalystArray(newInput, elementType)
case t if t <:< localTypeOf[String] =>
StaticInvoke(
classOf[UTF8String],
StringType,
"fromString",
inputObject :: Nil,
returnNullable = false)
case t if t <:< localTypeOf[java.sql.Timestamp] =>
StaticInvoke(
DateTimeUtils.getClass,
TimestampType,
"fromJavaTimestamp",
inputObject :: Nil,
returnNullable = false)
case t if t <:< localTypeOf[java.sql.Date] =>
StaticInvoke(
DateTimeUtils.getClass,
DateType,
"fromJavaDate",
inputObject :: Nil,
returnNullable = false)
case t if t <:< localTypeOf[BigDecimal] =>
StaticInvoke(
Decimal.getClass,
DecimalType.SYSTEM_DEFAULT,
"apply",
inputObject :: Nil,
returnNullable = false)
case t if t <:< localTypeOf[java.math.BigDecimal] =>
StaticInvoke(
Decimal.getClass,
DecimalType.SYSTEM_DEFAULT,
"apply",
inputObject :: Nil,
returnNullable = false)
case t if t <:< localTypeOf[java.math.BigInteger] =>
StaticInvoke(
Decimal.getClass,
DecimalType.BigIntDecimal,
"apply",
inputObject :: Nil,
returnNullable = false)
case t if t <:< localTypeOf[scala.math.BigInt] =>
StaticInvoke(
Decimal.getClass,
DecimalType.BigIntDecimal,
"apply",
inputObject :: Nil,
returnNullable = false)
case t if t <:< localTypeOf[java.lang.Integer] =>
Invoke(inputObject, "intValue", IntegerType)
case t if t <:< localTypeOf[java.lang.Long] =>
Invoke(inputObject, "longValue", LongType)
case t if t <:< localTypeOf[java.lang.Double] =>
Invoke(inputObject, "doubleValue", DoubleType)
case t if t <:< localTypeOf[java.lang.Float] =>
Invoke(inputObject, "floatValue", FloatType)
case t if t <:< localTypeOf[java.lang.Short] =>
Invoke(inputObject, "shortValue", ShortType)
case t if t <:< localTypeOf[java.lang.Byte] =>
Invoke(inputObject, "byteValue", ByteType)
case t if t <:< localTypeOf[java.lang.Boolean] =>
Invoke(inputObject, "booleanValue", BooleanType)
case t if t.typeSymbol.annotations.exists(_.tree.tpe =:= typeOf[SQLUserDefinedType]) =>
val udt = getClassFromType(t)
.getAnnotation(classOf[SQLUserDefinedType]).udt().newInstance()
val obj = NewInstance(
udt.userClass.getAnnotation(classOf[SQLUserDefinedType]).udt(),
Nil,
dataType = ObjectType(udt.userClass.getAnnotation(classOf[SQLUserDefinedType]).udt()))
Invoke(obj, "serialize", udt, inputObject :: Nil)
case t if UDTRegistration.exists(getClassNameFromType(t)) =>
val udt = UDTRegistration.getUDTFor(getClassNameFromType(t)).get.newInstance()
.asInstanceOf[UserDefinedType[_]]
val obj = NewInstance(
udt.getClass,
Nil,
dataType = ObjectType(udt.getClass))
Invoke(obj, "serialize", udt, inputObject :: Nil)
case t if definedByConstructorParams(t) =>
if (seenTypeSet.contains(t)) {
throw new UnsupportedOperationException(
s"cannot have circular references in class, but got the circular reference of class $t")
}
val params = getConstructorParameters(t)
val nonNullOutput = CreateNamedStruct(params.flatMap { case (fieldName, fieldType) =>
if (javaKeywords.contains(fieldName)) {
throw new UnsupportedOperationException(s"`$fieldName` is a reserved keyword and " +
"cannot be used as field name\\n" + walkedTypePath.mkString("\\n"))
}
val fieldValue = Invoke(
AssertNotNull(inputObject, walkedTypePath), fieldName, dataTypeFor(fieldType),
returnNullable = !fieldType.typeSymbol.asClass.isPrimitive)
val clsName = getClassNameFromType(fieldType)
val newPath = s"""- field (class: "$clsName", name: "$fieldName")""" +: walkedTypePath
expressions.Literal(fieldName) ::
serializerFor(fieldValue, fieldType, newPath, seenTypeSet + t) :: Nil
})
val nullOutput = expressions.Literal.create(null, nonNullOutput.dataType)
expressions.If(IsNull(inputObject), nullOutput, nonNullOutput)
case other =>
throw new UnsupportedOperationException(
s"No Encoder found for $tpe\\n" + walkedTypePath.mkString("\\n"))
}
}
/**
* Returns true if the given type is option of product type, e.g. `Option[Tuple2]`. Note that,
* we also treat [[DefinedByConstructorParams]] as product type.
*/
def optionOfProductType(tpe: `Type`): Boolean = {
tpe.dealias match {
case t if t <:< localTypeOf[Option[_]] =>
val TypeRef(_, _, Seq(optType)) = t
definedByConstructorParams(optType)
case _ => false
}
}
/**
* Returns the parameter names and types for the primary constructor of this class.
*
   * Note that it only works for Scala classes with a primary constructor, and currently doesn't
   * support inner classes.
*/
def getConstructorParameters(cls: Class[_]): Seq[(String, Type)] = {
val m = runtimeMirror(cls.getClassLoader)
val classSymbol = m.staticClass(cls.getName)
val t = classSymbol.selfType
getConstructorParameters(t)
}
/**
* Returns the parameter names for the primary constructor of this class.
*
* Logically we should call `getConstructorParameters` and throw away the parameter types to get
* parameter names, however there are some weird scala reflection problems and this method is a
* workaround to avoid getting parameter types.
*/
def getConstructorParameterNames(cls: Class[_]): Seq[String] = {
val m = runtimeMirror(cls.getClassLoader)
val classSymbol = m.staticClass(cls.getName)
val t = classSymbol.selfType
constructParams(t).map(_.name.toString)
}
/**
* Returns the parameter values for the primary constructor of this class.
*/
def getConstructorParameterValues(obj: DefinedByConstructorParams): Seq[AnyRef] = {
getConstructorParameterNames(obj.getClass).map { name =>
obj.getClass.getMethod(name).invoke(obj)
}
}
/*
* Retrieves the runtime class corresponding to the provided type.
*/
def getClassFromType(tpe: Type): Class[_] = mirror.runtimeClass(tpe.dealias.typeSymbol.asClass)
case class Schema(dataType: DataType, nullable: Boolean)
/** Returns a Sequence of attributes for the given case class type. */
def attributesFor[T: TypeTag]: Seq[Attribute] = schemaFor[T] match {
case Schema(s: StructType, _) =>
s.toAttributes
}
/** Returns a catalyst DataType and its nullability for the given Scala Type using reflection. */
def schemaFor[T: TypeTag]: Schema = schemaFor(localTypeOf[T])
/** Returns a catalyst DataType and its nullability for the given Scala Type using reflection. */
def schemaFor(tpe: `Type`): Schema = {
tpe.dealias match {
case t if t.typeSymbol.annotations.exists(_.tree.tpe =:= typeOf[SQLUserDefinedType]) =>
val udt = getClassFromType(t).getAnnotation(classOf[SQLUserDefinedType]).udt().newInstance()
Schema(udt, nullable = true)
case t if UDTRegistration.exists(getClassNameFromType(t)) =>
val udt = UDTRegistration.getUDTFor(getClassNameFromType(t)).get.newInstance()
.asInstanceOf[UserDefinedType[_]]
Schema(udt, nullable = true)
case t if t <:< localTypeOf[Option[_]] =>
val TypeRef(_, _, Seq(optType)) = t
Schema(schemaFor(optType).dataType, nullable = true)
case t if t <:< localTypeOf[Array[Byte]] => Schema(BinaryType, nullable = true)
case t if t <:< localTypeOf[Array[_]] =>
val TypeRef(_, _, Seq(elementType)) = t
val Schema(dataType, nullable) = schemaFor(elementType)
Schema(ArrayType(dataType, containsNull = nullable), nullable = true)
case t if t <:< localTypeOf[Seq[_]] =>
val TypeRef(_, _, Seq(elementType)) = t
val Schema(dataType, nullable) = schemaFor(elementType)
Schema(ArrayType(dataType, containsNull = nullable), nullable = true)
case t if t <:< localTypeOf[Map[_, _]] =>
val TypeRef(_, _, Seq(keyType, valueType)) = t
val Schema(valueDataType, valueNullable) = schemaFor(valueType)
Schema(MapType(schemaFor(keyType).dataType,
valueDataType, valueContainsNull = valueNullable), nullable = true)
case t if t <:< localTypeOf[Set[_]] =>
val TypeRef(_, _, Seq(elementType)) = t
val Schema(dataType, nullable) = schemaFor(elementType)
Schema(ArrayType(dataType, containsNull = nullable), nullable = true)
case t if t <:< localTypeOf[String] => Schema(StringType, nullable = true)
case t if t <:< localTypeOf[java.sql.Timestamp] => Schema(TimestampType, nullable = true)
case t if t <:< localTypeOf[java.sql.Date] => Schema(DateType, nullable = true)
case t if t <:< localTypeOf[BigDecimal] => Schema(DecimalType.SYSTEM_DEFAULT, nullable = true)
case t if t <:< localTypeOf[java.math.BigDecimal] =>
Schema(DecimalType.SYSTEM_DEFAULT, nullable = true)
case t if t <:< localTypeOf[java.math.BigInteger] =>
Schema(DecimalType.BigIntDecimal, nullable = true)
case t if t <:< localTypeOf[scala.math.BigInt] =>
Schema(DecimalType.BigIntDecimal, nullable = true)
case t if t <:< localTypeOf[Decimal] => Schema(DecimalType.SYSTEM_DEFAULT, nullable = true)
case t if t <:< localTypeOf[java.lang.Integer] => Schema(IntegerType, nullable = true)
case t if t <:< localTypeOf[java.lang.Long] => Schema(LongType, nullable = true)
case t if t <:< localTypeOf[java.lang.Double] => Schema(DoubleType, nullable = true)
case t if t <:< localTypeOf[java.lang.Float] => Schema(FloatType, nullable = true)
case t if t <:< localTypeOf[java.lang.Short] => Schema(ShortType, nullable = true)
case t if t <:< localTypeOf[java.lang.Byte] => Schema(ByteType, nullable = true)
case t if t <:< localTypeOf[java.lang.Boolean] => Schema(BooleanType, nullable = true)
case t if t <:< definitions.IntTpe => Schema(IntegerType, nullable = false)
case t if t <:< definitions.LongTpe => Schema(LongType, nullable = false)
case t if t <:< definitions.DoubleTpe => Schema(DoubleType, nullable = false)
case t if t <:< definitions.FloatTpe => Schema(FloatType, nullable = false)
case t if t <:< definitions.ShortTpe => Schema(ShortType, nullable = false)
case t if t <:< definitions.ByteTpe => Schema(ByteType, nullable = false)
case t if t <:< definitions.BooleanTpe => Schema(BooleanType, nullable = false)
case t if definedByConstructorParams(t) =>
val params = getConstructorParameters(t)
Schema(StructType(
params.map { case (fieldName, fieldType) =>
val Schema(dataType, nullable) = schemaFor(fieldType)
StructField(fieldName, dataType, nullable)
}), nullable = true)
case other =>
throw new UnsupportedOperationException(s"Schema for type $other is not supported")
}
}
/**
* Whether the fields of the given type is defined entirely by its constructor parameters.
*/
def definedByConstructorParams(tpe: Type): Boolean = {
tpe.dealias <:< localTypeOf[Product] || tpe.dealias <:< localTypeOf[DefinedByConstructorParams]
}
private val javaKeywords = Set("abstract", "assert", "boolean", "break", "byte", "case", "catch",
"char", "class", "const", "continue", "default", "do", "double", "else", "extends", "false",
"final", "finally", "float", "for", "goto", "if", "implements", "import", "instanceof", "int",
"interface", "long", "native", "new", "null", "package", "private", "protected", "public",
"return", "short", "static", "strictfp", "super", "switch", "synchronized", "this", "throw",
"throws", "transient", "true", "try", "void", "volatile", "while")
}
/**
 * Support for generating catalyst schemas for Scala objects. Note that unlike its companion
 * object, this trait is able to work in both the runtime and the compile-time (macro) universes.
*/
trait ScalaReflection {
/** The universe we work in (runtime or macro) */
val universe: scala.reflect.api.Universe
/** The mirror used to access types in the universe */
def mirror: universe.Mirror
import universe._
// The Predef.Map is scala.collection.immutable.Map.
  // Since the map values can be mutable, we explicitly import scala.collection.Map here.
import scala.collection.Map
/**
* Return the Scala Type for `T` in the current classloader mirror.
*
* Use this method instead of the convenience method `universe.typeOf`, which
* assumes that all types can be found in the classloader that loaded scala-reflect classes.
* That's not necessarily the case when running using Eclipse launchers or even
* Sbt console or test (without `fork := true`).
*
* @see SPARK-5281
*/
def localTypeOf[T: TypeTag]: `Type` = {
val tag = implicitly[TypeTag[T]]
tag.in(mirror).tpe.dealias
}
/**
* Returns the full class name for a type. The returned name is the canonical
* Scala name, where each component is separated by a period. It is NOT the
* Java-equivalent runtime name (no dollar signs).
*
* In simple cases, both the Scala and Java names are the same, however when Scala
* generates constructs that do not map to a Java equivalent, such as singleton objects
* or nested classes in package objects, it uses the dollar sign ($) to create
* synthetic classes, emulating behaviour in Java bytecode.
*/
def getClassNameFromType(tpe: `Type`): String = {
tpe.dealias.erasure.typeSymbol.asClass.fullName
}
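  // For example (illustrative): getClassNameFromType(localTypeOf[Option[Int]]) returns
  // "scala.Option", whereas the Java runtime name of a synthetic or nested class would
  // contain dollar signs.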
/**
* Returns classes of input parameters of scala function object.
*/
def getParameterTypes(func: AnyRef): Seq[Class[_]] = {
val methods = func.getClass.getMethods.filter(m => m.getName == "apply" && !m.isBridge)
assert(methods.length == 1)
methods.head.getParameterTypes
}
/**
* Returns the parameter names and types for the primary constructor of this type.
*
   * Note that it only works for Scala classes with a primary constructor, and currently doesn't
   * support inner classes.
*/
def getConstructorParameters(tpe: Type): Seq[(String, Type)] = {
val dealiasedTpe = tpe.dealias
val formalTypeArgs = dealiasedTpe.typeSymbol.asClass.typeParams
val TypeRef(_, _, actualTypeArgs) = dealiasedTpe
val params = constructParams(dealiasedTpe)
// if there are type variables to fill in, do the substitution (SomeClass[T] -> SomeClass[Int])
if (actualTypeArgs.nonEmpty) {
params.map { p =>
p.name.toString -> p.typeSignature.substituteTypes(formalTypeArgs, actualTypeArgs)
}
} else {
params.map { p =>
p.name.toString -> p.typeSignature
}
}
}
protected def constructParams(tpe: Type): Seq[Symbol] = {
val constructorSymbol = tpe.dealias.member(termNames.CONSTRUCTOR)
val params = if (constructorSymbol.isMethod) {
constructorSymbol.asMethod.paramLists
} else {
// Find the primary constructor, and use its parameter ordering.
val primaryConstructorSymbol: Option[Symbol] = constructorSymbol.asTerm.alternatives.find(
s => s.isMethod && s.asMethod.isPrimaryConstructor)
if (primaryConstructorSymbol.isEmpty) {
sys.error("Internal SQL error: Product object did not have a primary constructor.")
} else {
primaryConstructorSymbol.get.asMethod.paramLists
}
}
params.flatten
}
}
|
akopich/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala
|
Scala
|
apache-2.0
| 38,420 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600j.v3
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.ct600j.v3.retriever.CT600JBoxRetriever
case class J45(value: Option[String]) extends SchemeReferenceNumberBox{
override def validate(boxRetriever: CT600JBoxRetriever): Set[CtValidation] =
validateSchemeReferenceNumber(boxRetriever.j40(), boxRetriever.j40A(), boxRetriever.j45A())
}
|
hmrc/ct-calculations
|
src/main/scala/uk/gov/hmrc/ct/ct600j/v3/J45.scala
|
Scala
|
apache-2.0
| 980 |
package org.elasticmq.actor
import org.elasticmq.actor.reply._
import org.elasticmq.msg.{DeleteQueue, LookupQueue, ListQueues, CreateQueue}
import org.elasticmq.MillisVisibilityTimeout
import org.elasticmq.actor.test.{DataCreationHelpers, QueueManagerForEachTest, ActorTest}
class QueueManagerActorTest extends ActorTest with QueueManagerForEachTest with DataCreationHelpers {
test("non-existent queue should not be found") {
for {
// Given
_ <- queueManagerActor ? CreateQueue(createQueueData("q1", MillisVisibilityTimeout(10L)))
// When
lookupResult <- queueManagerActor ? LookupQueue("q2")
} yield {
// Then
lookupResult should be(None)
}
}
test("after persisting a queue it should be found") {
for {
// Given
_ <- queueManagerActor ? CreateQueue(createQueueData("q1", MillisVisibilityTimeout(1L)))
_ <- queueManagerActor ? CreateQueue(createQueueData("q2", MillisVisibilityTimeout(2L)))
_ <- queueManagerActor ? CreateQueue(createQueueData("q3", MillisVisibilityTimeout(3L)))
// When
lookupResult <- queueManagerActor ? LookupQueue("q2")
} yield {
// Then
lookupResult should be('defined)
}
}
test("queues should be deleted") {
// Given
val q1 = createQueueData("q1", MillisVisibilityTimeout(1L))
val q2 = createQueueData("q2", MillisVisibilityTimeout(2L))
for {
_ <- queueManagerActor ? CreateQueue(q1)
_ <- queueManagerActor ? CreateQueue(q2)
// When
_ <- queueManagerActor ? DeleteQueue(q1.name)
// Then
r1 <- queueManagerActor ? LookupQueue(q1.name)
r2 <- queueManagerActor ? LookupQueue(q2.name)
} yield {
r1 should be(None)
r2 should be('defined)
}
}
test("trying to create an existing queue should throw an exception") {
// Given
val q1 = createQueueData("q1", MillisVisibilityTimeout(1L))
for {
_ <- queueManagerActor ? CreateQueue(q1)
// When & then
result <- queueManagerActor ? CreateQueue(q1)
} yield {
result should be('left)
}
}
test("listing queues") {
// Given
val q1 = createQueueData("q1", MillisVisibilityTimeout(1L))
val q2 = createQueueData("q2", MillisVisibilityTimeout(2L))
for {
_ <- queueManagerActor ? CreateQueue(q1)
_ <- queueManagerActor ? CreateQueue(q2)
// When
queues <- queueManagerActor ? ListQueues()
} yield {
// Then
queues.toSet should be(Set(q1.name, q2.name))
}
}
}
|
adamw/elasticmq
|
core/src/test/scala/org/elasticmq/actor/QueueManagerActorTest.scala
|
Scala
|
apache-2.0
| 2,536 |
package spire.math.prime
import scala.annotation.tailrec
import scala.collection.mutable.{ArrayBuffer}
import System.arraycopy
import spire.math.{SafeLong, min}
import spire.syntax.cfor._
import SieveUtil._
/**
 * This represents a single sieve segment.
*
* The 'start' field says what this segment's first number
* is. 'primes' is a bitset of possible primes in this
* segment. 'cutoff' specifies the largest prime factor we're
* interested in. This means that cutoff**2-1 is the largest number we
* could reliably identify as prime.
*
* We are using a mod30 wheel, which means that we don't need to
* manually factor using 2, 3, or 5 (30 is the lcm of 2, 3, and
* 5). Since each wheel turn is 30-bits, and our bitset groups
* elements into 32-bit groups (integers), we have a 480-bit (15
* integer) period between the wheel and the bitset. This requires our
* segment length to be divisible by 480.
*
* When building a sieve, we will first initialize using the mod30
* wheel. Then, if we are on the first segment, we'll do a traditional
* sieve. We'll save any primes greater than 5 we find as factors,
* either fast factors (if they will show up frequently in each
* segment) or slow factors otherwise. If a factor is larger than
* cutoff we don't save it. After that we'll be done with the first
* segment.
*
* For later segments, we will use our fast and slow factors to block
* out composites as we find them. Like in the first segment, we'll
* save factors we find (although any new factors we find now will
* always be slow). And of course we won't save any factors above our
* cutoff.
*
* Once the sieve is initialized it doesn't do anything else
* interesting, besides report prime numbers. Currently its internals
* are made available to the Siever.
*/
object SieveSegment {
val wheel30: Array[Int] = {
var b: Long = 0L
b |= (1 << 1)
b |= (1 << 7)
b |= (1 << 11)
b |= (1 << 13)
b |= (1 << 17)
b |= (1 << 19)
b |= (1 << 23)
b |= (1 << 29)
val n: Long = b | (b << 30L)
val arr = new Array[Int](15)
cfor(0)(_ < 15, _ + 1) { i =>
arr(i) = ((n >>> (i * 2)) & 0xffffffffL).toInt
}
arr
}
}
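// Worked example of the wheel layout (for intuition only; not used by the code): the bits set
// above are exactly the residues mod 30 that are coprime to 2, 3 and 5, i.e. 1, 7, 11, 13, 17,
// 19, 23 and 29. Packing a 30-bit pattern into 32-bit ints repeats every lcm(30, 32) = 480 bits,
// hence the 15-integer array and the requirement that segment lengths be divisible by 480.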
case class SieveSegment(start: SafeLong, primes: BitSet, cutoff: SafeLong) {
def isPrime(n: SafeLong): Boolean = primes((n - start).toInt)
def isComposite(n: SafeLong): Boolean = !primes((n - start).toInt)
def set(n: SafeLong): Unit = primes += (n - start).toInt
def unset(n: SafeLong): Unit = primes -= (n - start).toInt
def nextAfter(n: SafeLong): SafeLong = {
var i = (n - start + 2).toInt
val len = primes.length
while (i < len) {
if (primes(i)) return start + i
i += 2
}
SafeLong(-1L) // fail
}
def init(fastq: FastFactors, slowq: FactorHeap): Unit = {
initMod30()
if (start == 0) {
initFirst(fastq, slowq)
} else {
val limit = min(cutoff ** 2, start + primes.length)
initFromArray(fastq)
initFromQueue(limit, slowq)
initRest(slowq)
}
}
def initMod30(): Unit = {
val arr = primes.array
assert(arr.length % 15 == 0)
val limit = arr.length
val wheel = SieveSegment.wheel30
cfor(0)(_ < limit, _ + 15)(i => arraycopy(wheel, 0, arr, i, 15))
if (start == 0L) {
primes -= 1
primes += 2
primes += 3
primes += 5
}
}
private def initFromArray(fastq: FastFactors): Unit = {
val arr = fastq.arr
var i = 0
val len: Long = if (start + primes.length < cutoff)
(cutoff - start).toLong
else
primes.length
while (i < arr.length) {
val factor = arr(i)
var j = (factor.m - start).toInt
val k = factor.p
val kk = k + k
val lim = len - kk
primes -= j
while (j < lim) {
j += kk
primes -= j
}
factor.m = start + j + kk
i += 1
}
}
@tailrec private def initFromQueue(limit: SafeLong, q: FactorHeap): Unit = {
if (q.isEmpty) return ()
val factor = q.dequeue
val m = factor.next
if (m < limit) {
val p = factor.p
val len = primes.length
var i = (m - start).toInt
val m2 = if (p < len) {
val k = p.toInt
val kk = k + k
while (i < len) { primes -= i; i += kk }
start + i
} else {
primes -= i
m + p
}
factor.next = m2
q += factor
initFromQueue(limit, q)
} else {
q += factor
}
}
def initFirst(fastq: FastFactors, slowq: FactorHeap): Unit = {
var p: Int = 1
val len = primes.length
val buf = ArrayBuffer.empty[FastFactor]
while (p < len) {
if (primes(p)) {
var m = p.toLong * p.toLong
if (m < len) {
val pp = p + p
var k = m.toInt
primes -= k
val lim = len - pp
while (k < lim) { k += pp; primes -= k }
m = k.toLong + pp
}
if (p < 7) {
} else if (m - primes.length < primes.length) {
buf += FastFactor(p, SafeLong(m))
} else if (cutoff > p) {
slowq += Factor(SafeLong(p), SafeLong(m))
}
}
p += 2
}
fastq.arr = buf.toArray
}
def initRest(slowq: FactorHeap): Unit = {
if (start >= cutoff) return ()
val len: Long = if (start + primes.length >= cutoff)
(cutoff - start).toLong
else
primes.length
var i = 1
while (i < len) {
if (primes(i)) {
val p: SafeLong = start + i
slowq += Factor(p, p ** 2)
}
i += 2
}
}
}
|
woparry/spire
|
core/src/main/scala/spire/math/prime/SieveSegment.scala
|
Scala
|
mit
| 5,550 |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.spark.sql
import java.util.ArrayList
import java.util.{ LinkedHashSet => JHashSet }
import java.util.{ List => JList }
import java.util.{ Map => JMap }
import java.util.Properties
import scala.collection.JavaConverters.asScalaBufferConverter
import scala.collection.JavaConverters.propertiesAsScalaMapConverter
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.sql.types.ArrayType
import org.apache.spark.sql.types.BinaryType
import org.apache.spark.sql.types.BooleanType
import org.apache.spark.sql.types.ByteType
import org.apache.spark.sql.types.DataType
import org.apache.spark.sql.types.DataTypes
import org.apache.spark.sql.types.DoubleType
import org.apache.spark.sql.types.FloatType
import org.apache.spark.sql.types.IntegerType
import org.apache.spark.sql.types.LongType
import org.apache.spark.sql.types.NullType
import org.apache.spark.sql.types.ShortType
import org.apache.spark.sql.types.StringType
import org.apache.spark.sql.types.StructField
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.types.TimestampType
import org.elasticsearch.hadoop.EsHadoopIllegalArgumentException
import org.elasticsearch.hadoop.cfg.InternalConfigurationOptions
import org.elasticsearch.hadoop.cfg.Settings
import org.elasticsearch.hadoop.rest.InitializationUtils
import org.elasticsearch.hadoop.rest.RestRepository
import org.elasticsearch.hadoop.serialization.FieldType.BINARY
import org.elasticsearch.hadoop.serialization.FieldType.BOOLEAN
import org.elasticsearch.hadoop.serialization.FieldType.BYTE
import org.elasticsearch.hadoop.serialization.FieldType.DATE
import org.elasticsearch.hadoop.serialization.FieldType.DOUBLE
import org.elasticsearch.hadoop.serialization.FieldType.HALF_FLOAT
import org.elasticsearch.hadoop.serialization.FieldType.SCALED_FLOAT
import org.elasticsearch.hadoop.serialization.FieldType.FLOAT
import org.elasticsearch.hadoop.serialization.FieldType.GEO_POINT
import org.elasticsearch.hadoop.serialization.FieldType.GEO_SHAPE
import org.elasticsearch.hadoop.serialization.FieldType.INTEGER
import org.elasticsearch.hadoop.serialization.FieldType.KEYWORD
import org.elasticsearch.hadoop.serialization.FieldType.LONG
import org.elasticsearch.hadoop.serialization.FieldType.NESTED
import org.elasticsearch.hadoop.serialization.FieldType.NULL
import org.elasticsearch.hadoop.serialization.FieldType.OBJECT
import org.elasticsearch.hadoop.serialization.FieldType.SHORT
import org.elasticsearch.hadoop.serialization.FieldType.STRING
import org.elasticsearch.hadoop.serialization.FieldType.TEXT
import org.elasticsearch.hadoop.serialization.dto.mapping.Field
import org.elasticsearch.hadoop.serialization.dto.mapping.GeoField
import org.elasticsearch.hadoop.serialization.dto.mapping.GeoPointType
import org.elasticsearch.hadoop.serialization.dto.mapping.GeoShapeType
import org.elasticsearch.hadoop.serialization.dto.mapping.MappingUtils
import org.elasticsearch.hadoop.serialization.field.FieldFilter
import org.elasticsearch.hadoop.serialization.field.FieldFilter.NumberedInclude
import org.elasticsearch.hadoop.util.Assert
import org.elasticsearch.hadoop.util.IOUtils
import org.elasticsearch.hadoop.util.SettingsUtils
import org.elasticsearch.hadoop.util.StringUtils
import org.elasticsearch.spark.sql.Utils.ROOT_LEVEL_NAME
import org.elasticsearch.spark.sql.Utils.ROW_INFO_ARRAY_PROPERTY
import org.elasticsearch.spark.sql.Utils.ROW_INFO_ORDER_PROPERTY
private[sql] object SchemaUtils {
case class Schema(field: Field, struct: StructType)
def discoverMapping(cfg: Settings): Schema = {
val (field, geoInfo) = discoverMappingAsField(cfg)
val struct = convertToStruct(field, geoInfo, cfg)
Schema(field, struct)
}
def discoverMappingAsField(cfg: Settings): (Field, JMap[String, GeoField]) = {
InitializationUtils.validateSettings(cfg);
InitializationUtils.discoverEsVersion(cfg, Utils.LOGGER);
val repo = new RestRepository(cfg)
try {
if (repo.indexExists(true)) {
var field = repo.getMapping
if (field == null) {
throw new EsHadoopIllegalArgumentException(s"Cannot find mapping for ${cfg.getResourceRead} - one is required before using Spark SQL")
}
field = MappingUtils.filterMapping(field, cfg);
val geoInfo = repo.sampleGeoFields(field)
// apply mapping filtering only when present to minimize configuration settings (big when dealing with large mappings)
if (StringUtils.hasText(cfg.getReadFieldInclude) || StringUtils.hasText(cfg.getReadFieldExclude)) {
// NB: metadata field is synthetic so it doesn't have to be filtered
// its presence is controlled through the dedicated config setting
cfg.setProperty(InternalConfigurationOptions.INTERNAL_ES_TARGET_FIELDS, StringUtils.concatenate(Field.toLookupMap(field).keySet(), StringUtils.DEFAULT_DELIMITER))
}
return (field, geoInfo)
}
else {
throw new EsHadoopIllegalArgumentException(s"Cannot find mapping for ${cfg.getResourceRead} - one is required before using Spark SQL")
}
} finally {
repo.close()
}
}
def convertToStruct(rootField: Field, geoInfo: JMap[String, GeoField], cfg: Settings): StructType = {
val arrayIncludes = SettingsUtils.getFieldArrayFilterInclude(cfg)
val arrayExcludes = StringUtils.tokenize(cfg.getReadFieldAsArrayExclude)
var fields = for (fl <- rootField.properties()) yield convertField(fl, geoInfo, null, arrayIncludes, arrayExcludes, cfg)
if (cfg.getReadMetadata) {
// enrich structure
val metadataMap = DataTypes.createStructField(cfg.getReadMetadataField, DataTypes.createMapType(StringType, StringType, true), true)
fields :+= metadataMap
}
DataTypes.createStructType(fields)
}
private def convertToStruct(field: Field, geoInfo: JMap[String, GeoField], parentName: String,
arrayIncludes: JList[NumberedInclude], arrayExcludes: JList[String], cfg:Settings): StructType = {
DataTypes.createStructType(for (fl <- field.properties()) yield convertField(fl, geoInfo, parentName, arrayIncludes, arrayExcludes, cfg))
}
private def convertField(field: Field, geoInfo: JMap[String, GeoField], parentName: String,
arrayIncludes: JList[NumberedInclude], arrayExcludes: JList[String], cfg:Settings): StructField = {
val absoluteName = if (parentName != null) parentName + "." + field.name() else field.name()
val matched = FieldFilter.filter(absoluteName, arrayIncludes, arrayExcludes, false)
val createArray = !arrayIncludes.isEmpty() && matched.matched
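    // A field is wrapped in ArrayType(s) only when it matches the user-supplied
    // read-field-as-array includes; the match also carries the nesting depth applied below.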
var dataType = Utils.extractType(field) match {
case NULL => NullType
case BINARY => BinaryType
case BOOLEAN => BooleanType
case BYTE => ByteType
case SHORT => ShortType
case INTEGER => IntegerType
case LONG => LongType
case FLOAT => FloatType
case DOUBLE => DoubleType
// String type
case STRING => StringType
case TEXT => StringType
case KEYWORD => StringType
case HALF_FLOAT => FloatType
case SCALED_FLOAT => FloatType
case DATE => if (cfg.getMappingDateRich) TimestampType else StringType
case OBJECT => convertToStruct(field, geoInfo, absoluteName, arrayIncludes, arrayExcludes, cfg)
case NESTED => DataTypes.createArrayType(convertToStruct(field, geoInfo, absoluteName, arrayIncludes, arrayExcludes, cfg))
// GEO
case GEO_POINT => {
val geoPoint = geoInfo.get(absoluteName) match {
case GeoPointType.LON_LAT_ARRAY => DataTypes.createArrayType(DoubleType)
case GeoPointType.GEOHASH => StringType
case GeoPointType.LAT_LON_STRING => StringType
case GeoPointType.LAT_LON_OBJECT => {
val lon = DataTypes.createStructField("lat", DoubleType, true)
val lat = DataTypes.createStructField("lon", DoubleType, true)
DataTypes.createStructType(Array(lon,lat))
}
}
if (Utils.LOGGER.isDebugEnabled()) {
Utils.LOGGER.debug(s"Detected field [${absoluteName}] as a GeoPoint with format ${geoPoint.simpleString}")
}
geoPoint
}
case GEO_SHAPE => {
val fields = new ArrayList[StructField]()
fields.add(DataTypes.createStructField("type", StringType, true))
val COORD = "coordinates"
geoInfo.get(absoluteName) match {
case GeoShapeType.POINT => fields.add(DataTypes.createStructField(COORD, DataTypes.createArrayType(DoubleType), true))
case GeoShapeType.LINE_STRING => fields.add(DataTypes.createStructField(COORD, createNestedArray(DoubleType, 2), true))
case GeoShapeType.POLYGON => {
fields.add(DataTypes.createStructField(COORD, createNestedArray(DoubleType, 3), true))
fields.add(DataTypes.createStructField("orientation", StringType, true))
}
case GeoShapeType.MULTI_POINT => fields.add(DataTypes.createStructField(COORD, createNestedArray(DoubleType, 2), true))
case GeoShapeType.MULTI_LINE_STRING => fields.add(DataTypes.createStructField(COORD, createNestedArray(DoubleType, 3), true))
case GeoShapeType.MULTI_POLYGON => fields.add(DataTypes.createStructField(COORD, createNestedArray(DoubleType, 4), true))
case GeoShapeType.GEOMETRY_COLLECTION => throw new EsHadoopIllegalArgumentException(s"Geoshape $geoInfo not supported")
case GeoShapeType.ENVELOPE => fields.add(DataTypes.createStructField(COORD, createNestedArray(DoubleType, 2), true))
case GeoShapeType.CIRCLE => {
fields.add(DataTypes.createStructField(COORD, DataTypes.createArrayType(DoubleType), true))
fields.add(DataTypes.createStructField("radius", StringType, true))
}
}
val geoShape = DataTypes.createStructType(fields)
if (Utils.LOGGER.isDebugEnabled()) {
Utils.LOGGER.debug(s"Detected field [${absoluteName}] as a GeoShape with format ${geoShape.simpleString}")
}
geoShape
}
// fall back to String
case _ => StringType //throw new EsHadoopIllegalStateException("Unknown field type " + field);
}
    if (createArray) {
      // can't call createNestedArray for some reason...
      for (_ <- 0 until matched.depth) {
        dataType = DataTypes.createArrayType(dataType)
      }
    }
DataTypes.createStructField(field.name(), dataType, true)
}
  private def createNestedArray(elementType: DataType, depth: Int): DataType = {
    var array = elementType
    for (_ <- 0 until depth) {
      array = DataTypes.createArrayType(array)
    }
    array
  }
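  // For example, createNestedArray(DoubleType, 2) produces ArrayType(ArrayType(DoubleType)),
  // which is how LINE_STRING coordinates (an array of coordinate arrays) are modelled above.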
def setRowInfo(settings: Settings, struct: StructType) = {
val rowInfo = detectRowInfo(settings, struct)
// save the field in the settings to pass it to the value reader
settings.setProperty(ROW_INFO_ORDER_PROPERTY, IOUtils.propsToString(rowInfo._1))
// also include any array info
settings.setProperty(ROW_INFO_ARRAY_PROPERTY, IOUtils.propsToString(rowInfo._2))
}
def getRowInfo(settings: Settings) = {
val rowOrderString = settings.getProperty(ROW_INFO_ORDER_PROPERTY)
Assert.hasText(rowOrderString, "no schema/row order detected...")
val rowOrderProps = IOUtils.propsFromString(rowOrderString)
val rowArrayString = settings.getProperty(ROW_INFO_ARRAY_PROPERTY)
val rowArrayProps = if (StringUtils.hasText(rowArrayString)) IOUtils.propsFromString(rowArrayString) else new Properties()
val order = new scala.collection.mutable.LinkedHashMap[String, Seq[String]]
for (prop <- rowOrderProps.asScala) {
val value = StringUtils.tokenize(prop._2).asScala
if (!value.isEmpty) {
order.put(prop._1, new ArrayBuffer() ++= value)
}
}
val needToBeArray = new JHashSet[String]()
for (prop <- rowArrayProps.asScala) {
needToBeArray.add(prop._1)
}
(order,needToBeArray)
}
def detectRowInfo(settings: Settings, struct: StructType): (Properties, Properties) = {
    // tuple: _1 = columns (simple names) for each row, _2 = which fields (absolute names) are arrays
val rowInfo = (new Properties, new Properties)
doDetectInfo(rowInfo, ROOT_LEVEL_NAME, struct)
val csv = settings.getScrollFields()
// if a projection is applied (filtering or projection) use that instead
if (StringUtils.hasText(csv)) {
if (settings.getReadMetadata) {
rowInfo._1.setProperty(ROOT_LEVEL_NAME, csv + StringUtils.DEFAULT_DELIMITER + settings.getReadMetadataField)
}
else {
rowInfo._1.setProperty(ROOT_LEVEL_NAME, csv)
}
}
rowInfo
}
private def doDetectInfo(info: (Properties, Properties), level: String, dataType: DataType) {
dataType match {
case s: StructType => {
val fields = new java.util.ArrayList[String]
for (field <- s) {
fields.add(field.name)
doDetectInfo(info, if (level != ROOT_LEVEL_NAME) level + "." + field.name else field.name, field.dataType)
}
info._1.setProperty(level, StringUtils.concatenate(fields, StringUtils.DEFAULT_DELIMITER))
}
case a: ArrayType => {
val prop = info._2.getProperty(level)
var depth = 0
if (StringUtils.hasText(prop)) {
depth = Integer.parseInt(prop)
}
depth += 1
info._2.setProperty(level, String.valueOf(depth))
doDetectInfo(info, level, a.elementType)
}
// ignore primitives
case _ => // ignore
}
}
}
|
xjrk58/elasticsearch-hadoop
|
spark/sql-13/src/main/scala/org/elasticsearch/spark/sql/SchemaUtils.scala
|
Scala
|
apache-2.0
| 14,646 |
//no import of stainless.collection.List
object List3 {
def foobar = 1 :: Nil
}
|
epfl-lara/stainless
|
frontends/benchmarks/extraction/invalid/List3.scala
|
Scala
|
apache-2.0
| 83 |
package com.arcusys.valamis.settings.service
/**
* Created by igorborisov on 17.10.14.
*/
trait SiteDependentSettingService {
def setSetting(siteId: Int, name: String, value: Option[String]): Unit
def getSetting(siteId: Int, name: String): Option[String]
}
|
ViLPy/Valamis
|
valamis-core/src/main/scala/com/arcusys/valamis/settings/service/SiteDependentSettingService.scala
|
Scala
|
lgpl-3.0
| 266 |
package org.json4s
package ext
class JacksonTypeFieldSerializerSpec extends TypeFieldSerializerSpec("Jackson") {
val s: Serialization = jackson.Serialization
}
|
json4s/json4s
|
tests/src/test/scala/org/json4s/ext/JacksonTypeFieldSerializerSpec.scala
|
Scala
|
apache-2.0
| 163 |
/*
* This file is part of AckCord, licensed under the MIT License (MIT).
*
* Copyright (c) 2019 Katrix
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package ackcord.util
import akka.NotUsed
import akka.stream.scaladsl.{Flow, Merge, Sink, Source}
import cats.{Alternative, Contravariant, Functor, MonadError, StackSafeMonad}
object StreamInstances {
type SourceRequest[A] = Source[A, NotUsed]
implicit val sourceInstance: MonadError[SourceRequest, Throwable] with Alternative[SourceRequest] =
new MonadError[SourceRequest, Throwable] with Alternative[SourceRequest] with StackSafeMonad[SourceRequest] {
override def empty[A]: SourceRequest[A] = Source.empty[A]
override def pure[A](x: A): SourceRequest[A] = Source.single(x)
override def map[A, B](fa: SourceRequest[A])(f: A => B): SourceRequest[B] = fa.map(f)
override def flatMap[A, B](fa: SourceRequest[A])(f: A => SourceRequest[B]): SourceRequest[B] =
fa.flatMapConcat[B, NotUsed](f)
override def product[A, B](fa: SourceRequest[A], fb: SourceRequest[B]): SourceRequest[(A, B)] = fa.zip(fb)
override def combineK[A](x: SourceRequest[A], y: SourceRequest[A]): SourceRequest[A] =
Source.combine(x, y)(Merge.apply(_))
override def raiseError[A](e: Throwable): SourceRequest[A] = Source.failed(e)
override def handleErrorWith[A](fa: SourceRequest[A])(f: Throwable => SourceRequest[A]): SourceRequest[A] =
fa.recoverWithRetries[A](5, {
case e: Throwable => f(e).mapMaterializedValue(_ => NotUsed)
})
}
implicit def flowInstance[In, Mat]: Functor[Flow[In, *, Mat]] = new Functor[Flow[In, *, Mat]] {
override def map[A, B](fa: Flow[In, A, Mat])(f: A => B): Flow[In, B, Mat] = fa.map(f)
}
implicit def sinkInstance[Mat]: Contravariant[Sink[*, Mat]] = new Contravariant[Sink[*, Mat]] {
override def contramap[A, B](fa: Sink[A, Mat])(f: B => A): Sink[B, Mat] = fa.contramap(f)
}
  // Provides for-comprehension syntax on Source; can be brittle
implicit class SourceFlatmap[A, M1](private val source: Source[A, M1]) extends AnyVal {
def flatMap[B, M2](f: A => Source[B, M2]): Source[B, M1] = source.flatMapConcat(f)
}
}
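// A minimal usage sketch of the SourceFlatmap wrapper above: with it in scope,
// akka-stream Sources can be used in for-comprehensions (`map` is native to Source,
// `flatMap` comes from the wrapper). The element values below are illustrative.
object SourceFlatmapUsageSketch {
  import StreamInstances.SourceFlatmap
  // Desugars to Source(1 to 3).flatMap(a => Source(List("x", "y")).map(b => (a, b)))
  val pairs: Source[(Int, String), NotUsed] =
    for {
      a <- Source(1 to 3)
      b <- Source(List("x", "y"))
    } yield (a, b)
}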
|
Katrix-/AckCord
|
requests/src/main/scala/ackcord/util/StreamInstances.scala
|
Scala
|
mit
| 3,213 |
package controllers
import actors.{StationsDBActor, WebSocketActor}
import models._
import play.Logger
import play.api.Play
import play.api.libs.json._
import play.api.libs.ws._
import play.api.mvc._
import akka.actor._
import javax.inject._
import StationsDBActor._
import play.api.Play.current
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import scala.concurrent.Future
import scala.concurrent.duration._
@Singleton
class Application @Inject() (system: ActorSystem, ws: WSClient) extends Controller {
val contract = Play.configuration.getString("jcdecaux.api.contract").get
val apiKey = Play.configuration.getString("jcdecaux.api.key").get
implicit val positionReads = Json.reads[Position]
implicit val stationReads = Json.reads[Station]
val stationsDBActor = system.actorOf(StationsDBActor.props, "stations-db-actor")
def socket = WebSocket.acceptWithActor[JsValue, JsValue] { request => out =>
WebSocketActor.props(stationsDBActor,out)
}
system.scheduler.schedule(10.millisecond, 3.seconds) {
updateAllStations
}
def update = Action.async {
updateAllStations.map {
case Some(_) =>
Ok
case None =>
InternalServerError
}
}
def updateAllStations: Future[Option[List[Station]]] = ws.url(s"https://api.jcdecaux.com/vls/v1/stations?contract=$contract&apiKey=$apiKey").get().map {
_.json.validate[List[Station]].fold(
validationErrors => {
Logger.error(validationErrors.toString())
None
}, {
case stations =>
          stations.foreach { station =>
            stationsDBActor ! Upsert(station)
          }
Some(stations)
})
}
}
|
jdauphant/reactive-velib-api
|
app/controllers/Application.scala
|
Scala
|
bsd-2-clause
| 1,643 |
/**
* Ephedra Food Alerts
* Copyright (C) 2013-2014 Philippe Sam-Long aka pulsation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package eu.pulsation.ephedra
import android.app.Fragment
import android.os.Bundle
import android.view.View
import android.util.Log
import android.widget.TextView
import android.text.method.LinkMovementMethod
import android.view.{LayoutInflater, ViewGroup}
class AboutFragment extends Fragment {
final private val TAG="eu.pulsation.ephedra.AboutFragment"
override def onCreateView(inflater: LayoutInflater, container: ViewGroup, savedInstanceState: Bundle):View = {
inflater.inflate(R.layout.about, container, false)
}
def dataOriginDetails : TextView = {
getView().findViewById(R.id.about_data_origin_details) match {
case txt: TextView => txt
case _ => throw new ClassCastException
}
}
def responsabilityDetails : TextView = {
getView().findViewById(R.id.about_data_responsability_details) match {
case txt: TextView => txt
case _ => throw new ClassCastException
}
}
def dataUseConditions : TextView = {
getView().findViewById(R.id.about_data_use_conditions) match {
case txt: TextView => txt
case _ => throw new ClassCastException
}
}
override def onStart() {
// Activate web links
dataOriginDetails.setMovementMethod(LinkMovementMethod.getInstance())
responsabilityDetails.setMovementMethod(LinkMovementMethod.getInstance())
dataUseConditions.setMovementMethod(LinkMovementMethod.getInstance())
super.onStart()
}
override def onCreate(savedInstanceState: Bundle) {
super.onCreate(savedInstanceState)
}
}
|
pulsation/ephedra-android
|
src/eu/pulsation/ephedra/About/AboutFragment.scala
|
Scala
|
gpl-3.0
| 2,290 |
package servlet
import org.eclipse.jgit.http.server.GitServlet
import org.eclipse.jgit.lib._
import org.eclipse.jgit.transport._
import org.eclipse.jgit.transport.resolver._
import org.slf4j.LoggerFactory
import javax.servlet.ServletConfig
import javax.servlet.ServletContext
import javax.servlet.http.{HttpServletResponse, HttpServletRequest}
import util.{StringUtil, Keys, JGitUtil, Directory}
import util.ControlUtil._
import util.Implicits._
import service._
import WebHookService._
import org.eclipse.jgit.api.Git
import util.JGitUtil.CommitInfo
import service.IssuesService.IssueSearchCondition
import model.Session
/**
* Provides Git repository via HTTP.
*
* This servlet provides only Git repository functionality.
* Authentication is provided by [[servlet.BasicAuthenticationFilter]].
*/
class GitRepositoryServlet extends GitServlet with SystemSettingsService {
private val logger = LoggerFactory.getLogger(classOf[GitRepositoryServlet])
override def init(config: ServletConfig): Unit = {
setReceivePackFactory(new GitBucketReceivePackFactory())
// TODO are there any other ways...?
super.init(new ServletConfig(){
def getInitParameter(name: String): String = name match {
case "base-path" => Directory.RepositoryHome
case "export-all" => "true"
case name => config.getInitParameter(name)
}
def getInitParameterNames(): java.util.Enumeration[String] = {
config.getInitParameterNames
}
def getServletContext(): ServletContext = config.getServletContext
def getServletName(): String = config.getServletName
})
super.init(config)
}
override def service(req: HttpServletRequest, res: HttpServletResponse): Unit = {
val agent = req.getHeader("USER-AGENT")
val index = req.getRequestURI.indexOf(".git")
if(index >= 0 && (agent == null || agent.toLowerCase.indexOf("git/") < 0)){
// redirect for browsers
val paths = req.getRequestURI.substring(0, index).split("/")
res.sendRedirect(baseUrl(req) + "/" + paths.dropRight(1).last + "/" + paths.last)
} else {
// response for git client
super.service(req, res)
}
}
}
class GitBucketReceivePackFactory extends ReceivePackFactory[HttpServletRequest] with SystemSettingsService {
private val logger = LoggerFactory.getLogger(classOf[GitBucketReceivePackFactory])
override def create(request: HttpServletRequest, db: Repository): ReceivePack = {
val receivePack = new ReceivePack(db)
val pusher = request.getAttribute(Keys.Request.UserName).asInstanceOf[String]
logger.debug("requestURI: " + request.getRequestURI)
logger.debug("pusher:" + pusher)
defining(request.paths){ paths =>
val owner = paths(1)
val repository = paths(2).stripSuffix(".git")
logger.debug("repository:" + owner + "/" + repository)
if(!repository.endsWith(".wiki")){
defining(request) { implicit r =>
val hook = new CommitLogHook(owner, repository, pusher, baseUrl)
receivePack.setPreReceiveHook(hook)
receivePack.setPostReceiveHook(hook)
}
}
receivePack
}
}
}
import scala.collection.JavaConverters._
class CommitLogHook(owner: String, repository: String, pusher: String, baseUrl: String)(implicit session: Session)
extends PostReceiveHook with PreReceiveHook
with RepositoryService with AccountService with IssuesService with ActivityService with PullRequestService with WebHookService {
private val logger = LoggerFactory.getLogger(classOf[CommitLogHook])
private var existIds: Seq[String] = Nil
def onPreReceive(receivePack: ReceivePack, commands: java.util.Collection[ReceiveCommand]): Unit = {
try {
using(Git.open(Directory.getRepositoryDir(owner, repository))) { git =>
existIds = JGitUtil.getAllCommitIds(git)
}
} catch {
case ex: Exception => {
logger.error(ex.toString, ex)
throw ex
}
}
}
def onPostReceive(receivePack: ReceivePack, commands: java.util.Collection[ReceiveCommand]): Unit = {
try {
using(Git.open(Directory.getRepositoryDir(owner, repository))) { git =>
val pushedIds = scala.collection.mutable.Set[String]()
commands.asScala.foreach { command =>
logger.debug(s"commandType: ${command.getType}, refName: ${command.getRefName}")
val refName = command.getRefName.split("/")
val branchName = refName.drop(2).mkString("/")
val commits = if (refName(1) == "tags") {
Nil
} else {
command.getType match {
case ReceiveCommand.Type.DELETE => Nil
case _ => JGitUtil.getCommitLog(git, command.getOldId.name, command.getNewId.name)
}
}
          // Retrieve the total (open + closed) issue count in the repository
val issueCount =
countIssue(IssueSearchCondition(state = "open"), false, owner -> repository) +
countIssue(IssueSearchCondition(state = "closed"), false, owner -> repository)
          // Extract new commits and create issue comments from their messages
val defaultBranch = getRepository(owner, repository, baseUrl).get.repository.defaultBranch
val newCommits = commits.flatMap { commit =>
if (!existIds.contains(commit.id) && !pushedIds.contains(commit.id)) {
if (issueCount > 0) {
pushedIds.add(commit.id)
createIssueComment(commit)
// close issues
if(refName(1) == "heads" && branchName == defaultBranch && command.getType == ReceiveCommand.Type.UPDATE){
closeIssuesFromMessage(commit.fullMessage, pusher, owner, repository)
}
}
Some(commit)
} else None
}
// record activity
if(refName(1) == "heads"){
command.getType match {
case ReceiveCommand.Type.CREATE => recordCreateBranchActivity(owner, repository, pusher, branchName)
case ReceiveCommand.Type.UPDATE => recordPushActivity(owner, repository, pusher, branchName, newCommits)
case ReceiveCommand.Type.DELETE => recordDeleteBranchActivity(owner, repository, pusher, branchName)
case _ =>
}
} else if(refName(1) == "tags"){
command.getType match {
case ReceiveCommand.Type.CREATE => recordCreateTagActivity(owner, repository, pusher, branchName, newCommits)
case ReceiveCommand.Type.DELETE => recordDeleteTagActivity(owner, repository, pusher, branchName, newCommits)
case _ =>
}
}
if(refName(1) == "heads"){
command.getType match {
case ReceiveCommand.Type.CREATE |
ReceiveCommand.Type.UPDATE |
ReceiveCommand.Type.UPDATE_NONFASTFORWARD =>
updatePullRequests(branchName)
case _ =>
}
}
// call web hook
getWebHookURLs(owner, repository) match {
case webHookURLs if(webHookURLs.nonEmpty) =>
for(pusherAccount <- getAccountByUserName(pusher);
ownerAccount <- getAccountByUserName(owner);
repositoryInfo <- getRepository(owner, repository, baseUrl)){
callWebHook(owner, repository, webHookURLs,
WebHookPayload(git, pusherAccount, command.getRefName, repositoryInfo, newCommits, ownerAccount))
}
case _ =>
}
}
}
// update repository last modified time.
updateLastActivityDate(owner, repository)
} catch {
case ex: Exception => {
logger.error(ex.toString, ex)
throw ex
}
}
}
private def createIssueComment(commit: CommitInfo) = {
StringUtil.extractIssueId(commit.fullMessage).foreach { issueId =>
if(getIssue(owner, repository, issueId).isDefined){
getAccountByMailAddress(commit.committerEmailAddress).foreach { account =>
createComment(owner, repository, account.userName, issueId.toInt, commit.fullMessage + " " + commit.id, "commit")
}
}
}
}
/**
* Fetch pull request contents into refs/pull/${issueId}/head and update pull request table.
*/
private def updatePullRequests(branch: String) =
getPullRequestsByRequest(owner, repository, branch, false).foreach { pullreq =>
if(getRepository(pullreq.userName, pullreq.repositoryName, baseUrl).isDefined){
using(Git.open(Directory.getRepositoryDir(pullreq.userName, pullreq.repositoryName)),
Git.open(Directory.getRepositoryDir(pullreq.requestUserName, pullreq.requestRepositoryName))){ (oldGit, newGit) =>
oldGit.fetch
.setRemote(Directory.getRepositoryDir(owner, repository).toURI.toString)
.setRefSpecs(new RefSpec(s"refs/heads/${branch}:refs/pull/${pullreq.issueId}/head").setForceUpdate(true))
.call
val commitIdTo = oldGit.getRepository.resolve(s"refs/pull/${pullreq.issueId}/head").getName
val commitIdFrom = JGitUtil.getForkedCommitId(oldGit, newGit,
pullreq.userName, pullreq.repositoryName, pullreq.branch,
pullreq.requestUserName, pullreq.requestRepositoryName, pullreq.requestBranch)
updateCommitId(pullreq.userName, pullreq.repositoryName, pullreq.issueId, commitIdTo, commitIdFrom)
}
}
}
}
|
mqshen/gitbucketTest
|
src/main/scala/servlet/GitRepositoryServlet.scala
|
Scala
|
apache-2.0
| 9,503 |
/*
* Copyright (c) 2014, Brook 'redattack34' Heisler
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the ModularRayguns team nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.castlebravostudios.rayguns.api.items
import com.castlebravostudios.rayguns.items.misc.GetFireInformationEvent
import com.castlebravostudios.rayguns.items.misc.GunTickEvent
import com.castlebravostudios.rayguns.items.misc.PostfireEvent
import com.castlebravostudios.rayguns.items.misc.PrefireEvent
trait RaygunModule {
/**
* Get the module key for this module. This key will be stored in the NBT data
* of the ray gun it's attached to so that the module item can be looked up
* later.<br>
*
* IMPORTANT NOTE: This key must not be changed once your plugin is released!
* Modules which cannot be found using the keys stored in NBT will be removed
* from the ray gun.
*/
def moduleKey : String
/**
* Get the power modifier for this module. The power modifiers for all four
* modules in a gun will be multiplied together with some constant to produce
* the power cost to fire the gun.
*/
def powerModifier : Double
/**
* Get a string that will be looked up in the internationalization file
* and used to replace an appropriate segment of the raygun name pattern.
*/
def nameSegmentKey : String
/**
* Get the item associated with this module, or None if registerItem has not
* been called.
*/
def item : Option[ItemModule]
/**
* Create the ItemModule associated with this module and register it with the
* game under the given ID. If ID is less than or equal to zero, this method
* should do nothing - this module has been disabled in the configuration file.
* After this method is called, item should not return None.
*/
def registerItem( ) : Unit
/**
* Event fired by a raygun to all modules to collect information before preparing
* to fire. This is used for calculating the power cost, among other things.
*/
def handleGetFireInformationEvent( event : GetFireInformationEvent ) : Unit = ()
/**
* Event fired by a raygun to all modules it contains just before firing. This
* is used for checking power and rejecting the attempt to fire, among other
* things.
*/
def handlePrefireEvent( event : PrefireEvent ) : Unit = ()
/**
* Event fired by a raygun to all modules it contains just after firing. This
* is used for subtracting power and other things.
*/
def handlePostfireEvent( event : PostfireEvent ) : Unit = ()
/**
* Event fired by a raygun to all modules it contains every server tick while
* it's in the player's inventory.
*/
def handleTickEvent( event : GunTickEvent ) : Unit = ()
}
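// A minimal sketch of a module implementing the trait above. The names
// `ExampleModuleSketch` and `buildItem`, and the config-supplied item id, are
// illustrative assumptions rather than members of the real project.
object ExampleModuleSketch extends RaygunModule {
  private var registered: Option[ItemModule] = None
  // Stable key written into the ray gun's NBT data; must never change once released.
  def moduleKey: String = "ExampleModule"
  def powerModifier: Double = 1.0
  def nameSegmentKey: String = "rayguns.example.segment"
  def item: Option[ItemModule] = registered
  def registerItem(): Unit = {
    val configuredItemId = 0 // hypothetical value read from the mod's configuration file
    if (configuredItemId > 0) {
      registered = Some(buildItem(configuredItemId))
    }
  }
  // Hypothetical helper: construct and register the actual ItemModule here.
  private def buildItem(id: Int): ItemModule =
    sys.error("illustrative placeholder")
}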
|
Redattack34/ModularRayguns
|
src/main/scala/com/castlebravostudios/rayguns/api/items/RaygunModule.scala
|
Scala
|
bsd-3-clause
| 4,152 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package statements
import java.util
import com.intellij.openapi.progress.ProgressManager
import com.intellij.openapi.project.DumbServiceImpl
import com.intellij.openapi.util.Key
import com.intellij.pom.java.LanguageLevel
import com.intellij.psi.PsiReferenceList.Role
import com.intellij.psi._
import com.intellij.psi.impl.source.HierarchicalMethodSignatureImpl
import com.intellij.psi.tree.TokenSet
import com.intellij.psi.util.{MethodSignatureBackedByPsiMethod, PsiModificationTracker}
import com.intellij.util.containers.ConcurrentHashMap
import org.jetbrains.plugins.scala.caches.CachesUtil
import org.jetbrains.plugins.scala.extensions.{toPsiClassExt, toPsiNamedElementExt}
import org.jetbrains.plugins.scala.icons.Icons
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.api.base.ScMethodLike
import org.jetbrains.plugins.scala.lang.psi.api.base.types._
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScBlockStatement
import org.jetbrains.plugins.scala.lang.psi.api.statements.params._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef._
import org.jetbrains.plugins.scala.lang.psi.fake.{FakePsiReferenceList, FakePsiTypeParameterList}
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.synthetic.{JavaIdentifier, ScSyntheticFunction, ScSyntheticTypeParameter, SyntheticClasses}
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.typedef.TypeDefinitionMembers
import org.jetbrains.plugins.scala.lang.psi.light.ScFunctionWrapper
import org.jetbrains.plugins.scala.lang.psi.light.scala.{ScLightFunctionDeclaration, ScLightFunctionDefinition}
import org.jetbrains.plugins.scala.lang.psi.stubs.ScFunctionStub
import org.jetbrains.plugins.scala.lang.psi.types.nonvalue._
import org.jetbrains.plugins.scala.lang.psi.types.result.{Failure, Success, TypeResult, TypingContext}
import org.jetbrains.plugins.scala.lang.psi.types.{Unit => UnitType, _}
import scala.annotation.tailrec
import scala.collection.Seq
import scala.collection.immutable.Set
import scala.collection.mutable.ArrayBuffer
import com.intellij.lexer.JavaLexer
/**
* @author Alexander Podkhalyuzin
*/
//some functions are not PsiMethods and are e.g. not visible from java
//see ScSyntheticFunction
trait ScFun extends ScTypeParametersOwner {
def retType: ScType
def paramClauses: Seq[Seq[Parameter]]
def methodType: ScType = {
paramClauses.foldRight[ScType](retType) {
(params: Seq[Parameter], tp: ScType) => new ScMethodType(tp, params, false)(getProject, getResolveScope)
}
}
def polymorphicType: ScType = {
if (typeParameters.length == 0) methodType
else ScTypePolymorphicType(methodType, typeParameters.map(new TypeParameter(_)))
}
}
/**
* Represents Scala's internal function definitions and declarations
*/
trait ScFunction extends ScalaPsiElement with ScMember with ScTypeParametersOwner
with ScParameterOwner with ScDocCommentOwner with ScTypedDefinition
with ScDeclaredElementsHolder with ScAnnotationsHolder with ScMethodLike with ScBlockStatement {
private var synthNavElement: Option[PsiElement] = None
var syntheticCaseClass: Option[ScClass] = None
def setSynthetic(navElement: PsiElement) {
synthNavElement = Some(navElement)
}
def isSyntheticCopy: Boolean = synthNavElement.nonEmpty && name == "copy"
def isSyntheticApply: Boolean = synthNavElement.nonEmpty && name == "apply"
def isSyntheticUnapply: Boolean = synthNavElement.nonEmpty && name == "unapply"
def isSyntheticUnapplySeq: Boolean = synthNavElement.nonEmpty && name == "unapplySeq"
def isSynthetic: Boolean = synthNavElement.nonEmpty
def getSyntheticNavigationElement: Option[PsiElement] = synthNavElement
def hasUnitResultType = {
def hasUnitRT(t: ScType): Boolean = t match {
case UnitType => true
case ScMethodType(result, _, _) => hasUnitRT(result)
case _ => false
}
hasUnitRT(methodType)
}
def isParameterless = paramClauses.clauses.isEmpty
private val probablyRecursive: ThreadLocal[Boolean] = new ThreadLocal[Boolean]() {
override def initialValue(): Boolean = false
}
def isProbablyRecursive = probablyRecursive.get()
def setProbablyRecursive(b: Boolean) {probablyRecursive.set(b)}
def isEmptyParen = paramClauses.clauses.size == 1 && paramClauses.params.size == 0
def addEmptyParens() {
val clause = ScalaPsiElementFactory.createClauseFromText("()", getManager)
paramClauses.addClause(clause)
}
def removeAllClauses() {
paramClauses.clauses.headOption.zip(paramClauses.clauses.lastOption).foreach { p =>
paramClauses.deleteChildRange(p._1, p._2)
}
}
def isNative: Boolean = {
hasAnnotation("scala.native") != None
}
override def hasModifierProperty(name: String): Boolean = {
if (name == "abstract") {
this match {
case _: ScFunctionDeclaration =>
containingClass match {
case t: ScTrait => return true
case c: ScClass if c.hasAbstractModifier => return true
case _ =>
}
case _ =>
}
}
super.hasModifierProperty(name)
}
/**
* This method is important for expected type evaluation.
*/
def getInheritedReturnType: Option[ScType] = {
returnTypeElement match {
case Some(_) => returnType.toOption
case None =>
val superReturnType = superMethodAndSubstitutor match {
case Some((fun: ScFunction, subst)) =>
var typeParamSubst = ScSubstitutor.empty
fun.typeParameters.zip(typeParameters).foreach {
case (oldParam: ScTypeParam, newParam: ScTypeParam) =>
typeParamSubst = typeParamSubst.bindT((oldParam.name, ScalaPsiUtil.getPsiElementId(oldParam)),
new ScTypeParameterType(newParam, subst))
}
fun.returnType.toOption.map(typeParamSubst.followed(subst).subst)
case Some((fun: ScSyntheticFunction, subst)) =>
var typeParamSubst = ScSubstitutor.empty
fun.typeParameters.zip(typeParameters).foreach {
case (oldParam: ScSyntheticTypeParameter, newParam: ScTypeParam) =>
typeParamSubst = typeParamSubst.bindT((oldParam.name, ScalaPsiUtil.getPsiElementId(oldParam)),
new ScTypeParameterType(newParam, subst))
}
Some(subst.subst(fun.retType))
case Some((fun: PsiMethod, subst)) =>
var typeParamSubst = ScSubstitutor.empty
fun.getTypeParameters.zip(typeParameters).foreach {
case (oldParam: PsiTypeParameter, newParam: ScTypeParam) =>
typeParamSubst = typeParamSubst.bindT((oldParam.name, ScalaPsiUtil.getPsiElementId(oldParam)),
new ScTypeParameterType(newParam, subst))
}
Some(typeParamSubst.followed(subst).subst(ScType.create(fun.getReturnType, getProject, getResolveScope)))
case _ => None
}
superReturnType
}
}
override def getTextOffset: Int = nameId.getTextRange.getStartOffset
def hasParameterClause: Boolean = {
if (effectiveParameterClauses.length != 0) return true
superMethod match {
case Some(fun: ScFunction) => fun.hasParameterClause
case Some(psi: PsiMethod) => true
case None => false
}
}
/**
* Signature has repeated param, which is not the last one
*/
def hasMalformedSignature: Boolean = {
val clausesIterator = paramClauses.clauses.iterator
while (clausesIterator.hasNext) {
val clause = clausesIterator.next()
val paramsIterator = clause.parameters.iterator
while (paramsIterator.hasNext) {
val param = paramsIterator.next()
if (paramsIterator.hasNext && param.isRepeatedParameter) return true
}
}
false
}
def definedReturnType: TypeResult[ScType] = {
returnTypeElement match {
case Some(ret) => ret.getType(TypingContext.empty)
case _ if !hasAssign => Success(types.Unit, Some(this))
case _ =>
superMethod match {
case Some(f: ScFunction) => f.definedReturnType
case Some(m: PsiMethod) =>
Success(ScType.create(m.getReturnType, getProject, getResolveScope), Some(this))
case _ => Failure("No defined return type", Some(this))
}
}
}
/**
   * Returns the pure 'function' type, as if it were defined as a field with a functional value.
*/
def methodType(result: Option[ScType]): ScType = {
val clauses = effectiveParameterClauses
val resultType = result match {
case None => returnType.getOrAny
case Some(x) => x
}
if (!hasParameterClause) return resultType
val res = if (clauses.length > 0)
clauses.foldRight[ScType](resultType){(clause: ScParameterClause, tp: ScType) =>
new ScMethodType(tp, clause.getSmartParameters, clause.isImplicit)(getProject, getResolveScope)
}
else new ScMethodType(resultType, Seq.empty, false)(getProject, getResolveScope)
res.asInstanceOf[ScMethodType]
}
/**
* Returns internal type with type parameters.
*/
def polymorphicType(result: Option[ScType] = None): ScType = {
if (typeParameters.length == 0) methodType(result)
else ScTypePolymorphicType(methodType(result), typeParameters.map(new TypeParameter(_)))
}
/**
   * Optional type element denoting the function's return type.
   * May be omitted for non-recursive functions.
*/
def returnTypeElement: Option[ScTypeElement] = {
this match {
case st: ScalaStubBasedElementImpl[_] =>
val stub = st.getStub
if (stub != null) {
return stub.asInstanceOf[ScFunctionStub].getReturnTypeElement
}
case _ =>
}
findChild(classOf[ScTypeElement])
}
def returnTypeIsDefined: Boolean = !definedReturnType.isEmpty
def hasExplicitType = returnTypeElement.isDefined
def removeExplicitType() {
val colon = children.find(_.getNode.getElementType == ScalaTokenTypes.tCOLON)
(colon, returnTypeElement) match {
case (Some(first), Some(last)) => deleteChildRange(first, last)
case _ =>
}
}
def paramClauses: ScParameters
def parameterList: ScParameters = paramClauses // TODO merge
def isProcedure = paramClauses.clauses.isEmpty
def importantOrderFunction(): Boolean = false
def returnType: TypeResult[ScType] = {
if (importantOrderFunction()) {
val parent = getParent
val data = parent.getUserData(ScFunction.calculatedBlockKey)
if (data != null) returnTypeInner
else {
val children = parent match {
case stub: ScalaStubBasedElementImpl[_] if stub.getStub != null =>
import scala.collection.JavaConverters._
stub.getStub.getChildrenStubs.asScala.map(_.getPsi)
case _ => parent.getChildren.toSeq
}
children.foreach {
case fun: ScFunction if fun.importantOrderFunction() =>
ProgressManager.checkCanceled()
fun.returnTypeInner
case _ =>
}
parent.putUserData(ScFunction.calculatedBlockKey, java.lang.Boolean.TRUE)
returnTypeInner
}
} else returnTypeInner
}
def returnTypeInner: TypeResult[ScType]
def declaredType: TypeResult[ScType] = wrap(returnTypeElement) flatMap (_.getType(TypingContext.empty))
def clauses: Option[ScParameters] = Some(paramClauses)
def paramTypes: Seq[ScType] = parameters.map {_.getType(TypingContext.empty).getOrNothing}
def effectiveParameterClauses: Seq[ScParameterClause] = {
CachesUtil.get(this, CachesUtil.FUNCTION_EFFECTIVE_PARAMETER_CLAUSE_KEY,
new CachesUtil.MyProvider(this, (f: ScFunction) => f.paramClauses.clauses ++ f.syntheticParamClause)
(PsiModificationTracker.MODIFICATION_COUNT))
}
private def syntheticParamClause: Option[ScParameterClause] = {
val hasImplicit = clauses.exists(_.clauses.exists(_.isImplicit))
if (isConstructor) {
containingClass match {
case owner: ScTypeParametersOwner =>
if (hasImplicit) None else ScalaPsiUtil.syntheticParamClause(owner, paramClauses, classParam = false)
case _ => None
}
} else {
if (hasImplicit) None else ScalaPsiUtil.syntheticParamClause(this, paramClauses, classParam = false)
}
}
def declaredElements = Seq(this)
/**
* Seek parameter with appropriate name in appropriate parameter clause.
* @param name parameter name
   * @param clausePosition effective clause index (default -1); if -1, the parameter is searched for in any clause
*/
def getParamByName(name: String, clausePosition: Int = -1): Option[ScParameter] = {
clausePosition match {
case -1 =>
parameters.find { case param =>
ScalaPsiUtil.memberNamesEquals(param.name, name) ||
param.deprecatedName.exists(ScalaPsiUtil.memberNamesEquals(_, name))
}
case i if i < 0 || i >= effectiveParameterClauses.length => None
case _ =>
effectiveParameterClauses.apply(clausePosition).parameters.find { case param =>
ScalaPsiUtil.memberNamesEquals(param.name, name) ||
param.deprecatedName.exists(ScalaPsiUtil.memberNamesEquals(_, name))
}
}
}
override def accept(visitor: ScalaElementVisitor) {
visitor.visitFunction(this)
}
def getGetterOrSetterFunction: Option[ScFunction] = {
containingClass match {
case clazz: ScTemplateDefinition =>
if (name.endsWith("_=")) {
clazz.functions.find(_.name == name.substring(0, name.length - 2))
} else if (!hasParameterClause) {
clazz.functions.find(_.name == name + "_=")
} else None
case _ => None
}
}
def isBridge: Boolean = {
//todo: fix algorithm for annotation resolve to not resolve objects (if it's possible)
//heuristic algorithm to avoid SOE in MixinNodes.build
annotations.exists(annot => {
annot.typeElement match {
case s: ScSimpleTypeElement => s.reference match {
case Some(ref) => ref.refName == "bridge"
case _ => false
}
case _ => false
}
})
}
def addParameter(param: ScParameter): ScFunction = {
if (paramClauses.clauses.length > 0)
paramClauses.clauses.apply(0).addParameter(param)
else {
val clause: ScParameterClause = ScalaPsiElementFactory.createClauseFromText("()", getManager)
val newClause = clause.addParameter(param)
paramClauses.addClause(newClause)
}
this
}
def getTypeParameters: Array[PsiTypeParameter] = {
val params = typeParameters
val size = params.length
val result = PsiTypeParameter.ARRAY_FACTORY.create(size)
var i = 0
while (i < size) {
result(i) = params(i).asInstanceOf[PsiTypeParameter]
i += 1
}
result
}
def getTypeParameterList = new FakePsiTypeParameterList(getManager, getLanguage, typeParameters.toArray, this)
def hasTypeParameters = typeParameters.length > 0
def getParameterList: ScParameters = paramClauses
private val functionWrapper: ConcurrentHashMap[(Boolean, Boolean, Option[PsiClass]), (Seq[ScFunctionWrapper], Long)] =
new ConcurrentHashMap()
private def isJavaVarargs: Boolean = {
if (hasAnnotation("scala.annotation.varargs").isDefined) true
else {
superMethod match {
case f: ScFunction => f.isJavaVarargs
case m: PsiMethod => m.isVarArgs
case _ => false
}
}
}
/**
   * @return Empty sequence if the containing class is null.
*/
def getFunctionWrappers(isStatic: Boolean, isInterface: Boolean, cClass: Option[PsiClass] = None): Seq[ScFunctionWrapper] = {
val curModCount = getManager.getModificationTracker.getOutOfCodeBlockModificationCount
val r = functionWrapper.get(isStatic, isInterface, cClass)
if (r != null && r._2 == curModCount) {
return r._1
}
val buffer = new ArrayBuffer[ScFunctionWrapper]
if (cClass != None || containingClass != null) {
buffer += new ScFunctionWrapper(this, isStatic, isInterface, cClass)
for {
clause <- clauses
first <- clause.clauses.headOption
if first.hasRepeatedParam
if isJavaVarargs
} {
buffer += new ScFunctionWrapper(this, isStatic, isInterface, cClass, isJavaVarargs = true)
}
if (!isConstructor) {
for (i <- 0 until this.parameters.length if parameters(i).baseDefaultParam) {
buffer += new ScFunctionWrapper(this, isStatic, isInterface, cClass, forDefault = Some(i + 1))
}
}
}
val result: Seq[ScFunctionWrapper] = buffer.toSeq
functionWrapper.put((isStatic, isInterface, cClass), (result, curModCount))
result
}
def parameters: Seq[ScParameter] = paramClauses.params
override def getIcon(flags: Int) = Icons.FUNCTION
def getReturnType: PsiType = {
if (DumbServiceImpl.getInstance(getProject).isDumb || !SyntheticClasses.get(getProject).isClassesRegistered) {
      return null // no resolve during dumb mode or while synthetic classes are not registered
}
CachesUtil.get(
this, CachesUtil.PSI_RETURN_TYPE_KEY,
new CachesUtil.MyProvider(this, {ic: ScFunction => ic.getReturnTypeImpl})
(PsiModificationTracker.MODIFICATION_COUNT)
)
}
private def getReturnTypeImpl: PsiType = {
val tp = getType(TypingContext.empty).getOrAny
tp match {
case ScFunctionType(rt, _) => ScType.toPsi(rt, getProject, getResolveScope)
case _ => ScType.toPsi(tp, getProject, getResolveScope)
}
}
def superMethods: Seq[PsiMethod] = {
val clazz = containingClass
if (clazz != null) TypeDefinitionMembers.getSignatures(clazz).forName(ScalaPsiUtil.convertMemberName(name))._1.
get(new PhysicalSignature(this, ScSubstitutor.empty)).getOrElse(return Seq.empty).supers.
filter(_.info.isInstanceOf[PhysicalSignature]).map {_.info.asInstanceOf[PhysicalSignature].method}
else Seq.empty
}
def superMethod: Option[PsiMethod] = superMethodAndSubstitutor.map(_._1)
def superMethodAndSubstitutor: Option[(PsiMethod, ScSubstitutor)] = {
val clazz = containingClass
if (clazz != null) {
val option = TypeDefinitionMembers.getSignatures(clazz).forName(name)._1.
fastPhysicalSignatureGet(new PhysicalSignature(this, ScSubstitutor.empty))
if (option == None) return None
option.get.primarySuper.filter(_.info.isInstanceOf[PhysicalSignature]).
map(node => (node.info.asInstanceOf[PhysicalSignature].method, node.info.substitutor))
}
else None
}
def superSignatures: Seq[Signature] = {
val clazz = containingClass
val s = new PhysicalSignature(this, ScSubstitutor.empty)
if (clazz == null) return Seq(s)
val t = TypeDefinitionMembers.getSignatures(clazz).forName(ScalaPsiUtil.convertMemberName(name))._1.
fastPhysicalSignatureGet(s) match {
case Some(x) => x.supers.map {_.info}
case None => Seq[Signature]()
}
t
}
def superSignaturesIncludingSelfType: Seq[Signature] = {
val clazz = containingClass
val s = new PhysicalSignature(this, ScSubstitutor.empty)
if (clazz == null) return Seq(s)
val withSelf = clazz.selfType != None
if (withSelf) {
val signs = TypeDefinitionMembers.getSelfTypeSignatures(clazz).forName(ScalaPsiUtil.convertMemberName(name))._1
signs.fastPhysicalSignatureGet(s) match {
case Some(x) if x.info.namedElement == this => x.supers.map { _.info }
case Some(x) => x.supers.filter {_.info.namedElement != this }.map { _.info } :+ x.info
case None => signs.get(s) match {
case Some(x) if x.info.namedElement == this => x.supers.map { _.info }
case Some(x) => x.supers.filter {_.info.namedElement != this }.map { _.info } :+ x.info
case None => Seq.empty
}
}
} else {
TypeDefinitionMembers.getSignatures(clazz).forName(ScalaPsiUtil.convertMemberName(name))._1.
fastPhysicalSignatureGet(s) match {
case Some(x) => x.supers.map { _.info }
case None => Seq.empty
}
}
}
override def getNameIdentifier: PsiIdentifier = new JavaIdentifier(nameId)
def findDeepestSuperMethod: PsiMethod = {
val s = superMethods
if (s.length == 0) null
else s(s.length - 1)
}
def getReturnTypeElement = null
def findSuperMethods(parentClass: PsiClass) = PsiMethod.EMPTY_ARRAY
def findSuperMethods(checkAccess: Boolean) = PsiMethod.EMPTY_ARRAY
def findSuperMethods = superMethods.toArray // TODO which other xxxSuperMethods can/should be implemented?
def findDeepestSuperMethods = PsiMethod.EMPTY_ARRAY
def getReturnTypeNoResolve: PsiType = PsiType.VOID
def getPom = null
def findSuperMethodSignaturesIncludingStatic(checkAccess: Boolean) =
new util.ArrayList[MethodSignatureBackedByPsiMethod]()
def getSignature(substitutor: PsiSubstitutor) = MethodSignatureBackedByPsiMethod.create(this, substitutor)
//todo implement me!
def isVarArgs = false
def isConstructor = name == "this"
def getBody: PsiCodeBlock = null
def getThrowsList = new FakePsiReferenceList(getManager, getLanguage, Role.THROWS_LIST) {
override def getReferenceElements: Array[PsiJavaCodeReferenceElement] = {
getReferencedTypes.map {
tp => PsiElementFactory.SERVICE.getInstance(getProject).createReferenceElementByType(tp)
}
}
override def getReferencedTypes: Array[PsiClassType] = {
hasAnnotation("scala.throws") match {
case Some(annotation) =>
annotation.constructor.args.map(_.exprs).getOrElse(Seq.empty).flatMap { expr =>
expr.getType(TypingContext.empty) match {
case Success(ScParameterizedType(des, Seq(arg)), _) => ScType.extractClass(des) match {
case Some(clazz) if clazz.qualifiedName == "java.lang.Class" =>
ScType.toPsi(arg, getProject, getResolveScope) match {
case c: PsiClassType => Seq(c)
case _ => Seq.empty
}
case _ => Seq.empty
}
case _ => Seq.empty
}
}.toArray
case _ => PsiClassType.EMPTY_ARRAY
}
}
}
def getType(ctx: TypingContext) = {
returnType match {
case Success(tp: ScType, _) =>
var res: TypeResult[ScType] = Success(tp, None)
var i = paramClauses.clauses.length - 1
while (i >= 0) {
val cl = paramClauses.clauses.apply(i)
val paramTypes = cl.parameters.map(_.getType(ctx))
res match {
case Success(t: ScType, _) =>
res = collectFailures(paramTypes, Nothing)(ScFunctionType(t, _)(getProject, getResolveScope))
case _ =>
}
i = i - 1
}
res
case x => x
}
}
override protected def isSimilarMemberForNavigation(m: ScMember, strictCheck: Boolean) = m match {
case f: ScFunction => f.name == name && {
if (strictCheck) new PhysicalSignature(this, ScSubstitutor.empty).
paramTypesEquiv(new PhysicalSignature(f, ScSubstitutor.empty))
else true
}
case _ => false
}
def hasAssign = getNode.getChildren(TokenSet.create(ScalaTokenTypes.tASSIGN)).size > 0
def getHierarchicalMethodSignature: HierarchicalMethodSignature = {
new HierarchicalMethodSignatureImpl(getSignature(PsiSubstitutor.EMPTY))
}
override def isDeprecated = {
hasAnnotation("scala.deprecated") != None || hasAnnotation("java.lang.Deprecated") != None
}
override def getName = {
val res = if (isConstructor && getContainingClass != null) getContainingClass.getName else super.getName
if (JavaLexer.isKeyword(res, LanguageLevel.HIGHEST)) "_mth" + res
else res
}
override def setName(name: String): PsiElement = {
if (isConstructor) this
else super.setName(name)
}
override def getOriginalElement: PsiElement = {
val ccontainingClass = containingClass
if (ccontainingClass == null) return this
val originalClass: PsiClass = ccontainingClass.getOriginalElement.asInstanceOf[PsiClass]
if (ccontainingClass eq originalClass) return this
if (!originalClass.isInstanceOf[ScTypeDefinition]) return this
val c = originalClass.asInstanceOf[ScTypeDefinition]
val membersIterator = c.members.iterator
val buf: ArrayBuffer[ScMember] = new ArrayBuffer[ScMember]
while (membersIterator.hasNext) {
val member = membersIterator.next()
if (isSimilarMemberForNavigation(member, strictCheck = false)) buf += member
}
if (buf.length == 0) this
else if (buf.length == 1) buf(0)
else {
val filter = buf.filter(isSimilarMemberForNavigation(_, strictCheck = true))
if (filter.length == 0) buf(0)
else filter(0)
}
}
def getTypeNoImplicits(ctx: TypingContext): TypeResult[ScType] = {
returnType match {
case Success(tp: ScType, _) =>
var res: TypeResult[ScType] = Success(tp, None)
var i = paramClauses.clauses.length - 1
while (i >= 0) {
val cl = paramClauses.clauses.apply(i)
if (!cl.isImplicit) {
val paramTypes = cl.parameters.map(_.getType(ctx))
res match {
case Success(t: ScType, _) =>
res = collectFailures(paramTypes, Nothing)(ScFunctionType(t, _)(getProject, getResolveScope))
case _ =>
}
}
i = i - 1
}
res
case failure => failure
}
}
}
object ScFunction {
object Name {
val Apply = "apply"
val Update = "update"
val Unapply = "unapply"
val UnapplySeq = "unapplySeq"
val Foreach = "foreach"
val Map = "map"
val FlatMap = "flatMap"
val Filter = "filter"
val WithFilter = "withFilter"
val Unapplies: Set[String] = Set(Unapply, UnapplySeq)
val ForComprehensions: Set[String] = Set(Foreach, Map, FlatMap, Filter, WithFilter)
val Special: Set[String] = Set(Apply, Update) ++ Unapplies ++ ForComprehensions
}
  /** Is this function sometimes invoked without its name appearing at the call site? */
def isSpecial(name: String): Boolean = Name.Special(name)
private val calculatedBlockKey: Key[java.lang.Boolean] = Key.create("calculated.function.returns.block")
@tailrec
def getCompoundCopy(pTypes: List[List[ScType]], tParams: List[TypeParameter], rt: ScType, fun: ScFunction): ScFunction = {
fun match {
case light: ScLightFunctionDeclaration => getCompoundCopy(pTypes, tParams, rt, light.fun)
case light: ScLightFunctionDefinition => getCompoundCopy(pTypes, tParams, rt, light.fun)
case decl: ScFunctionDeclaration => new ScLightFunctionDeclaration(pTypes, tParams, rt, decl)
case definition: ScFunctionDefinition => new ScLightFunctionDefinition(pTypes, tParams, rt, definition)
}
}
}
|
consulo/consulo-scala
|
src/org/jetbrains/plugins/scala/lang/psi/api/statements/ScFunction.scala
|
Scala
|
apache-2.0
| 27,071 |
package cn.changhong.web.controller.auth
import cn.changhong.web.persistent.SlickDBPoolManager
import cn.changhong.web.persistent.Tables.Tables.User
import cn.changhong.web.router.{RestAction, RestAopRouterProvider, LogAopAction}
import cn.changhong.web.util._
import com.twitter.finagle.http.Response
import org.jboss.netty.handler.codec.http.HttpMethod
import scala.slick.driver.MySQLDriver.simple._
import cn.changhong.web.persistent.Tables.Tables._
/**
* Created by yangguo on 14-12-8.
*/
object UserAuthAction extends RestAction[RestRequest,Response]{
override def apply(request: RestRequest): Response = {
(request.method,request.path(2)) match {
case (HttpMethod.POST, "token") => UserTokenAction(request)
case (HttpMethod.PUT, "register") => UserRegisterTokenAction(request)
case _ => NotFindActionException(request.underlying.getUri)
}
}
}
object UserTokenAction extends UserTokenAction with LogAopAction
class UserTokenAction extends RestAopRouterProvider{
override def aopAction(request: RestRequest): Response = {
val requestMap = Parser.ChannelBufferToJsonStringToMap(request.underlying.getContent)
val (u_type,u_name)= AccountUtil.decodeUserAccount(requestMap.get("account"))
val password = requestMap.get("password")
val uType = requestMap.get("uType")
val user = SlickDBPoolManager.DBPool.withTransaction { implicit session =>
u_type match{
case AccountUtil.account_login_type_email=>
User.filter { u => u.password === password && u.email === u_name}.firstOption
case AccountUtil.account_login_type_name=>
User.filter { u => u.password === password && u.username === u_name}.firstOption
case AccountUtil.account_login_type_phone=>
User.filter { u => u.password === password && u.iphone === u_name}.firstOption
case _=>throw new RestException(RestResponseInlineCode.Invalid_authorization_parameters,"无效的账号类型")
}
}
val content = {
user match {
case Some(u) =>
if (u.utype.equals(uType.get)) {
u.status match {
case AccountUtil.account_status_normal | AccountUtil.account_status_inactive | AccountUtil.account_status_supervise=>
val token = AccountUtil.createResponseToken(TokenUtil.createToken(request.logBean.clientId, u.id.toString, AccountUtil.token_type_long),u)
RestResponseContent(RestResponseInlineCode.succeed, token)
case AccountUtil.account_status_freeze =>
RestResponseContent(RestResponseInlineCode.login_user_freeze, "账号暂时被冻结")
case AccountUtil.account_status_delete=>
RestResponseContent(RestResponseInlineCode.login_user_delete, "账号已经被删除")
case AccountUtil.account_status_suicide=>
RestResponseContent(RestResponseInlineCode.login_user_suicide,"账号已经注销")
}
} else {
RestResponseContent(RestResponseInlineCode.invalid_request_parameters, "未找到账号类型为" + uType + "的账号")
}
case None => RestResponseContent(RestResponseInlineCode.user_not_exit, "不匹配任何账户")
}
}
DefaultHttpResponse.createResponse(content)
}
}
object UserRegisterTokenAction extends UserRegisterTokenAction with LogAopAction
class UserRegisterTokenAction extends RestAopRouterProvider{
def IsAccountExist(u_type:String,u_name:String): Option[UserRow] ={
SlickDBPoolManager.DBPool.withTransaction { implicit session =>
u_type match{
case AccountUtil.account_login_type_email=>
User.filter { u => u.email === u_name}.firstOption
case AccountUtil.account_login_type_name=>
User.filter { u =>u.username === u_name}.firstOption
case AccountUtil.account_login_type_phone=>
User.filter { u => u.iphone === u_name}.firstOption
case _=>throw new RestException(RestResponseInlineCode.Invalid_authorization_parameters,"无效的账号类型")
}
}
}
override def aopAction(request: RestRequest): Response = {
val registerMap = Parser.ChannelBufferToJsonStringToMap(request.underlying.getContent)
val (u_type, u_name) = AccountUtil.decodeUserAccount(registerMap.get("account"))
IsAccountExist(u_type, u_name) match {
case Some(u) => throw new RestException(RestResponseInlineCode.already_user_account, "账号已经存在")
case None => {
val passwd = registerMap.get("password") match {
case Some(x) => x
case None => throw new RestException(RestResponseInlineCode.Invalid_authorization_parameters, "密码不能为空")
}
val uType = registerMap.get("uType") match {
case Some(x) => x
case None => throw new RestException(RestResponseInlineCode.Invalid_authorization_parameters, "注册账号类型不能为空")
}
val (username, email, phone) = u_type match {
case AccountUtil.account_login_type_email =>
val t_name = registerMap.get("account").get
(t_name, u_name, t_name)
case AccountUtil.account_login_type_phone =>
val t_name = registerMap.get("account").get
(t_name, t_name, u_name)
case AccountUtil.account_login_type_name =>
val t_name = registerMap.get("account").get
(u_name, t_name, t_name)
case _ => throw new RestException(RestResponseInlineCode.Invalid_authorization_parameters, "无效的注册账号类型")
}
val user = UserRow(-100, username, phone, email, passwd, AccountUtil.account_status_normal, uType, AccountUtil.account_isbind_no, AccountUtil.promoted_type_no)
val uid = SlickDBPoolManager.DBPool.withTransaction { implicit session =>
(User returning User.map(_.id)).insert(user)
}
user.id=uid
val token = AccountUtil.createResponseToken(TokenUtil.createToken(request.logBean.clientId, uid.toString, AccountUtil.token_type_long),user)
val content = RestResponseContent(RestResponseInlineCode.succeed, token)
DefaultHttpResponse.createResponse(content)
}
}
}
}
object AccountUtil {
def createResponseToken(tk:Map[String,String],user:UserRow):Map[String,String]= {
var token = tk
token += ("u_id"->user.id.toString)
token += ("u_status" -> user.status)
token += ("u_bind" -> user.bind)
token += ("u_promoted_type" -> user.promotedType)
token += ("u_type" -> user.utype)
token
}
  // Account status
  val account_status_freeze = "freeze" // frozen
  val account_status_normal = "normal" // normal
  val account_status_inactive = "inactive" // inactive
  val account_status_delete = "delete" // deleted
  val account_status_supervise = "supervise" // supervised
  val account_status_suicide = "suicide" // deactivated by the user
  // Whether the account is bound
  val account_isbind_yes = "bind" // account is bound
  val account_isbind_no = "nobind" // account is not bound
  // Real-name verification status
  val promoted_type_yes = "authentication" // real-name verified
  val promoted_type_no = "not_authentication" // not real-name verified
  // Account type
  val account_type_user = "user"
  val account_type_3rdpart = "3rdpart"
  val account_type_app = "app"
  // Token type
  val token_type_forever = "forever" // permanent
  val token_type_long = "long" // long-lived
  val token_type_temp = "temp" // temporary
  val token_type_short = "short" // short-lived
  // Account login type
  val account_login_type_email = "email"
  val account_login_type_phone = "phone"
  val account_login_type_name = "name"
val request_key_user_type="uType"
val request_key_access_token="Access_Token"
val request_key_user_id="User_Id"
val request_key_client_id="Client_Id"
def decodeUserAccount(account:Option[String]) :(String,String)={
account match{
case Some(u)=>
val a_start=u.indexOf('_')
if(a_start<0 || a_start == u.length-1) throw new RestException(RestResponseInlineCode.Invalid_authorization_parameters,"无效的用户名格式")
(u.substring(0,a_start),u.substring(a_start+1))
case None=>throw new RestException(RestResponseInlineCode.Invalid_authorization_parameters,"无效的用户名")
}
}
}
|
guoyang2011/myfinagle
|
WebTemplate/src/main/scala/cn/changhong/web/controller/auth/UserAuthService.scala
|
Scala
|
apache-2.0
| 8,260 |
package mypipe.producer.stdout
import mypipe.api.event._
import mypipe.api.producer.Producer
import org.slf4j.LoggerFactory
import com.typesafe.config.Config
class StdoutProducer(config: Config) extends Producer(config) {
protected val mutations = scala.collection.mutable.ListBuffer[String]()
protected val log = LoggerFactory.getLogger(getClass)
override def handleAlter(event: AlterEvent): Boolean = {
log.info(s"\\n$event\\n")
true
}
override def flush(): Boolean = {
if (mutations.nonEmpty) {
log.info("\\n" + mutations.mkString("\\n"))
mutations.clear()
}
true
}
override def queueList(mutationz: List[Mutation]): Boolean = {
mutationz.foreach(queue)
true
}
override def queue(mutation: Mutation): Boolean = {
// TODO: quote column values if they are strings before printing
mutation match {
case i: InsertMutation ⇒
mutations += s"INSERT INTO ${i.table.db}.${i.table.name} (${i.table.columns.map(_.name).mkString(", ")}) VALUES ${i.rows.map("(" + _.columns.values.map(_.value).mkString(", ") + ")").mkString(",")}"
case u: UpdateMutation ⇒
u.rows.foreach(rr ⇒ {
val old = rr._1
val cur = rr._2
val pKeyColNames = u.table.primaryKey.map(pKey ⇒ pKey.columns.map(_.name))
          val p = pKeyColNames.map(names ⇒ {
            // keep only the columns whose names are primary-key column names
            val cols = old.columns
            cols.filter(kv ⇒ names.contains(kv._1)).head
          })
val pKeyVals = p.map(_._2.value.toString)
val where = pKeyColNames
.map(_.zip(pKeyVals)
.map(kv ⇒ kv._1 + "=" + kv._2))
.map(_.mkString(", "))
.map(w ⇒ s"WHERE ($w)").getOrElse("")
val curValues = cur.columns.values.map(_.value)
val colNames = u.table.columns.map(_.name)
val updates = colNames.zip(curValues).map(kv ⇒ kv._1 + "=" + kv._2).mkString(", ")
mutations += s"UPDATE ${u.table.db}.${u.table.name} SET ($updates) $where"
})
case d: DeleteMutation ⇒
d.rows.foreach(row ⇒ {
val pKeyColNames = if (d.table.primaryKey.isDefined) d.table.primaryKey.get.columns.map(_.name) else List.empty[String]
        val p = pKeyColNames.map(colName ⇒ {
          // keep only the column whose name matches the primary-key column name
          row.columns.filter(_._1 == colName).head
        })
val pKeyVals = p.map(_._2.value.toString)
val where = pKeyColNames.zip(pKeyVals).map(kv ⇒ kv._1 + "=" + kv._2).mkString(", ")
mutations += s"DELETE FROM ${d.table.db}.${d.table.name} WHERE ($where)"
})
case _ ⇒ log.info(s"Ignored mutation: $mutation")
}
true
}
override def toString: String = {
"StdoutProducer"
}
}
|
tramchamploo/mypipe
|
mypipe-producers/src/main/scala/mypipe/producer/stdout/StdoutProducer.scala
|
Scala
|
apache-2.0
| 2,788 |
object timeofday {
class DateError extends Exception
/** Simulating properties in Scala
* (example 4.2.1 in the Scala Language Specification)
*/
class TimeOfDayVar {
private var h, m, s: Int = 0
def hours = h
/** A method 'ident_=' is a setter for 'ident'. 'code.ident = ...' will
* be translated to a call to 'ident_='
*/
def hours_= (h: Int) =
if (0 <= h && h < 24) this.h = h
else throw new DateError()
def minutes = m
def minutes_= (m: Int) =
if (0 <= m && m < 60) this.m = m
else throw new DateError()
def seconds = s
def seconds_= (s: Int) =
if (0 <= s && s < 60) this./*!*/s = s
else throw new DateError()
}
def main(args: Array[String]) {
val d = new TimeOfDayVar
d.hours = 8; d./*!*/minutes = 30; d.seconds = 0
d.hours/*#*/ = 25 // throws a DateError exception
}
}
|
felixmulder/scala
|
test/disabled/presentation/timeofday/src/timeofday.scala
|
Scala
|
bsd-3-clause
| 895 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature
import org.apache.spark.annotation.{Since, Experimental}
import org.apache.spark.ml.Transformer
import org.apache.spark.ml.param.{BooleanParam, ParamMap, StringArrayParam}
import org.apache.spark.ml.param.shared.{HasInputCol, HasOutputCol}
import org.apache.spark.ml.util._
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.functions.{col, udf}
import org.apache.spark.sql.types.{ArrayType, StringType, StructField, StructType}
/**
* stop words list
*/
private[spark] object StopWords {
/**
* Use the same default stopwords list as scikit-learn.
* The original list can be found from "Glasgow Information Retrieval Group"
* [[http://ir.dcs.gla.ac.uk/resources/linguistic_utils/stop_words]]
*/
val English = Array( "a", "about", "above", "across", "after", "afterwards", "again",
"against", "all", "almost", "alone", "along", "already", "also", "although", "always",
"am", "among", "amongst", "amoungst", "amount", "an", "and", "another",
"any", "anyhow", "anyone", "anything", "anyway", "anywhere", "are",
"around", "as", "at", "back", "be", "became", "because", "become",
"becomes", "becoming", "been", "before", "beforehand", "behind", "being",
"below", "beside", "besides", "between", "beyond", "bill", "both",
"bottom", "but", "by", "call", "can", "cannot", "cant", "co", "con",
"could", "couldnt", "cry", "de", "describe", "detail", "do", "done",
"down", "due", "during", "each", "eg", "eight", "either", "eleven", "else",
"elsewhere", "empty", "enough", "etc", "even", "ever", "every", "everyone",
"everything", "everywhere", "except", "few", "fifteen", "fify", "fill",
"find", "fire", "first", "five", "for", "former", "formerly", "forty",
"found", "four", "from", "front", "full", "further", "get", "give", "go",
"had", "has", "hasnt", "have", "he", "hence", "her", "here", "hereafter",
"hereby", "herein", "hereupon", "hers", "herself", "him", "himself", "his",
"how", "however", "hundred", "i", "ie", "if", "in", "inc", "indeed",
"interest", "into", "is", "it", "its", "itself", "keep", "last", "latter",
"latterly", "least", "less", "ltd", "made", "many", "may", "me",
"meanwhile", "might", "mill", "mine", "more", "moreover", "most", "mostly",
"move", "much", "must", "my", "myself", "name", "namely", "neither",
"never", "nevertheless", "next", "nine", "no", "nobody", "none", "noone",
"nor", "not", "nothing", "now", "nowhere", "of", "off", "often", "on",
"once", "one", "only", "onto", "or", "other", "others", "otherwise", "our",
"ours", "ourselves", "out", "over", "own", "part", "per", "perhaps",
"please", "put", "rather", "re", "same", "see", "seem", "seemed",
"seeming", "seems", "serious", "several", "she", "should", "show", "side",
"since", "sincere", "six", "sixty", "so", "some", "somehow", "someone",
"something", "sometime", "sometimes", "somewhere", "still", "such",
"system", "take", "ten", "than", "that", "the", "their", "them",
"themselves", "then", "thence", "there", "thereafter", "thereby",
"therefore", "therein", "thereupon", "these", "they", "thick", "thin",
"third", "this", "those", "though", "three", "through", "throughout",
"thru", "thus", "to", "together", "too", "top", "toward", "towards",
"twelve", "twenty", "two", "un", "under", "until", "up", "upon", "us",
"very", "via", "was", "we", "well", "were", "what", "whatever", "when",
"whence", "whenever", "where", "whereafter", "whereas", "whereby",
"wherein", "whereupon", "wherever", "whether", "which", "while", "whither",
"who", "whoever", "whole", "whom", "whose", "why", "will", "with",
"within", "without", "would", "yet", "you", "your", "yours", "yourself", "yourselves")
}
/**
* :: Experimental ::
* A feature transformer that filters out stop words from input.
 * Note: null values from the input array are preserved unless null is explicitly added to stopWords.
* @see [[http://en.wikipedia.org/wiki/Stop_words]]
*/
@Experimental
class StopWordsRemover(override val uid: String)
extends Transformer with HasInputCol with HasOutputCol with DefaultParamsWritable {
def this() = this(Identifiable.randomUID("stopWords"))
/** @group setParam */
def setInputCol(value: String): this.type = set(inputCol, value)
/** @group setParam */
def setOutputCol(value: String): this.type = set(outputCol, value)
/**
   * The set of stop words to be filtered out.
* Default: [[StopWords.English]]
* @group param
*/
val stopWords: StringArrayParam = new StringArrayParam(this, "stopWords", "stop words")
/** @group setParam */
def setStopWords(value: Array[String]): this.type = set(stopWords, value)
/** @group getParam */
def getStopWords: Array[String] = $(stopWords)
/**
   * Whether to do a case-sensitive comparison over the stop words.
* Default: false
* @group param
*/
val caseSensitive: BooleanParam = new BooleanParam(this, "caseSensitive",
"whether to do case-sensitive comparison during filtering")
/** @group setParam */
def setCaseSensitive(value: Boolean): this.type = set(caseSensitive, value)
/** @group getParam */
def getCaseSensitive: Boolean = $(caseSensitive)
setDefault(stopWords -> StopWords.English, caseSensitive -> false)
override def transform(dataset: DataFrame): DataFrame = {
val outputSchema = transformSchema(dataset.schema)
val t = if ($(caseSensitive)) {
val stopWordsSet = $(stopWords).toSet
udf { terms: Seq[String] =>
terms.filter(s => !stopWordsSet.contains(s))
}
} else {
val toLower = (s: String) => if (s != null) s.toLowerCase else s
val lowerStopWords = $(stopWords).map(toLower(_)).toSet
udf { terms: Seq[String] =>
terms.filter(s => !lowerStopWords.contains(toLower(s)))
}
}
val metadata = outputSchema($(outputCol)).metadata
dataset.select(col("*"), t(col($(inputCol))).as($(outputCol), metadata))
}
override def transformSchema(schema: StructType): StructType = {
val inputType = schema($(inputCol)).dataType
require(inputType.sameType(ArrayType(StringType)),
s"Input type must be ArrayType(StringType) but got $inputType.")
SchemaUtils.appendColumn(schema, $(outputCol), inputType, schema($(inputCol)).nullable)
}
override def copy(extra: ParamMap): StopWordsRemover = defaultCopy(extra)
}
@Since("1.6.0")
object StopWordsRemover extends DefaultParamsReadable[StopWordsRemover] {
@Since("1.6.0")
override def load(path: String): StopWordsRemover = super.load(path)
}
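/*
 * Editor's sketch (not part of the original file): a minimal usage example of the transformer
 * above, assuming the Spark 1.x DataFrame API used in this file. The object name, column names
 * and sample data below are illustrative only.
 */
private[feature] object StopWordsRemoverUsageSketch {
  import org.apache.spark.sql.SQLContext

  def run(sqlContext: SQLContext): Unit = {
    import sqlContext.implicits._

    val remover = new StopWordsRemover()
      .setInputCol("raw")
      .setOutputCol("filtered")

    val df = Seq(
      (0, Seq("I", "saw", "the", "red", "balloon")),
      (1, Seq("Mary", "had", "a", "little", "lamb"))
    ).toDF("id", "raw")

    // "filtered" keeps each term that is not in the (by default case-insensitive) stop word list.
    remover.transform(df).show(false)
  }
}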
|
chenc10/Spark-PAF
|
mllib/src/main/scala/org/apache/spark/ml/feature/StopWordsRemover.scala
|
Scala
|
apache-2.0
| 7,442 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.spark.jts.encoders
import java.lang
import org.apache.spark.sql.{Encoder, Encoders}
import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
import scala.reflect.runtime.universe._
/**
 * These encoders exist only to simplify the construction of DataFrame/Dataset DSL
 * functions. End users should obtain their default encoders via the Spark-recommended
 * pattern:
* {{{
* val spark: SparkSession = ...
* import spark.implicits._
* }}}
*
*/
private[jts] trait SparkDefaultEncoders {
implicit def stringEncoder: Encoder[String] = Encoders.STRING
implicit def jFloatEncoder: Encoder[lang.Float] = Encoders.FLOAT
implicit def doubleEncoder: Encoder[Double] = Encoders.scalaDouble
implicit def jDoubleEncoder: Encoder[lang.Double] = Encoders.DOUBLE
implicit def intEncoder: Encoder[Int] = Encoders.scalaInt
implicit def jBooleanEncoder: Encoder[lang.Boolean] = Encoders.BOOLEAN
implicit def booleanEncoder: Encoder[Boolean] = Encoders.scalaBoolean
implicit def arrayEncoder[T: TypeTag]: Encoder[Array[T]] = ExpressionEncoder()
}
private[jts] object SparkDefaultEncoders extends SparkDefaultEncoders
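/*
 * Editor's sketch (not part of the original file): illustrates why the implicits above exist.
 * A library-internal helper can use Dataset operations that require an Encoder without making
 * callers import spark.implicits._. The object and method names are illustrative only.
 */
private[jts] object SparkDefaultEncodersSketch extends SparkDefaultEncoders {
  import org.apache.spark.sql.Dataset

  // Compiles because booleanEncoder supplies the implicit Encoder[Boolean] needed by map.
  def nonEmpty(strings: Dataset[String]): Dataset[Boolean] = strings.map(_.nonEmpty)
}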
|
ddseapy/geomesa
|
geomesa-spark/geomesa-spark-jts/src/main/scala/org/locationtech/geomesa/spark/jts/encoders/SparkDefaultEncoders.scala
|
Scala
|
apache-2.0
| 1,633 |
package org.scaladebugger.test.steps
import org.scaladebugger.test.helpers.Stubs._
/**
 * Provides a test of performing basic step in/out/over operations in Scala code
 * involving iterations.
*
* @note Should have a class name of org.scaladebugger.test.steps.BasicIterations
*/
object BasicIterations {
def main(args: Array[String]) = {
val totalIterations = 3
// Test for comprehension
for (x <- 1 to totalIterations) {
noop(x)
}
// Test foreach
(1 to totalIterations).foreach {
noop
}
// Test map
(1 to totalIterations).map {
ret
}
// Test "reduce"
(1 to totalIterations).foldLeft(0) { (acc, i) =>
ret(i)
}
// Create a function object that loops
val myFunction = (x: Int) => (1 to x).foreach {
noop
}
// Create a method that loops
def myMethod(x: Int) = (1 to x).foreach {
noop
}
myFunction(totalIterations)
myMethod(totalIterations)
noop(None)
}
}
|
ensime/scala-debugger
|
scala-debugger-test/src/main/scala/org/scaladebugger/test/steps/BasicIterations.scala
|
Scala
|
apache-2.0
| 987 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.collection
import scala.collection.mutable.HashSet
import org.apache.spark.SparkFunSuite
class AppendOnlyMapSuite extends SparkFunSuite {
test("initialization") {
val goodMap1 = new AppendOnlyMap[Int, Int](1)
assert(goodMap1.size === 0)
val goodMap2 = new AppendOnlyMap[Int, Int](255)
assert(goodMap2.size === 0)
val goodMap3 = new AppendOnlyMap[Int, Int](256)
assert(goodMap3.size === 0)
intercept[IllegalArgumentException] {
new AppendOnlyMap[Int, Int](1 << 30) // Invalid map size: bigger than 2^29
}
intercept[IllegalArgumentException] {
new AppendOnlyMap[Int, Int](-1)
}
intercept[IllegalArgumentException] {
new AppendOnlyMap[Int, Int](0)
}
}
test("object keys and values") {
val map = new AppendOnlyMap[String, String]()
for (i <- 1 to 100) {
map("" + i) = "" + i
}
assert(map.size === 100)
for (i <- 1 to 100) {
assert(map("" + i) === "" + i)
}
assert(map("0") === null)
assert(map("101") === null)
assert(map(null) === null)
val set = new HashSet[(String, String)]
for ((k, v) <- map) { // Test the foreach method
set += ((k, v))
}
assert(set === (1 to 100).map(_.toString).map(x => (x, x)).toSet)
}
test("primitive keys and values") {
val map = new AppendOnlyMap[Int, Int]()
for (i <- 1 to 100) {
map(i) = i
}
assert(map.size === 100)
for (i <- 1 to 100) {
assert(map(i) === i)
}
assert(map(0) === null)
assert(map(101) === null)
val set = new HashSet[(Int, Int)]
for ((k, v) <- map) { // Test the foreach method
set += ((k, v))
}
assert(set === (1 to 100).map(x => (x, x)).toSet)
}
test("null keys") {
val map = new AppendOnlyMap[String, String]()
for (i <- 1 to 100) {
map("" + i) = "" + i
}
assert(map.size === 100)
assert(map(null) === null)
map(null) = "hello"
assert(map.size === 101)
assert(map(null) === "hello")
}
test("null values") {
val map = new AppendOnlyMap[String, String]()
for (i <- 1 to 100) {
map("" + i) = null
}
assert(map.size === 100)
assert(map("1") === null)
assert(map(null) === null)
assert(map.size === 100)
map(null) = null
assert(map.size === 101)
assert(map(null) === null)
}
test("changeValue") {
val map = new AppendOnlyMap[String, String]()
for (i <- 1 to 100) {
map("" + i) = "" + i
}
assert(map.size === 100)
for (i <- 1 to 100) {
val res = map.changeValue("" + i, (hadValue, oldValue) => {
assert(hadValue)
assert(oldValue === "" + i)
oldValue + "!"
})
assert(res === i + "!")
}
// Iterate from 101 to 400 to make sure the map grows a couple of times, because we had a
// bug where changeValue would return the wrong result when the map grew on that insert
for (i <- 101 to 400) {
val res = map.changeValue("" + i, (hadValue, oldValue) => {
assert(hadValue === false)
i + "!"
})
assert(res === i + "!")
}
assert(map.size === 400)
assert(map(null) === null)
map.changeValue(null, (hadValue, oldValue) => {
assert(hadValue === false)
"null!"
})
assert(map.size === 401)
map.changeValue(null, (hadValue, oldValue) => {
assert(hadValue)
assert(oldValue === "null!")
"null!!"
})
assert(map.size === 401)
}
test("inserting in capacity-1 map") {
val map = new AppendOnlyMap[String, String](1)
for (i <- 1 to 100) {
map("" + i) = "" + i
}
assert(map.size === 100)
for (i <- 1 to 100) {
assert(map("" + i) === "" + i)
}
}
test("destructive sort") {
val map = new AppendOnlyMap[String, String]()
for (i <- 1 to 100) {
map("" + i) = "" + i
}
map.update(null, "happy new year!")
try {
map.apply("1")
map.update("1", "2013")
map.changeValue("1", (hadValue, oldValue) => "2014")
map.iterator
} catch {
case e: IllegalStateException => fail()
}
val it = map.destructiveSortedIterator((key1: String, key2: String) => {
val x = if (key1 != null) key1.toInt else Int.MinValue
val y = if (key2 != null) key2.toInt else Int.MinValue
x.compareTo(y)
})
// Should be sorted by key
assert(it.hasNext)
var previous = it.next()
assert(previous == ((null, "happy new year!")))
previous = it.next()
assert(previous == (("1", "2014")))
while (it.hasNext) {
val kv = it.next()
assert(kv._1.toInt > previous._1.toInt)
previous = kv
}
// All subsequent calls to apply, update, changeValue and iterator should throw exception
intercept[AssertionError] { map.apply("1") }
intercept[AssertionError] { map.update("1", "2013") }
intercept[AssertionError] { map.changeValue("1", (hadValue, oldValue) => "2014") }
intercept[AssertionError] { map.iterator }
}
}
|
shaneknapp/spark
|
core/src/test/scala/org/apache/spark/util/collection/AppendOnlyMapSuite.scala
|
Scala
|
apache-2.0
| 5,818 |
package com.github.chengpohi.domain.search
import akka.http.scaladsl.server.Route
import com.github.chengpohi.infrastructure.util.AdjointUtils._
import com.github.chengpohi.infrastructure.{BaseController, Repository}
import org.apache.logging.log4j.LogManager
trait ActionQueryController extends BaseController {
repository: Repository =>
private val logger = LogManager.getLogger(ActionQueryController.this)
def queryRoutes: Route =
auth { user =>
post {
pathPrefix("query") {
parameters('q.as[String]) { q =>
interceptor.intercept(user, q).toJson
}
}
}
}
}
|
chengpohi/coolmarks
|
src/main/scala/com/github/chengpohi/domain/search/ActionQueryController.scala
|
Scala
|
apache-2.0
| 637 |
package me.archdev.utils
import de.flapdoodle.embed.process.runtime.Network._
import me.archdev.restapi.utils.db.{DatabaseConnector, DatabaseMigrationManager}
import ru.yandex.qatools.embed.postgresql.PostgresStarter
import ru.yandex.qatools.embed.postgresql.config.AbstractPostgresConfig.{Credentials, Net, Storage, Timeout}
import ru.yandex.qatools.embed.postgresql.config.PostgresConfig
import ru.yandex.qatools.embed.postgresql.distribution.Version
object InMemoryPostgresStorage {
val dbHost = getLocalHost.getHostAddress
val dbPort = 25535
val dbName = "database-name"
val dbUser = "user"
val dbPassword = "password"
val jdbcUrl = s"jdbc:postgresql://$dbHost:$dbPort/$dbName"
val psqlConfig = new PostgresConfig(
Version.Main.V9_6, new Net(dbHost, dbPort),
new Storage(dbName), new Timeout(),
new Credentials(dbUser, dbPassword)
)
val psqlInstance = PostgresStarter.getDefaultInstance
val flywayService = new DatabaseMigrationManager(jdbcUrl, dbUser, dbPassword)
val process = psqlInstance.prepare(psqlConfig).start()
flywayService.dropDatabase()
flywayService.migrateDatabaseSchema()
val databaseConnector = new DatabaseConnector(
InMemoryPostgresStorage.jdbcUrl,
InMemoryPostgresStorage.dbUser,
InMemoryPostgresStorage.dbPassword
)
}
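/*
 * Editor's sketch (not part of the original file): referencing the object from a test forces its
 * one-time initialization (embedded Postgres start plus Flyway migration). The trait and value
 * names below are illustrative only.
 */
trait PostgresTestSupport {
  // Touching the singleton triggers its initializer exactly once per JVM, starting the database.
  protected lazy val testDatabaseConnector = InMemoryPostgresStorage.databaseConnector
}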
|
ArchDev/akka-http-rest
|
src/test/scala/me/archdev/utils/InMemoryPostgresStorage.scala
|
Scala
|
mit
| 1,301 |
package com.twitter.scrooge.ast
import scala.collection.mutable
import com.twitter.scrooge.frontend.ScroogeInternalException
sealed abstract class Identifier extends IdNode {
  // toString is intentionally not overridden. Use "fullName"
  // instead to make the intent explicit.
def fullName: String
def toCamelCase: Identifier
def toTitleCase: Identifier
def toUpperCase: Identifier
def toLowerCase: Identifier
// to prevent accidental use of Identifier as String
private[scrooge] def +(str: String): String =
    throw new ScroogeInternalException("do not use \"+\" operation on Identifiers")
}
object Identifier {
// constructor
def apply(str: String): Identifier = {
assert(!str.isEmpty)
    val ids = str.split("\\.")
if (ids.size == 1)
SimpleID(ids.head)
else
QualifiedID(ids)
}
def toTitleCase(str: String): String = toCamelCase(str, true)
/**
* convert string to camel case, with the following fine print:
* - leading underscores are preserved
* - internal underscores are removed. Character following an underscore
* is converted to upper case.
* - first character (non underscore char) is upper case if
* firstCharUp is true, lower case if false
* - first character of the second and following parts (text between underscores)
* is always in upper case
* - if a part is all upper case it is converted to lower case (except for first character),
* in other cases case is preserved
*
* Examples: (original, camel case, title case)
* (gen_html_report, genHtmlReport, GenHtmlReport)
* (GEN_HTML_REPORT, genHtmlReport, GenHtmlReport)
* (Gen_HTMLReport, genHTMLReport, GenHTMLReport)
* (Gen_HTML_Report, genHtmlReport, GenHtmlReport)
* (GENHTMLREPORT, genhtmlreport, Genhtmlreport)
* (genhtmlreport, genhtmlreport, Genhtmlreport)
* (genHtmlReport, genHtmlReport, GenHtmlReport)
* (genHTMLReport, genHTMLReport, GenHtmlReport)
* (_genHtmlReport, _genHtmlReport, _GenHtmlReport)
*/
def toCamelCase(str: String, firstCharUp: Boolean = false): String = {
str.takeWhile(_ == '_') + str.
split('_').
filterNot(_.isEmpty).
zipWithIndex.map { case (part, ind) =>
val isAllUpperCase = part.forall { c => c.isUpper || !c.isLetter }
val isAllLowerCase = part.forall { c => c.isLower || !c.isLetter }
val isPartialCamelCase = !isAllUpperCase && !isAllLowerCase
val first =
if (firstCharUp) {
part(0).toUpper
}
else if (isPartialCamelCase) {
part(0)
}
else if (ind == 0 && !isAllUpperCase) {
part(0).toLower
} else {
part(0).toUpper
}
val rest = part.drop(1)
new mutable.StringBuilder(part.size).append(first).append(rest)
}.
mkString
}
}
case class SimpleID(name: String, origName: Option[String] = None) extends Identifier {
assert(!name.contains(".") && !name.isEmpty) // name is a simple string
val fullName: String = name
val originalName = origName.getOrElse(fullName)
def toCamelCase = SimpleID(Identifier.toCamelCase(name), origName = Some(originalName))
def toTitleCase = SimpleID(Identifier.toTitleCase(name), origName = Some(originalName))
def toUpperCase = SimpleID(name.toUpperCase, origName = Some(originalName))
def toLowerCase = SimpleID(name.toLowerCase, origName = Some(originalName))
// append and prepend only available for SimpleID
// To encourage correct usage of SimpleID, we intentionally don't use implicit
// string conversions
def append(other: String): SimpleID = {
assert(!other.isEmpty && !other.contains("."))
SimpleID(name + other)
}
def prepend(other: String): SimpleID = {
assert(!other.isEmpty && !other.contains("."))
SimpleID(other + name)
}
def addScope(scope: Identifier): QualifiedID =
QualifiedID(scope match {
case SimpleID(s, _) => Seq(s, this.name)
case QualifiedID(names) => names :+ name
})
}
case class QualifiedID(names: Seq[String]) extends Identifier {
assert(names.size >= 2) // at least a scope and a name
assert(!names.exists(_.isEmpty))
val fullName: String = names.mkString(".")
// case conversion only happens on the last id
def toCamelCase =
QualifiedID(names.dropRight(1) :+ Identifier.toCamelCase(names.last))
def toTitleCase =
QualifiedID(names.dropRight(1) :+ Identifier.toTitleCase(names.last))
def toUpperCase =
QualifiedID(names.dropRight(1) :+ names.last.toUpperCase)
def toLowerCase =
QualifiedID(names.dropRight(1) :+ names.last.toLowerCase)
def head: SimpleID = SimpleID(names.head)
def tail: Identifier = Identifier(names.tail.mkString("."))
def qualifier: Identifier = Identifier(names.dropRight(1).mkString("."))
def name: SimpleID = SimpleID(names.last)
}
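/*
 * Editor's sketch (not part of the original file): exercises the conversions documented above.
 * The object name is illustrative only; expected values follow the toCamelCase scaladoc examples.
 */
private[scrooge] object IdentifierExamples {
  def demo(): Unit = {
    // A plain string becomes a SimpleID, a dotted string a QualifiedID.
    assert(Identifier("gen_html_report") == SimpleID("gen_html_report"))
    assert(Identifier("thrift.test.Struct").isInstanceOf[QualifiedID])
    assert(Identifier("thrift.test.Struct").fullName == "thrift.test.Struct")

    // Case conversions follow the rules documented on toCamelCase.
    assert(Identifier.toCamelCase("gen_html_report") == "genHtmlReport")
    assert(Identifier.toTitleCase("gen_html_report") == "GenHtmlReport")
    assert(SimpleID("_genHtmlReport").toTitleCase.fullName == "_GenHtmlReport")
  }
}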
|
tellapart/scrooge
|
scrooge-generator/src/main/scala/com/twitter/scrooge/AST/Identifier.scala
|
Scala
|
apache-2.0
| 4,925 |
package spgui
import org.scalajs.dom.document
import scala.scalajs.js.JSApp
import scala.scalajs.js.annotation.JSExport
object Main extends JSApp {
@JSExport
override def main(): Unit = {
LoadingWidgets.loadWidgets
Layout().renderIntoDOM(document.getElementById("spgui-root"))
}
}
|
kristoferB/SP
|
sperica/frontend/src/main/scala/spgui/Main.scala
|
Scala
|
mit
| 298 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s.submit.submitsteps
import io.fabric8.kubernetes.api.model.{Container, ContainerBuilder, HasMetadata, Pod, PodBuilder}
import org.apache.spark.SparkConf
/**
 * Represents the components and characteristics of a Spark driver. The driver can be thought of
 * as comprising the driver pod itself, any other Kubernetes resources that the driver pod
 * depends on, and the SparkConf that should be supplied to the Spark application. The driver
 * container should be operated on via the dedicated field of this case class rather than by
 * editing the container embedded in the pod. The driver container should be attached at the
 * end of executing all submission steps.
*/
private[spark] case class KubernetesDriverSpec(
driverPod: Pod,
driverContainer: Container,
otherKubernetesResources: Seq[HasMetadata],
driverSparkConf: SparkConf)
private[spark] object KubernetesDriverSpec {
def initialSpec(initialSparkConf: SparkConf): KubernetesDriverSpec = {
KubernetesDriverSpec(
// Set new metadata and a new spec so that submission steps can use
// PodBuilder#editMetadata() and/or PodBuilder#editSpec() safely.
new PodBuilder().withNewMetadata().endMetadata().withNewSpec().endSpec().build(),
new ContainerBuilder().build(),
Seq.empty[HasMetadata],
initialSparkConf.clone())
}
}
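/*
 * Editor's sketch (not part of the original file): shows the intended usage pattern described
 * above. A submission step copies the spec, editing the pod and the dedicated driverContainer
 * field rather than the container embedded in the pod. The label and env var names are
 * illustrative only.
 */
private[spark] object KubernetesDriverSpecSketch {
  def addLabelAndEnv(spec: KubernetesDriverSpec): KubernetesDriverSpec = {
    // Add a label to the driver pod's metadata.
    val labeledPod = new PodBuilder(spec.driverPod)
      .editMetadata()
        .addToLabels("example-label", "example-value")
        .endMetadata()
      .build()
    // Add an environment variable to the driver container, kept separate from the pod.
    val containerWithEnv = new ContainerBuilder(spec.driverContainer)
      .addNewEnv()
        .withName("EXAMPLE_ENV")
        .withValue("example")
        .endEnv()
      .build()
    spec.copy(driverPod = labeledPod, driverContainer = containerWithEnv)
  }
}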
|
publicRoman/spark
|
resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/submitsteps/KubernetesDriverSpec.scala
|
Scala
|
apache-2.0
| 2,200 |
/*
* Copyright 2011 javaBin
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package no.java.submitit.app
import _root_.java.io.Serializable
import org.apache.wicket.extensions.markup.html.captcha.CaptchaImageResource
import org.apache.wicket.markup.html.image.Image
import scala.math._
import no.java.submitit.config.Keys._
class Captcha extends Serializable {
private def randomString() = {
    def randomInt(min: Int, max: Int): Int = (random * (max - min)).toInt + min
    val captchaLength = SubmititApp.intSetting(captchaLengthInt) - 1
    val res = for (i <- 0 to captchaLength) yield randomInt('a', 'z').toByte
new String(res.toArray)
}
val imagePass = randomString()
val image = new Image("captchaImage", new CaptchaImageResource(imagePass))
var password: String = _
}
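/*
 * Editor's sketch (not part of the original file): a plausible way a form handler might validate
 * the submitted answer against the text rendered in the captcha image. The object and method
 * names are illustrative only.
 */
object CaptchaUsageSketch {
  // The submitted text must exactly match the random string used to render the image.
  def isAnswerCorrect(captcha: Captcha): Boolean =
    captcha.password != null && captcha.password == captcha.imagePass
}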
|
javaBin/submitit
|
submitit-webapp/src/main/scala/no/java/submitit/app/Captcha.scala
|
Scala
|
mit
| 1,367 |
package xyz.hyperreal.prolog.builtin
import xyz.hyperreal.bvm.VM
import java.io.{BufferedReader, FileReader, FileWriter, PrintWriter}
import xyz.hyperreal.char_reader.CharReader
import xyz.hyperreal.prolog.{
DataStream,
SinkStream,
SourceStream,
SystemInput,
SystemOutput,
TextSinkStream,
TextSourceStream,
UserInput,
UserOutput,
list2array
}
import scala.collection.mutable
object Streams {
var input: SourceStream = UserInput
var output: SinkStream = UserOutput
val aliases: mutable.Map[Symbol, DataStream] =
mutable.HashMap[Symbol, DataStream](
Symbol("user_input") -> UserInput,
Symbol("user_output") -> UserOutput,
Symbol("stdin") -> SystemInput,
Symbol("stdout") -> SystemOutput
)
def apply(s: Any): Any =
s match {
case stream: DataStream => stream
case alias: Symbol =>
aliases get alias match {
case Some(stream1: DataStream) => stream1
case _ => alias
}
case _ => s
}
def current_input(vm: VM, pos: IndexedSeq[CharReader], stream: Any): Boolean = vm.unify(stream, input)
def current_output(vm: VM, pos: IndexedSeq[CharReader], stream: Any): Boolean = vm.unify(stream, output)
def set_input(vm: VM, pos: IndexedSeq[CharReader], stream: Any): Boolean =
Streams(stream) match {
case _: vm.Variable => sys.error("set_input: input stream must be given")
case s: SourceStream =>
input = s
true
case _ => sys.error("set_input: stream is not a source stream")
}
def set_output(vm: VM, pos: IndexedSeq[CharReader], stream: Any): Boolean =
Streams(stream) match {
case _: vm.Variable => sys.error("set_output: output stream must be given")
case s: SinkStream =>
output = s
true
case _ => sys.error("set_output: stream is not a sink stream")
}
def open(vm: VM, pos: IndexedSeq[CharReader], file: Any, mode: Any, stream: Any, options: Any): Boolean =
(file, mode, stream, list2array(options) map (_.toList)) match {
case (f: String, m @ (Symbol("read") | Symbol("write") | Symbol("append")), s: vm.Variable, o) =>
val s1 =
m match {
case Symbol("read") =>
new TextSourceStream(new BufferedReader(new FileReader(f))) {
val file_name: Option[String] = Some(f)
val alias: Option[Symbol] = None
}
case Symbol("write") =>
new TextSinkStream(new PrintWriter(f), false) {
val file_name: Option[String] = Some(f)
val alias: Option[Symbol] = None
}
case Symbol("append") =>
new TextSinkStream(new PrintWriter(new FileWriter(f, true)), true) {
val file_name: Option[String] = Some(f)
val alias: Option[Symbol] = None
}
}
s bind s1
case _ => sys.error("open/4: invalid arguments")
}
def close(vm: VM, pos: IndexedSeq[CharReader], stream: Any, options: Any): Unit =
stream match {
case _: vm.Variable => sys.error("close/2: stream must be given")
case s: DataStream => s.close
case _ => sys.error("close/2: invalid arguments")
}
def flush_output(vm: VM, pos: IndexedSeq[CharReader], stream: Any): Unit =
stream match {
case _: vm.Variable => sys.error("flush_output/1: output stream must be given")
case s: SinkStream => s.flush
case _ => sys.error("flush_output/1: expected output stream")
}
}
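/*
 * Editor's sketch (not part of the original file): shows how the apply method above resolves
 * stream aliases; anything that is not a known alias or a DataStream is returned unchanged.
 * The object name is illustrative only.
 */
object StreamsAliasSketch {
  def demo(): Unit = {
    assert(Streams(Symbol("stdin")) == SystemInput)                      // known alias -> its DataStream
    assert(Streams(Symbol("no_such_alias")) == Symbol("no_such_alias"))  // unknown alias passes through
    assert(Streams(UserOutput) == UserOutput)                            // a DataStream is returned as-is
  }
}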
|
edadma/funl
|
prolog/src/main/scala/xyz/hyperreal/prolog/builtin/Streams.scala
|
Scala
|
mit
| 3,592 |