Dataset columns (each record below lists these fields in this order):
  code        string, length 5 to 1M characters
  repo_name   string, length 5 to 109
  path        string, length 6 to 208
  language    string, 1 class
  license     string, 15 classes
  size        int64, 5 to 1M
package tu.providers

import tu.coreservice.utilities.Configurator
import java.net.{URLConnection, InetSocketAddress, URL, URLEncoder}
import java.io.{InputStreamReader, BufferedReader}

/**
 * Provider for WordNetAnnotator.
 * @author alex toschev
 * Date: 6/25/12
 * Time: 7:00 PM
 */
/**
 * Sends
 */
class WordnetAnnotatorProvider extends AnnotatorProvider {

  def annotate(word: String): List[String] = {
    val url: URL = new URL("http://wordnetweb.princeton.edu/perl/webwn?s=" + URLEncoder.encode(word, "UTF8"))
    var res: List[String] = List[String]()
    // Sends request to wordnet
    var rawString = ""
    try {
      val connection = if (Configurator.proxyAddress().useProxy) {
        val proxy = new java.net.Proxy(java.net.Proxy.Type.HTTP,
          new InetSocketAddress(Configurator.proxyAddress().proxyHost, Configurator.proxyAddress().proxyPort))
        url.openConnection(proxy)
      } else {
        url.openConnection()
      }
      // setup connection: we need only input
      connection.setDoInput(true)
      connection.setDoOutput(false)
      connection.setRequestProperty("User-Agent",
        "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.0.3705; .NET CLR 1.1.4322; .NET CLR 1.2.30703)")
      // open stream
      val in = new BufferedReader(new InputStreamReader(connection.getInputStream))
      val xHtml = new StringBuilder
      // convert to string (scala xml trying to use DTD while load)
      var str = ""
      str = in.readLine()
      while (str != null) {
        xHtml.append(str)
        str = in.readLine()
      }
      in.close()
      rawString = xHtml.toString()
    } catch {
      case e: Exception => {
        // any exception with internet connection
        return res
      }
    }
    // get only li segment
    val targetString = if (rawString.indexOf("<li>") > 0 && rawString.indexOf("</li>") > 0) {
      rawString.substring(rawString.indexOf("<li>"), rawString.indexOf("</li>") + 5)
    } else {
      return res
      ""
    }
    val data = scala.xml.XML.loadString(targetString)
    (data \\ "a").foreach(a => {
      // skip system symbols
      if (!a.text.contains("(n)") && !a.text.contains("S:")) {
        if (res == null) res = List(a.text)
        else res ::= a.text
      }
    })
    res
  }

  /**
   * priority of annotator. 0 top most local repository
   * @return
   */
  def priority() = 2

  /**
   * indicates that this is a local KB Annotator
   * @return true if local annotator
   */
  def isLocal() = false

  def apply(word: String) = throw new Exception("Method is not supported by WordnetAnnotatorProvider")
}
tu-team/2
coreservice.annotator/src/main/scala/tu/providers/WordnetAnnotatorProvider.scala
Scala
gpl-3.0
2,653
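A hedged usage sketch for the record above: it assumes the surrounding tu.* project supplies the AnnotatorProvider trait and a configured Configurator, and that outbound network access is available, since annotate() performs a live HTTP request to wordnetweb.princeton.edu. The demo object name and the sample word are illustrative only.

// Illustration only: relies on WordnetAnnotatorProvider from the record above
// and on outbound network access; not part of the original repository.
object WordnetAnnotatorDemo {
  def main(args: Array[String]): Unit = {
    val provider = new WordnetAnnotatorProvider
    // Returns the <a> texts scraped from the WordNet results page, or an
    // empty list if the request or the parsing fails.
    val annotations: List[String] = provider.annotate("bank")
    annotations.foreach(println)
  }
}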
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.ui.jobs import java.net.URLEncoder import java.util.Date import java.util.concurrent.TimeUnit import javax.servlet.http.HttpServletRequest import scala.collection.mutable.{HashMap, HashSet} import scala.xml.{Node, Unparsed} import org.apache.commons.lang3.StringEscapeUtils import org.apache.spark.scheduler.TaskLocality import org.apache.spark.status._ import org.apache.spark.status.api.v1._ import org.apache.spark.ui._ import org.apache.spark.util.Utils /** Page showing statistics and task list for a given stage */ private[ui] class StagePage(parent: StagesTab, store: AppStatusStore) extends WebUIPage("stage") { import ApiHelper._ private val TIMELINE_LEGEND = { <div class="legend-area"> <svg> { val legendPairs = List(("scheduler-delay-proportion", "Scheduler Delay"), ("deserialization-time-proportion", "Task Deserialization Time"), ("shuffle-read-time-proportion", "Shuffle Read Time"), ("executor-runtime-proportion", "Executor Computing Time"), ("shuffle-write-time-proportion", "Shuffle Write Time"), ("serialization-time-proportion", "Result Serialization Time"), ("getting-result-time-proportion", "Getting Result Time")) legendPairs.zipWithIndex.map { case ((classAttr, name), index) => <rect x={5 + (index / 3) * 210 + "px"} y={10 + (index % 3) * 15 + "px"} width="10px" height="10px" class={classAttr}></rect> <text x={25 + (index / 3) * 210 + "px"} y={20 + (index % 3) * 15 + "px"}>{name}</text> } } </svg> </div> } // TODO: We should consider increasing the number of this parameter over time // if we find that it's okay. 
private val MAX_TIMELINE_TASKS = parent.conf.getInt("spark.ui.timeline.tasks.maximum", 1000) private def getLocalitySummaryString(localitySummary: Map[String, Long]): String = { val names = Map( TaskLocality.PROCESS_LOCAL.toString() -> "Process local", TaskLocality.NODE_LOCAL.toString() -> "Node local", TaskLocality.RACK_LOCAL.toString() -> "Rack local", TaskLocality.ANY.toString() -> "Any") val localityNamesAndCounts = names.flatMap { case (key, name) => localitySummary.get(key).map { count => s"$name: $count" } }.toSeq localityNamesAndCounts.sorted.mkString("; ") } def render(request: HttpServletRequest): Seq[Node] = { // stripXSS is called first to remove suspicious characters used in XSS attacks val parameterId = UIUtils.stripXSS(request.getParameter("id")) require(parameterId != null && parameterId.nonEmpty, "Missing id parameter") val parameterAttempt = UIUtils.stripXSS(request.getParameter("attempt")) require(parameterAttempt != null && parameterAttempt.nonEmpty, "Missing attempt parameter") val parameterTaskPage = UIUtils.stripXSS(request.getParameter("task.page")) val parameterTaskSortColumn = UIUtils.stripXSS(request.getParameter("task.sort")) val parameterTaskSortDesc = UIUtils.stripXSS(request.getParameter("task.desc")) val parameterTaskPageSize = UIUtils.stripXSS(request.getParameter("task.pageSize")) val parameterTaskPrevPageSize = UIUtils.stripXSS(request.getParameter("task.prevPageSize")) val taskPage = Option(parameterTaskPage).map(_.toInt).getOrElse(1) val taskSortColumn = Option(parameterTaskSortColumn).map { sortColumn => UIUtils.decodeURLParameter(sortColumn) }.getOrElse("Index") val taskSortDesc = Option(parameterTaskSortDesc).map(_.toBoolean).getOrElse(false) val taskPageSize = Option(parameterTaskPageSize).map(_.toInt).getOrElse(100) val taskPrevPageSize = Option(parameterTaskPrevPageSize).map(_.toInt).getOrElse(taskPageSize) val stageId = parameterId.toInt val stageAttemptId = parameterAttempt.toInt val stageHeader = s"Details for Stage $stageId (Attempt $stageAttemptId)" val stageData = parent.store .asOption(parent.store.stageAttempt(stageId, stageAttemptId, details = false)) .getOrElse { val content = <div id="no-info"> <p>No information to display for Stage {stageId} (Attempt {stageAttemptId})</p> </div> return UIUtils.headerSparkPage(request, stageHeader, content, parent) } val localitySummary = store.localitySummary(stageData.stageId, stageData.attemptId) val totalTasks = taskCount(stageData) if (totalTasks == 0) { val content = <div> <h4>Summary Metrics</h4> No tasks have started yet <h4>Tasks</h4> No tasks have started yet </div> return UIUtils.headerSparkPage(request, stageHeader, content, parent) } val storedTasks = store.taskCount(stageData.stageId, stageData.attemptId) val numCompleted = stageData.numCompleteTasks val totalTasksNumStr = if (totalTasks == storedTasks) { s"$totalTasks" } else { s"$storedTasks, showing ${totalTasks}" } val summary = <div> <ul class="unstyled"> <li> <strong>Total Time Across All Tasks: </strong> {UIUtils.formatDuration(stageData.executorRunTime)} </li> <li> <strong>Locality Level Summary: </strong> {getLocalitySummaryString(localitySummary)} </li> {if (hasInput(stageData)) { <li> <strong>Input Size / Records: </strong> {s"${Utils.bytesToString(stageData.inputBytes)} / ${stageData.inputRecords}"} </li> }} {if (hasOutput(stageData)) { <li> <strong>Output: </strong> {s"${Utils.bytesToString(stageData.outputBytes)} / ${stageData.outputRecords}"} </li> }} {if (hasShuffleRead(stageData)) { <li> <strong>Shuffle Read: </strong> 
{s"${Utils.bytesToString(stageData.shuffleReadBytes)} / " + s"${stageData.shuffleReadRecords}"} </li> }} {if (hasShuffleWrite(stageData)) { <li> <strong>Shuffle Write: </strong> {s"${Utils.bytesToString(stageData.shuffleWriteBytes)} / " + s"${stageData.shuffleWriteRecords}"} </li> }} {if (hasBytesSpilled(stageData)) { <li> <strong>Shuffle Spill (Memory): </strong> {Utils.bytesToString(stageData.memoryBytesSpilled)} </li> <li> <strong>Shuffle Spill (Disk): </strong> {Utils.bytesToString(stageData.diskBytesSpilled)} </li> }} </ul> </div> val showAdditionalMetrics = <div> <span class="expand-additional-metrics"> <span class="expand-additional-metrics-arrow arrow-closed"></span> <a>Show Additional Metrics</a> </span> <div class="additional-metrics collapsed"> <ul> <li> <input type="checkbox" id="select-all-metrics"/> <span class="additional-metric-title"><em>(De)select All</em></span> </li> <li> <span data-toggle="tooltip" title={ToolTips.SCHEDULER_DELAY} data-placement="right"> <input type="checkbox" name={TaskDetailsClassNames.SCHEDULER_DELAY}/> <span class="additional-metric-title">Scheduler Delay</span> </span> </li> <li> <span data-toggle="tooltip" title={ToolTips.TASK_DESERIALIZATION_TIME} data-placement="right"> <input type="checkbox" name={TaskDetailsClassNames.TASK_DESERIALIZATION_TIME}/> <span class="additional-metric-title">Task Deserialization Time</span> </span> </li> {if (stageData.shuffleReadBytes > 0) { <li> <span data-toggle="tooltip" title={ToolTips.SHUFFLE_READ_BLOCKED_TIME} data-placement="right"> <input type="checkbox" name={TaskDetailsClassNames.SHUFFLE_READ_BLOCKED_TIME}/> <span class="additional-metric-title">Shuffle Read Blocked Time</span> </span> </li> <li> <span data-toggle="tooltip" title={ToolTips.SHUFFLE_READ_REMOTE_SIZE} data-placement="right"> <input type="checkbox" name={TaskDetailsClassNames.SHUFFLE_READ_REMOTE_SIZE}/> <span class="additional-metric-title">Shuffle Remote Reads</span> </span> </li> }} <li> <span data-toggle="tooltip" title={ToolTips.RESULT_SERIALIZATION_TIME} data-placement="right"> <input type="checkbox" name={TaskDetailsClassNames.RESULT_SERIALIZATION_TIME}/> <span class="additional-metric-title">Result Serialization Time</span> </span> </li> <li> <span data-toggle="tooltip" title={ToolTips.GETTING_RESULT_TIME} data-placement="right"> <input type="checkbox" name={TaskDetailsClassNames.GETTING_RESULT_TIME}/> <span class="additional-metric-title">Getting Result Time</span> </span> </li> <li> <span data-toggle="tooltip" title={ToolTips.PEAK_EXECUTION_MEMORY} data-placement="right"> <input type="checkbox" name={TaskDetailsClassNames.PEAK_EXECUTION_MEMORY}/> <span class="additional-metric-title">Peak Execution Memory</span> </span> </li> </ul> </div> </div> val stageGraph = parent.store.asOption(parent.store.operationGraphForStage(stageId)) val dagViz = UIUtils.showDagVizForStage(stageId, stageGraph) val accumulableHeaders: Seq[String] = Seq("Accumulable", "Value") def accumulableRow(acc: AccumulableInfo): Seq[Node] = { if (acc.name != null && acc.value != null) { <tr><td>{acc.name}</td><td>{acc.value}</td></tr> } else { Nil } } val accumulableTable = UIUtils.listingTable( accumulableHeaders, accumulableRow, stageData.accumulatorUpdates.toSeq) val page: Int = { // If the user has changed to a larger page size, then go to page 1 in order to avoid // IndexOutOfBoundsException. 
if (taskPageSize <= taskPrevPageSize) { taskPage } else { 1 } } val currentTime = System.currentTimeMillis() val (taskTable, taskTableHTML) = try { val _taskTable = new TaskPagedTable( stageData, UIUtils.prependBaseUri(request, parent.basePath) + s"/stages/stage/?id=${stageId}&attempt=${stageAttemptId}", currentTime, pageSize = taskPageSize, sortColumn = taskSortColumn, desc = taskSortDesc, store = parent.store ) (_taskTable, _taskTable.table(page)) } catch { case e @ (_ : IllegalArgumentException | _ : IndexOutOfBoundsException) => val errorMessage = <div class="alert alert-error"> <p>Error while rendering stage table:</p> <pre> {Utils.exceptionString(e)} </pre> </div> (null, errorMessage) } val jsForScrollingDownToTaskTable = <script> {Unparsed { """ |$(function() { | if (/.*&task.sort=.*$/.test(location.search)) { | var topOffset = $("#tasks-section").offset().top; | $("html,body").animate({scrollTop: topOffset}, 200); | } |}); """.stripMargin } } </script> val metricsSummary = store.taskSummary(stageData.stageId, stageData.attemptId, Array(0, 0.25, 0.5, 0.75, 1.0)) val summaryTable = metricsSummary.map { metrics => def timeQuantiles(data: IndexedSeq[Double]): Seq[Node] = { data.map { millis => <td>{UIUtils.formatDuration(millis.toLong)}</td> } } def sizeQuantiles(data: IndexedSeq[Double]): Seq[Node] = { data.map { size => <td>{Utils.bytesToString(size.toLong)}</td> } } def sizeQuantilesWithRecords( data: IndexedSeq[Double], records: IndexedSeq[Double]) : Seq[Node] = { data.zip(records).map { case (d, r) => <td>{s"${Utils.bytesToString(d.toLong)} / ${r.toLong}"}</td> } } def titleCell(title: String, tooltip: String): Seq[Node] = { <td> <span data-toggle="tooltip" title={tooltip} data-placement="right"> {title} </span> </td> } def simpleTitleCell(title: String): Seq[Node] = <td>{title}</td> val deserializationQuantiles = titleCell("Task Deserialization Time", ToolTips.TASK_DESERIALIZATION_TIME) ++ timeQuantiles(metrics.executorDeserializeTime) val serviceQuantiles = simpleTitleCell("Duration") ++ timeQuantiles(metrics.executorRunTime) val gcQuantiles = titleCell("GC Time", ToolTips.GC_TIME) ++ timeQuantiles(metrics.jvmGcTime) val serializationQuantiles = titleCell("Result Serialization Time", ToolTips.RESULT_SERIALIZATION_TIME) ++ timeQuantiles(metrics.resultSerializationTime) val gettingResultQuantiles = titleCell("Getting Result Time", ToolTips.GETTING_RESULT_TIME) ++ timeQuantiles(metrics.gettingResultTime) val peakExecutionMemoryQuantiles = titleCell("Peak Execution Memory", ToolTips.PEAK_EXECUTION_MEMORY) ++ sizeQuantiles(metrics.peakExecutionMemory) // The scheduler delay includes the network delay to send the task to the worker // machine and to send back the result (but not the time to fetch the task result, // if it needed to be fetched from the block manager on the worker). 
val schedulerDelayQuantiles = titleCell("Scheduler Delay", ToolTips.SCHEDULER_DELAY) ++ timeQuantiles(metrics.schedulerDelay) def inputQuantiles: Seq[Node] = { simpleTitleCell("Input Size / Records") ++ sizeQuantilesWithRecords(metrics.inputMetrics.bytesRead, metrics.inputMetrics.recordsRead) } def outputQuantiles: Seq[Node] = { simpleTitleCell("Output Size / Records") ++ sizeQuantilesWithRecords(metrics.outputMetrics.bytesWritten, metrics.outputMetrics.recordsWritten) } def shuffleReadBlockedQuantiles: Seq[Node] = { titleCell("Shuffle Read Blocked Time", ToolTips.SHUFFLE_READ_BLOCKED_TIME) ++ timeQuantiles(metrics.shuffleReadMetrics.fetchWaitTime) } def shuffleReadTotalQuantiles: Seq[Node] = { titleCell("Shuffle Read Size / Records", ToolTips.SHUFFLE_READ) ++ sizeQuantilesWithRecords(metrics.shuffleReadMetrics.readBytes, metrics.shuffleReadMetrics.readRecords) } def shuffleReadRemoteQuantiles: Seq[Node] = { titleCell("Shuffle Remote Reads", ToolTips.SHUFFLE_READ_REMOTE_SIZE) ++ sizeQuantiles(metrics.shuffleReadMetrics.remoteBytesRead) } def shuffleWriteQuantiles: Seq[Node] = { simpleTitleCell("Shuffle Write Size / Records") ++ sizeQuantilesWithRecords(metrics.shuffleWriteMetrics.writeBytes, metrics.shuffleWriteMetrics.writeRecords) } def memoryBytesSpilledQuantiles: Seq[Node] = { simpleTitleCell("Shuffle spill (memory)") ++ sizeQuantiles(metrics.memoryBytesSpilled) } def diskBytesSpilledQuantiles: Seq[Node] = { simpleTitleCell("Shuffle spill (disk)") ++ sizeQuantiles(metrics.diskBytesSpilled) } val listings: Seq[Seq[Node]] = Seq( <tr>{serviceQuantiles}</tr>, <tr class={TaskDetailsClassNames.SCHEDULER_DELAY}>{schedulerDelayQuantiles}</tr>, <tr class={TaskDetailsClassNames.TASK_DESERIALIZATION_TIME}> {deserializationQuantiles} </tr> <tr>{gcQuantiles}</tr>, <tr class={TaskDetailsClassNames.RESULT_SERIALIZATION_TIME}> {serializationQuantiles} </tr>, <tr class={TaskDetailsClassNames.GETTING_RESULT_TIME}>{gettingResultQuantiles}</tr>, <tr class={TaskDetailsClassNames.PEAK_EXECUTION_MEMORY}> {peakExecutionMemoryQuantiles} </tr>, if (hasInput(stageData)) <tr>{inputQuantiles}</tr> else Nil, if (hasOutput(stageData)) <tr>{outputQuantiles}</tr> else Nil, if (hasShuffleRead(stageData)) { <tr class={TaskDetailsClassNames.SHUFFLE_READ_BLOCKED_TIME}> {shuffleReadBlockedQuantiles} </tr> <tr>{shuffleReadTotalQuantiles}</tr> <tr class={TaskDetailsClassNames.SHUFFLE_READ_REMOTE_SIZE}> {shuffleReadRemoteQuantiles} </tr> } else { Nil }, if (hasShuffleWrite(stageData)) <tr>{shuffleWriteQuantiles}</tr> else Nil, if (hasBytesSpilled(stageData)) <tr>{memoryBytesSpilledQuantiles}</tr> else Nil, if (hasBytesSpilled(stageData)) <tr>{diskBytesSpilledQuantiles}</tr> else Nil) val quantileHeaders = Seq("Metric", "Min", "25th percentile", "Median", "75th percentile", "Max") // The summary table does not use CSS to stripe rows, which doesn't work with hidden // rows (instead, JavaScript in table.js is used to stripe the non-hidden rows). 
UIUtils.listingTable( quantileHeaders, identity[Seq[Node]], listings, fixedWidth = true, id = Some("task-summary-table"), stripeRowsWithCss = false) } val executorTable = new ExecutorTable(stageData, parent.store) val maybeAccumulableTable: Seq[Node] = if (hasAccumulators(stageData)) { <h4>Accumulators</h4> ++ accumulableTable } else Seq() val aggMetrics = <span class="collapse-aggregated-metrics collapse-table" onClick="collapseTable('collapse-aggregated-metrics','aggregated-metrics')"> <h4> <span class="collapse-table-arrow arrow-open"></span> <a>Aggregated Metrics by Executor</a> </h4> </span> <div class="aggregated-metrics collapsible-table"> {executorTable.toNodeSeq} </div> val content = summary ++ dagViz ++ showAdditionalMetrics ++ makeTimeline( // Only show the tasks in the table Option(taskTable).map(_.dataSource.tasks).getOrElse(Nil), currentTime) ++ <h4>Summary Metrics for <a href="#tasks-section">{numCompleted} Completed Tasks</a></h4> ++ <div>{summaryTable.getOrElse("No tasks have reported metrics yet.")}</div> ++ aggMetrics ++ maybeAccumulableTable ++ <span id="tasks-section" class="collapse-aggregated-tasks collapse-table" onClick="collapseTable('collapse-aggregated-tasks','aggregated-tasks')"> <h4> <span class="collapse-table-arrow arrow-open"></span> <a>Tasks ({totalTasksNumStr})</a> </h4> </span> ++ <div class="aggregated-tasks collapsible-table"> {taskTableHTML ++ jsForScrollingDownToTaskTable} </div> UIUtils.headerSparkPage(request, stageHeader, content, parent, showVisualization = true) } def makeTimeline(tasks: Seq[TaskData], currentTime: Long): Seq[Node] = { val executorsSet = new HashSet[(String, String)] var minLaunchTime = Long.MaxValue var maxFinishTime = Long.MinValue val executorsArrayStr = tasks.sortBy(-_.launchTime.getTime()).take(MAX_TIMELINE_TASKS).map { taskInfo => val executorId = taskInfo.executorId val host = taskInfo.host executorsSet += ((executorId, host)) val launchTime = taskInfo.launchTime.getTime() val finishTime = taskInfo.duration.map(taskInfo.launchTime.getTime() + _) .getOrElse(currentTime) val totalExecutionTime = finishTime - launchTime minLaunchTime = launchTime.min(minLaunchTime) maxFinishTime = finishTime.max(maxFinishTime) def toProportion(time: Long) = time.toDouble / totalExecutionTime * 100 val metricsOpt = taskInfo.taskMetrics val shuffleReadTime = metricsOpt.map(_.shuffleReadMetrics.fetchWaitTime).getOrElse(0L) val shuffleReadTimeProportion = toProportion(shuffleReadTime) val shuffleWriteTime = (metricsOpt.map(_.shuffleWriteMetrics.writeTime).getOrElse(0L) / 1e6).toLong val shuffleWriteTimeProportion = toProportion(shuffleWriteTime) val serializationTime = metricsOpt.map(_.resultSerializationTime).getOrElse(0L) val serializationTimeProportion = toProportion(serializationTime) val deserializationTime = metricsOpt.map(_.executorDeserializeTime).getOrElse(0L) val deserializationTimeProportion = toProportion(deserializationTime) val gettingResultTime = AppStatusUtils.gettingResultTime(taskInfo) val gettingResultTimeProportion = toProportion(gettingResultTime) val schedulerDelay = AppStatusUtils.schedulerDelay(taskInfo) val schedulerDelayProportion = toProportion(schedulerDelay) val executorOverhead = serializationTime + deserializationTime val executorRunTime = if (taskInfo.duration.isDefined) { totalExecutionTime - executorOverhead - gettingResultTime } else { metricsOpt.map(_.executorRunTime).getOrElse( totalExecutionTime - executorOverhead - gettingResultTime) } val executorComputingTime = executorRunTime - shuffleReadTime - 
shuffleWriteTime val executorComputingTimeProportion = math.max(100 - schedulerDelayProportion - shuffleReadTimeProportion - shuffleWriteTimeProportion - serializationTimeProportion - deserializationTimeProportion - gettingResultTimeProportion, 0) val schedulerDelayProportionPos = 0 val deserializationTimeProportionPos = schedulerDelayProportionPos + schedulerDelayProportion val shuffleReadTimeProportionPos = deserializationTimeProportionPos + deserializationTimeProportion val executorRuntimeProportionPos = shuffleReadTimeProportionPos + shuffleReadTimeProportion val shuffleWriteTimeProportionPos = executorRuntimeProportionPos + executorComputingTimeProportion val serializationTimeProportionPos = shuffleWriteTimeProportionPos + shuffleWriteTimeProportion val gettingResultTimeProportionPos = serializationTimeProportionPos + serializationTimeProportion val index = taskInfo.index val attempt = taskInfo.attempt val svgTag = if (totalExecutionTime == 0) { // SPARK-8705: Avoid invalid attribute error in JavaScript if execution time is 0 """<svg class="task-assignment-timeline-duration-bar"></svg>""" } else { s"""<svg class="task-assignment-timeline-duration-bar"> |<rect class="scheduler-delay-proportion" |x="$schedulerDelayProportionPos%" y="0px" height="26px" |width="$schedulerDelayProportion%"></rect> |<rect class="deserialization-time-proportion" |x="$deserializationTimeProportionPos%" y="0px" height="26px" |width="$deserializationTimeProportion%"></rect> |<rect class="shuffle-read-time-proportion" |x="$shuffleReadTimeProportionPos%" y="0px" height="26px" |width="$shuffleReadTimeProportion%"></rect> |<rect class="executor-runtime-proportion" |x="$executorRuntimeProportionPos%" y="0px" height="26px" |width="$executorComputingTimeProportion%"></rect> |<rect class="shuffle-write-time-proportion" |x="$shuffleWriteTimeProportionPos%" y="0px" height="26px" |width="$shuffleWriteTimeProportion%"></rect> |<rect class="serialization-time-proportion" |x="$serializationTimeProportionPos%" y="0px" height="26px" |width="$serializationTimeProportion%"></rect> |<rect class="getting-result-time-proportion" |x="$gettingResultTimeProportionPos%" y="0px" height="26px" |width="$gettingResultTimeProportion%"></rect></svg>""".stripMargin } val timelineObject = s""" |{ |'className': 'task task-assignment-timeline-object', |'group': '$executorId', |'content': '<div class="task-assignment-timeline-content" |data-toggle="tooltip" data-placement="top" |data-html="true" data-container="body" |data-title="${s"Task " + index + " (attempt " + attempt + ")"}<br> |Status: ${taskInfo.status}<br> |Launch Time: ${UIUtils.formatDate(new Date(launchTime))} |${ if (!taskInfo.duration.isDefined) { s"""<br>Finish Time: ${UIUtils.formatDate(new Date(finishTime))}""" } else { "" } } |<br>Scheduler Delay: $schedulerDelay ms |<br>Task Deserialization Time: ${UIUtils.formatDuration(deserializationTime)} |<br>Shuffle Read Time: ${UIUtils.formatDuration(shuffleReadTime)} |<br>Executor Computing Time: ${UIUtils.formatDuration(executorComputingTime)} |<br>Shuffle Write Time: ${UIUtils.formatDuration(shuffleWriteTime)} |<br>Result Serialization Time: ${UIUtils.formatDuration(serializationTime)} |<br>Getting Result Time: ${UIUtils.formatDuration(gettingResultTime)}"> |$svgTag', |'start': new Date($launchTime), |'end': new Date($finishTime) |} |""".stripMargin.replaceAll("""[\\r\\n]+""", " ") timelineObject }.mkString("[", ",", "]") val groupArrayStr = executorsSet.map { case (executorId, host) => s""" { 'id': '$executorId', 'content': 
'$executorId / $host', } """ }.mkString("[", ",", "]") <span class="expand-task-assignment-timeline"> <span class="expand-task-assignment-timeline-arrow arrow-closed"></span> <a>Event Timeline</a> </span> ++ <div id="task-assignment-timeline" class="collapsed"> { if (MAX_TIMELINE_TASKS < tasks.size) { <strong> This page has more than the maximum number of tasks that can be shown in the visualization! Only the most recent {MAX_TIMELINE_TASKS} tasks (of {tasks.size} total) are shown. </strong> } else { Seq.empty } } <div class="control-panel"> <div id="task-assignment-timeline-zoom-lock"> <input type="checkbox"></input> <span>Enable zooming</span> </div> </div> {TIMELINE_LEGEND} </div> ++ <script type="text/javascript"> {Unparsed(s"drawTaskAssignmentTimeline(" + s"$groupArrayStr, $executorsArrayStr, $minLaunchTime, $maxFinishTime, " + s"${UIUtils.getTimeZoneOffset()})")} </script> } } private[ui] class TaskDataSource( stage: StageData, currentTime: Long, pageSize: Int, sortColumn: String, desc: Boolean, store: AppStatusStore) extends PagedDataSource[TaskData](pageSize) { import ApiHelper._ // Keep an internal cache of executor log maps so that long task lists render faster. private val executorIdToLogs = new HashMap[String, Map[String, String]]() private var _tasksToShow: Seq[TaskData] = null override def dataSize: Int = taskCount(stage) override def sliceData(from: Int, to: Int): Seq[TaskData] = { if (_tasksToShow == null) { _tasksToShow = store.taskList(stage.stageId, stage.attemptId, from, to - from, indexName(sortColumn), !desc) } _tasksToShow } def tasks: Seq[TaskData] = _tasksToShow def executorLogs(id: String): Map[String, String] = { executorIdToLogs.getOrElseUpdate(id, store.asOption(store.executorSummary(id)).map(_.executorLogs).getOrElse(Map.empty)) } } private[ui] class TaskPagedTable( stage: StageData, basePath: String, currentTime: Long, pageSize: Int, sortColumn: String, desc: Boolean, store: AppStatusStore) extends PagedTable[TaskData] { import ApiHelper._ override def tableId: String = "task-table" override def tableCssClass: String = "table table-bordered table-condensed table-striped table-head-clickable" override def pageSizeFormField: String = "task.pageSize" override def prevPageSizeFormField: String = "task.prevPageSize" override def pageNumberFormField: String = "task.page" override val dataSource: TaskDataSource = new TaskDataSource( stage, currentTime, pageSize, sortColumn, desc, store) override def pageLink(page: Int): String = { val encodedSortColumn = URLEncoder.encode(sortColumn, "UTF-8") basePath + s"&$pageNumberFormField=$page" + s"&task.sort=$encodedSortColumn" + s"&task.desc=$desc" + s"&$pageSizeFormField=$pageSize" } override def goButtonFormPath: String = { val encodedSortColumn = URLEncoder.encode(sortColumn, "UTF-8") s"$basePath&task.sort=$encodedSortColumn&task.desc=$desc" } def headers: Seq[Node] = { import ApiHelper._ val taskHeadersAndCssClasses: Seq[(String, String)] = Seq( (HEADER_TASK_INDEX, ""), (HEADER_ID, ""), (HEADER_ATTEMPT, ""), (HEADER_STATUS, ""), (HEADER_LOCALITY, ""), (HEADER_EXECUTOR, ""), (HEADER_HOST, ""), (HEADER_LAUNCH_TIME, ""), (HEADER_DURATION, ""), (HEADER_SCHEDULER_DELAY, TaskDetailsClassNames.SCHEDULER_DELAY), (HEADER_DESER_TIME, TaskDetailsClassNames.TASK_DESERIALIZATION_TIME), (HEADER_GC_TIME, ""), (HEADER_SER_TIME, TaskDetailsClassNames.RESULT_SERIALIZATION_TIME), (HEADER_GETTING_RESULT_TIME, TaskDetailsClassNames.GETTING_RESULT_TIME), (HEADER_PEAK_MEM, TaskDetailsClassNames.PEAK_EXECUTION_MEMORY)) ++ {if 
(hasAccumulators(stage)) Seq((HEADER_ACCUMULATORS, "")) else Nil} ++ {if (hasInput(stage)) Seq((HEADER_INPUT_SIZE, "")) else Nil} ++ {if (hasOutput(stage)) Seq((HEADER_OUTPUT_SIZE, "")) else Nil} ++ {if (hasShuffleRead(stage)) { Seq((HEADER_SHUFFLE_READ_TIME, TaskDetailsClassNames.SHUFFLE_READ_BLOCKED_TIME), (HEADER_SHUFFLE_TOTAL_READS, ""), (HEADER_SHUFFLE_REMOTE_READS, TaskDetailsClassNames.SHUFFLE_READ_REMOTE_SIZE)) } else { Nil }} ++ {if (hasShuffleWrite(stage)) { Seq((HEADER_SHUFFLE_WRITE_TIME, ""), (HEADER_SHUFFLE_WRITE_SIZE, "")) } else { Nil }} ++ {if (hasBytesSpilled(stage)) { Seq((HEADER_MEM_SPILL, ""), (HEADER_DISK_SPILL, "")) } else { Nil }} ++ Seq((HEADER_ERROR, "")) if (!taskHeadersAndCssClasses.map(_._1).contains(sortColumn)) { throw new IllegalArgumentException(s"Unknown column: $sortColumn") } val headerRow: Seq[Node] = { taskHeadersAndCssClasses.map { case (header, cssClass) => if (header == sortColumn) { val headerLink = Unparsed( basePath + s"&task.sort=${URLEncoder.encode(header, "UTF-8")}" + s"&task.desc=${!desc}" + s"&task.pageSize=$pageSize") val arrow = if (desc) "&#x25BE;" else "&#x25B4;" // UP or DOWN <th class={cssClass}> <a href={headerLink}> {header} <span>&nbsp;{Unparsed(arrow)}</span> </a> </th> } else { val headerLink = Unparsed( basePath + s"&task.sort=${URLEncoder.encode(header, "UTF-8")}" + s"&task.pageSize=$pageSize") <th class={cssClass}> <a href={headerLink}> {header} </a> </th> } } } <thead>{headerRow}</thead> } def row(task: TaskData): Seq[Node] = { def formatDuration(value: Option[Long], hideZero: Boolean = false): String = { value.map { v => if (v > 0 || !hideZero) UIUtils.formatDuration(v) else "" }.getOrElse("") } def formatBytes(value: Option[Long]): String = { Utils.bytesToString(value.getOrElse(0L)) } <tr> <td>{task.index}</td> <td>{task.taskId}</td> <td>{if (task.speculative) s"${task.attempt} (speculative)" else task.attempt.toString}</td> <td>{task.status}</td> <td>{task.taskLocality}</td> <td>{task.executorId}</td> <td> <div style="float: left">{task.host}</div> <div style="float: right"> { dataSource.executorLogs(task.executorId).map { case (logName, logUrl) => <div><a href={logUrl}>{logName}</a></div> } } </div> </td> <td>{UIUtils.formatDate(task.launchTime)}</td> <td>{formatDuration(task.duration)}</td> <td class={TaskDetailsClassNames.SCHEDULER_DELAY}> {UIUtils.formatDuration(AppStatusUtils.schedulerDelay(task))} </td> <td class={TaskDetailsClassNames.TASK_DESERIALIZATION_TIME}> {formatDuration(task.taskMetrics.map(_.executorDeserializeTime))} </td> <td> {formatDuration(task.taskMetrics.map(_.jvmGcTime), hideZero = true)} </td> <td class={TaskDetailsClassNames.RESULT_SERIALIZATION_TIME}> {formatDuration(task.taskMetrics.map(_.resultSerializationTime))} </td> <td class={TaskDetailsClassNames.GETTING_RESULT_TIME}> {UIUtils.formatDuration(AppStatusUtils.gettingResultTime(task))} </td> <td class={TaskDetailsClassNames.PEAK_EXECUTION_MEMORY}> {formatBytes(task.taskMetrics.map(_.peakExecutionMemory))} </td> {if (hasAccumulators(stage)) { <td>{accumulatorsInfo(task)}</td> }} {if (hasInput(stage)) { metricInfo(task) { m => val bytesRead = Utils.bytesToString(m.inputMetrics.bytesRead) val records = m.inputMetrics.recordsRead <td>{bytesRead} / {records}</td> } }} {if (hasOutput(stage)) { metricInfo(task) { m => val bytesWritten = Utils.bytesToString(m.outputMetrics.bytesWritten) val records = m.outputMetrics.recordsWritten <td>{bytesWritten} / {records}</td> } }} {if (hasShuffleRead(stage)) { <td 
class={TaskDetailsClassNames.SHUFFLE_READ_BLOCKED_TIME}> {formatDuration(task.taskMetrics.map(_.shuffleReadMetrics.fetchWaitTime))} </td> <td>{ metricInfo(task) { m => val bytesRead = Utils.bytesToString(totalBytesRead(m.shuffleReadMetrics)) val records = m.shuffleReadMetrics.recordsRead Unparsed(s"$bytesRead / $records") } }</td> <td class={TaskDetailsClassNames.SHUFFLE_READ_REMOTE_SIZE}> {formatBytes(task.taskMetrics.map(_.shuffleReadMetrics.remoteBytesRead))} </td> }} {if (hasShuffleWrite(stage)) { <td>{ formatDuration( task.taskMetrics.map { m => TimeUnit.NANOSECONDS.toMillis(m.shuffleWriteMetrics.writeTime) }, hideZero = true) }</td> <td>{ metricInfo(task) { m => val bytesWritten = Utils.bytesToString(m.shuffleWriteMetrics.bytesWritten) val records = m.shuffleWriteMetrics.recordsWritten Unparsed(s"$bytesWritten / $records") } }</td> }} {if (hasBytesSpilled(stage)) { <td>{formatBytes(task.taskMetrics.map(_.memoryBytesSpilled))}</td> <td>{formatBytes(task.taskMetrics.map(_.diskBytesSpilled))}</td> }} {errorMessageCell(task.errorMessage.getOrElse(""))} </tr> } private def accumulatorsInfo(task: TaskData): Seq[Node] = { task.accumulatorUpdates.flatMap { acc => if (acc.name != null && acc.update.isDefined) { Unparsed(StringEscapeUtils.escapeHtml4(s"${acc.name}: ${acc.update.get}")) ++ <br /> } else { Nil } } } private def metricInfo(task: TaskData)(fn: TaskMetrics => Seq[Node]): Seq[Node] = { task.taskMetrics.map(fn).getOrElse(Nil) } private def errorMessageCell(error: String): Seq[Node] = { val isMultiline = error.indexOf('\\n') >= 0 // Display the first line by default val errorSummary = StringEscapeUtils.escapeHtml4( if (isMultiline) { error.substring(0, error.indexOf('\\n')) } else { error }) val details = if (isMultiline) { // scalastyle:off <span onclick="this.parentNode.querySelector('.stacktrace-details').classList.toggle('collapsed')" class="expand-details"> +details </span> ++ <div class="stacktrace-details collapsed"> <pre>{error}</pre> </div> // scalastyle:on } else { "" } <td>{errorSummary}{details}</td> } } private[ui] object ApiHelper { val HEADER_ID = "ID" val HEADER_TASK_INDEX = "Index" val HEADER_ATTEMPT = "Attempt" val HEADER_STATUS = "Status" val HEADER_LOCALITY = "Locality Level" val HEADER_EXECUTOR = "Executor ID" val HEADER_HOST = "Host" val HEADER_LAUNCH_TIME = "Launch Time" val HEADER_DURATION = "Duration" val HEADER_SCHEDULER_DELAY = "Scheduler Delay" val HEADER_DESER_TIME = "Task Deserialization Time" val HEADER_GC_TIME = "GC Time" val HEADER_SER_TIME = "Result Serialization Time" val HEADER_GETTING_RESULT_TIME = "Getting Result Time" val HEADER_PEAK_MEM = "Peak Execution Memory" val HEADER_ACCUMULATORS = "Accumulators" val HEADER_INPUT_SIZE = "Input Size / Records" val HEADER_OUTPUT_SIZE = "Output Size / Records" val HEADER_SHUFFLE_READ_TIME = "Shuffle Read Blocked Time" val HEADER_SHUFFLE_TOTAL_READS = "Shuffle Read Size / Records" val HEADER_SHUFFLE_REMOTE_READS = "Shuffle Remote Reads" val HEADER_SHUFFLE_WRITE_TIME = "Write Time" val HEADER_SHUFFLE_WRITE_SIZE = "Shuffle Write Size / Records" val HEADER_MEM_SPILL = "Shuffle Spill (Memory)" val HEADER_DISK_SPILL = "Shuffle Spill (Disk)" val HEADER_ERROR = "Errors" private[ui] val COLUMN_TO_INDEX = Map( HEADER_ID -> null.asInstanceOf[String], HEADER_TASK_INDEX -> TaskIndexNames.TASK_INDEX, HEADER_ATTEMPT -> TaskIndexNames.ATTEMPT, HEADER_STATUS -> TaskIndexNames.STATUS, HEADER_LOCALITY -> TaskIndexNames.LOCALITY, HEADER_EXECUTOR -> TaskIndexNames.EXECUTOR, HEADER_HOST -> TaskIndexNames.HOST, HEADER_LAUNCH_TIME 
-> TaskIndexNames.LAUNCH_TIME, HEADER_DURATION -> TaskIndexNames.DURATION, HEADER_SCHEDULER_DELAY -> TaskIndexNames.SCHEDULER_DELAY, HEADER_DESER_TIME -> TaskIndexNames.DESER_TIME, HEADER_GC_TIME -> TaskIndexNames.GC_TIME, HEADER_SER_TIME -> TaskIndexNames.SER_TIME, HEADER_GETTING_RESULT_TIME -> TaskIndexNames.GETTING_RESULT_TIME, HEADER_PEAK_MEM -> TaskIndexNames.PEAK_MEM, HEADER_ACCUMULATORS -> TaskIndexNames.ACCUMULATORS, HEADER_INPUT_SIZE -> TaskIndexNames.INPUT_SIZE, HEADER_OUTPUT_SIZE -> TaskIndexNames.OUTPUT_SIZE, HEADER_SHUFFLE_READ_TIME -> TaskIndexNames.SHUFFLE_READ_TIME, HEADER_SHUFFLE_TOTAL_READS -> TaskIndexNames.SHUFFLE_TOTAL_READS, HEADER_SHUFFLE_REMOTE_READS -> TaskIndexNames.SHUFFLE_REMOTE_READS, HEADER_SHUFFLE_WRITE_TIME -> TaskIndexNames.SHUFFLE_WRITE_TIME, HEADER_SHUFFLE_WRITE_SIZE -> TaskIndexNames.SHUFFLE_WRITE_SIZE, HEADER_MEM_SPILL -> TaskIndexNames.MEM_SPILL, HEADER_DISK_SPILL -> TaskIndexNames.DISK_SPILL, HEADER_ERROR -> TaskIndexNames.ERROR) def hasAccumulators(stageData: StageData): Boolean = { stageData.accumulatorUpdates.exists { acc => acc.name != null && acc.value != null } } def hasInput(stageData: StageData): Boolean = stageData.inputBytes > 0 def hasOutput(stageData: StageData): Boolean = stageData.outputBytes > 0 def hasShuffleRead(stageData: StageData): Boolean = stageData.shuffleReadBytes > 0 def hasShuffleWrite(stageData: StageData): Boolean = stageData.shuffleWriteBytes > 0 def hasBytesSpilled(stageData: StageData): Boolean = { stageData.diskBytesSpilled > 0 || stageData.memoryBytesSpilled > 0 } def totalBytesRead(metrics: ShuffleReadMetrics): Long = { metrics.localBytesRead + metrics.remoteBytesRead } def indexName(sortColumn: String): Option[String] = { COLUMN_TO_INDEX.get(sortColumn) match { case Some(v) => Option(v) case _ => throw new IllegalArgumentException(s"Invalid sort column: $sortColumn") } } def lastStageNameAndDescription(store: AppStatusStore, job: JobData): (String, String) = { val stage = store.asOption(store.stageAttempt(job.stageIds.max, 0)) (stage.map(_.name).getOrElse(""), stage.flatMap(_.description).getOrElse(job.name)) } def taskCount(stageData: StageData): Int = { stageData.numActiveTasks + stageData.numCompleteTasks + stageData.numFailedTasks + stageData.numKilledTasks } }
bravo-zhang/spark
core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala
Scala
apache-2.0
42,570
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.examples.sql import org.apache.spark.sql.Row // $example on:init_session$ import org.apache.spark.sql.SparkSession // $example off:init_session$ // $example on:programmatic_schema$ // $example on:data_types$ import org.apache.spark.sql.types._ // $example off:data_types$ // $example off:programmatic_schema$ object SparkSQLExample { // $example on:create_ds$ case class Person(name: String, age: Long) // $example off:create_ds$ def main(args: Array[String]) { // $example on:init_session$ val spark = SparkSession .builder() .appName("Spark SQL basic example") .config("spark.some.config.option", "some-value") .getOrCreate() // For implicit conversions like converting RDDs to DataFrames import spark.implicits._ // $example off:init_session$ runBasicDataFrameExample(spark) runDatasetCreationExample(spark) runInferSchemaExample(spark) runProgrammaticSchemaExample(spark) spark.stop() } private def runBasicDataFrameExample(spark: SparkSession): Unit = { // $example on:create_df$ val df = spark.read.json("examples/src/main/resources/people.json") // Displays the content of the DataFrame to stdout df.show() // +----+-------+ // | age| name| // +----+-------+ // |null|Michael| // | 30| Andy| // | 19| Justin| // +----+-------+ // $example off:create_df$ // $example on:untyped_ops$ // This import is needed to use the $-notation import spark.implicits._ // Print the schema in a tree format df.printSchema() // root // |-- age: long (nullable = true) // |-- name: string (nullable = true) // Select only the "name" column df.select("name").show() // +-------+ // | name| // +-------+ // |Michael| // | Andy| // | Justin| // +-------+ // Select everybody, but increment the age by 1 df.select($"name", $"age" + 1).show() // +-------+---------+ // | name|(age + 1)| // +-------+---------+ // |Michael| null| // | Andy| 31| // | Justin| 20| // +-------+---------+ // Select people older than 21 df.filter($"age" > 21).show() // +---+----+ // |age|name| // +---+----+ // | 30|Andy| // +---+----+ // Count people by age df.groupBy("age").count().show() // +----+-----+ // | age|count| // +----+-----+ // | 19| 1| // |null| 1| // | 30| 1| // +----+-----+ // $example off:untyped_ops$ // $example on:run_sql$ // Register the DataFrame as a SQL temporary view df.createOrReplaceTempView("people") val sqlDF = spark.sql("SELECT * FROM people") sqlDF.show() // +----+-------+ // | age| name| // +----+-------+ // |null|Michael| // | 30| Andy| // | 19| Justin| // +----+-------+ // $example off:run_sql$ // $example on:global_temp_view$ // Register the DataFrame as a global temporary view df.createGlobalTempView("people") // Global temporary view is tied to a system preserved database `global_temp` spark.sql("SELECT * FROM 
global_temp.people").show() // +----+-------+ // | age| name| // +----+-------+ // |null|Michael| // | 30| Andy| // | 19| Justin| // +----+-------+ // Global temporary view is cross-session spark.newSession().sql("SELECT * FROM global_temp.people").show() // +----+-------+ // | age| name| // +----+-------+ // |null|Michael| // | 30| Andy| // | 19| Justin| // +----+-------+ // $example off:global_temp_view$ } private def runDatasetCreationExample(spark: SparkSession): Unit = { import spark.implicits._ // $example on:create_ds$ // Encoders are created for case classes val caseClassDS = Seq(Person("Andy", 32)).toDS() caseClassDS.show() // +----+---+ // |name|age| // +----+---+ // |Andy| 32| // +----+---+ // Encoders for most common types are automatically provided by importing spark.implicits._ val primitiveDS = Seq(1, 2, 3).toDS() primitiveDS.map(_ + 1).collect() // Returns: Array(2, 3, 4) // DataFrames can be converted to a Dataset by providing a class. Mapping will be done by name val path = "examples/src/main/resources/people.json" val peopleDS = spark.read.json(path).as[Person] peopleDS.show() // +----+-------+ // | age| name| // +----+-------+ // |null|Michael| // | 30| Andy| // | 19| Justin| // +----+-------+ // $example off:create_ds$ } private def runInferSchemaExample(spark: SparkSession): Unit = { // $example on:schema_inferring$ // For implicit conversions from RDDs to DataFrames import spark.implicits._ // Create an RDD of Person objects from a text file, convert it to a Dataframe val peopleDF = spark.sparkContext .textFile("examples/src/main/resources/people.txt") .map(_.split(",")) .map(attributes => Person(attributes(0), attributes(1).trim.toInt)) .toDF() // Register the DataFrame as a temporary view peopleDF.createOrReplaceTempView("people") // SQL statements can be run by using the sql methods provided by Spark val teenagersDF = spark.sql("SELECT name, age FROM people WHERE age BETWEEN 13 AND 19") // The columns of a row in the result can be accessed by field index teenagersDF.map(teenager => "Name: " + teenager(0)).show() // +------------+ // | value| // +------------+ // |Name: Justin| // +------------+ // or by field name teenagersDF.map(teenager => "Name: " + teenager.getAs[String]("name")).show() // +------------+ // | value| // +------------+ // |Name: Justin| // +------------+ // No pre-defined encoders for Dataset[Map[K,V]], define explicitly implicit val mapEncoder = org.apache.spark.sql.Encoders.kryo[Map[String, Any]] // Primitive types and case classes can be also defined as // implicit val stringIntMapEncoder: Encoder[Map[String, Any]] = ExpressionEncoder() // row.getValuesMap[T] retrieves multiple columns at once into a Map[String, T] teenagersDF.map(teenager => teenager.getValuesMap[Any](List("name", "age"))).collect() // Array(Map("name" -> "Justin", "age" -> 19)) // $example off:schema_inferring$ } private def runProgrammaticSchemaExample(spark: SparkSession): Unit = { import spark.implicits._ // $example on:programmatic_schema$ // Create an RDD val peopleRDD = spark.sparkContext.textFile("examples/src/main/resources/people.txt") // The schema is encoded in a string val schemaString = "name age" // Generate the schema based on the string of schema val fields = schemaString.split(" ") .map(fieldName => StructField(fieldName, StringType, nullable = true)) val schema = StructType(fields) // Convert records of the RDD (people) to Rows val rowRDD = peopleRDD .map(_.split(",")) .map(attributes => Row(attributes(0), attributes(1).trim)) // Apply the schema to the 
RDD val peopleDF = spark.createDataFrame(rowRDD, schema) // Creates a temporary view using the DataFrame peopleDF.createOrReplaceTempView("people") // SQL can be run over a temporary view created using DataFrames val results = spark.sql("SELECT name FROM people") // The results of SQL queries are DataFrames and support all the normal RDD operations // The columns of a row in the result can be accessed by field index or by field name results.map(attributes => "Name: " + attributes(0)).show() // +-------------+ // | value| // +-------------+ // |Name: Michael| // | Name: Andy| // | Name: Justin| // +-------------+ // $example off:programmatic_schema$ } }
lxsmnv/spark
examples/src/main/scala/org/apache/spark/examples/sql/SparkSQLExample.scala
Scala
apache-2.0
8,713
package scalachessjs import scala.scalajs.js.JSApp import scala.scalajs.js import org.scalajs.dom import js.Dynamic.{ /* global => g, newInstance => jsnew, */ literal => jsobj } import js.JSConverters._ import js.annotation._ import chess.{ Success, Failure, Game, Pos, Role, PromotableRole, Replay, Status, MoveOrDrop } import chess.variant.Variant import chess.format.{ UciCharPair, UciDump } import chess.format.pgn.Reader object Main extends JSApp { def main(): Unit = { val self = js.Dynamic.global self.addEventListener("message", { e: dom.MessageEvent => try { val data = e.data.asInstanceOf[Message] val reqidOpt = data.reqid.asInstanceOf[js.UndefOr[String]].toOption val payload = data.payload.asInstanceOf[js.Dynamic] val fen = payload.fen.asInstanceOf[js.UndefOr[String]].toOption val variantKey = payload.variant.asInstanceOf[js.UndefOr[String]].toOption val variant = variantKey.flatMap(Variant(_)) data.topic match { case "init" => { init(reqidOpt, variant, fen) } case "dests" => { val path = payload.path.asInstanceOf[js.UndefOr[String]].toOption fen.fold { sendError(reqidOpt, data.topic, "fen field is required for dests topic") } { fen => getDests(reqidOpt, variant, fen, path) } } case "situation" => { val path = payload.path.asInstanceOf[js.UndefOr[String]].toOption fen.fold { sendError(reqidOpt, data.topic, "fen field is required for situation topic") } { fen => val game = Game(variant, Some(fen)) self.postMessage(Message( reqid = reqidOpt, topic = "situation", payload = jsobj( "situation" -> gameSituation(game), "path" -> path.orUndefined ) )) () } } case "threefoldTest" => { val pgnMoves = payload.pgnMoves.asInstanceOf[js.Array[String]].toList val initialFen = payload.initialFen.asInstanceOf[js.UndefOr[String]].toOption Replay(pgnMoves, initialFen, variant getOrElse Variant.default) match { case Success(Reader.Result.Complete(replay)) => { self.postMessage(Message( reqid = reqidOpt, topic = "threefoldTest", payload = jsobj( "threefoldRepetition" -> replay.state.board.history.threefoldRepetition, "status" -> jsobj( "id" -> Status.Draw.id, "name" -> Status.Draw.name ) ) )) } case Success(Reader.Result.Incomplete(_, errors)) => sendError(reqidOpt, data.topic, errors.head) case Failure(errors) => sendError(reqidOpt, data.topic, errors.head) } } case "move" => { val promotion = payload.promotion.asInstanceOf[js.UndefOr[String]].toOption val origS = payload.orig.asInstanceOf[String] val destS = payload.dest.asInstanceOf[String] val pgnMovesOpt = payload.pgnMoves.asInstanceOf[js.UndefOr[js.Array[String]]].toOption val uciMovesOpt = payload.uciMoves.asInstanceOf[js.UndefOr[js.Array[String]]].toOption val pgnMoves = pgnMovesOpt.map(_.toVector).getOrElse(Vector.empty[String]) val uciMoves = uciMovesOpt.map(_.toList).getOrElse(List.empty[String]) val path = payload.path.asInstanceOf[js.UndefOr[String]].toOption (for { orig <- Pos.posAt(origS) dest <- Pos.posAt(destS) fen <- fen } yield (orig, dest, fen)) match { case Some((orig, dest, fen)) => move(reqidOpt, variant, fen, pgnMoves, uciMoves, orig, dest, Role.promotable(promotion), path) case None => sendError(reqidOpt, data.topic, s"step topic params: $origS, $destS, $fen are not valid") } } case "drop" => { val roleS = payload.role.asInstanceOf[String] val posS = payload.pos.asInstanceOf[String] val pgnMovesOpt = payload.pgnMoves.asInstanceOf[js.UndefOr[js.Array[String]]].toOption val uciMovesOpt = payload.uciMoves.asInstanceOf[js.UndefOr[js.Array[String]]].toOption val pgnMoves = pgnMovesOpt.map(_.toVector).getOrElse(Vector.empty[String]) val 
uciMoves = uciMovesOpt.map(_.toList).getOrElse(List.empty[String]) val path = payload.path.asInstanceOf[js.UndefOr[String]].toOption (for { pos <- Pos.posAt(posS) role <- Role.allByName get roleS fen <- fen } yield (pos, role, fen)) match { case Some((pos, role, fen)) => drop(reqidOpt, variant, fen, pgnMoves, uciMoves, role, pos, path) case None => sendError(reqidOpt, data.topic, s"step topic params: $posS, $roleS, $fen are not valid") } } case "pgnDump" => { val pgnMoves = payload.pgnMoves.asInstanceOf[js.Array[String]].toList val initialFen = payload.initialFen.asInstanceOf[js.UndefOr[String]].toOption val white = payload.white.asInstanceOf[js.UndefOr[String]].toOption val black = payload.black.asInstanceOf[js.UndefOr[String]].toOption val date = payload.date.asInstanceOf[js.UndefOr[String]].toOption Replay(pgnMoves, initialFen, variant getOrElse Variant.default) match { case Success(Reader.Result.Complete(replay)) => { val pgn = PgnDump(replay.state, initialFen, replay.setup.startedAtTurn, white, black, date) self.postMessage(Message( reqid = reqidOpt, topic = "pgnDump", payload = jsobj( "pgn" -> pgn.toString ) )) } case Success(Reader.Result.Incomplete(_, errors)) => sendError(reqidOpt, data.topic, errors.head) case Failure(errors) => sendError(reqidOpt, data.topic, errors.head) } } case _ => { sendError(reqidOpt, data.topic, "Invalid command.") } } } catch { case ex: Exception => { val data = e.data.asInstanceOf[Message] val reqidOpt = data.reqid.asInstanceOf[js.UndefOr[String]].toOption sendError(reqidOpt, data.topic, "Exception caught in scalachessjs: " + ex) } } }) def init(reqid: Option[String], variant: Option[Variant], fen: Option[String]): Unit = { val game = Game(variant, fen) self.postMessage(Message( reqid = reqid, topic = "init", payload = jsobj( "variant" -> new VariantInfo { val key = game.board.variant.key val name = game.board.variant.name val shortName = game.board.variant.shortName val title = game.board.variant.title }, "setup" -> gameSituation(game) ) )) () } def getDests(reqid: Option[String], variant: Option[Variant], fen: String, path: Option[String]): Unit = { val game = Game(variant, Some(fen)) val movable = !game.situation.end val dests = if (movable) possibleDests(game) else emptyDests self.postMessage(Message( reqid = reqid, topic = "dests", payload = jsobj( "dests" -> dests, "path" -> path.orUndefined ) )) () } def move(reqid: Option[String], variant: Option[Variant], fen: String, pgnMoves: Vector[String], uciMoves: List[String], orig: Pos, dest: Pos, promotion: Option[PromotableRole], path: Option[String]): Unit = { Game(variant, Some(fen))(orig, dest, promotion) match { case Success((newGame, move)) => { self.postMessage(Message( reqid = reqid, topic = "move", payload = jsobj( "situation" -> gameSituation(newGame.withPgnMoves(pgnMoves ++ newGame.pgnMoves), Some(Left(move)), uciMoves, promotion), "path" -> path.orUndefined ) )) () } case Failure(errors) => sendError(reqid, "move", errors.head) } } def drop(reqid: Option[String], variant: Option[Variant], fen: String, pgnMoves: Vector[String], uciMoves: List[String], role: Role, pos: Pos, path: Option[String]): Unit = { Game(variant, Some(fen)).drop(role, pos) match { case Success((newGame, drop)) => { self.postMessage(Message( reqid = reqid, topic = "drop", payload = jsobj( "situation" -> gameSituation(newGame.withPgnMoves(pgnMoves ++ newGame.pgnMoves), Some(Right(drop)), uciMoves), "path" -> path.orUndefined ) )) () } case Failure(errors) => sendError(reqid, "drop", errors.head) } } def sendError(reqid: 
Option[String], callerTopic: String, error: String): Unit = { self.postMessage(Message( reqid = reqid, topic = "error", payload = jsobj( "callerTopic" -> callerTopic, "error" -> error ) )) () } } private val emptyDests: js.Dictionary[js.Array[String]] = js.Dictionary() private def moveOrDropToUciCharPair(m: MoveOrDrop): UciCharPair = UciCharPair(m.fold(_.toUci, _.toUci)) private def gameSituation( game: Game, lastMoveOpt: Option[MoveOrDrop] = None, prevUciMoves: List[String] = List.empty[String], promotionRole: Option[PromotableRole] = None ): js.Object = { val lmUci = lastMoveOpt.map(UciDump.move(game.board.variant)(_)) val mergedUciMoves = lmUci.fold(prevUciMoves) { uci => prevUciMoves :+ uci } val movable = !game.situation.end new Situation { val id = lastMoveOpt.fold("")(moveOrDropToUciCharPair(_).toString) val variant = game.board.variant.key val fen = chess.format.Forsyth >> game val player = game.player.name val dests = if (movable) possibleDests(game) else emptyDests val drops = possibleDrops(game) val end = game.situation.end val playable = game.situation.playable(true) val winner = game.situation.winner.map(_.name).orUndefined val check = game.situation.check val checkCount = (if (game.board.variant.key == "threeCheck") Some(jsobj( "white" -> game.board.history.checkCount.white, "black" -> game.board.history.checkCount.black )) else None).orUndefined val uci = lmUci.orUndefined val san = game.pgnMoves.lastOption.orUndefined val pgnMoves = game.pgnMoves.toJSArray val uciMoves = mergedUciMoves.toJSArray val promotion = promotionRole.map(_.forsyth).map(_.toString).orUndefined val status = game.situation.status.map { s => jsobj( "id" -> s.id, "name" -> s.name ) }.orUndefined val crazyhouse = game.board.crazyData.map { d => jsobj( "pockets" -> js.Array( d.pockets.white.roles.map(_.name).groupBy(identity).mapValues(_.size).toJSDictionary, d.pockets.black.roles.map(_.name).groupBy(identity).mapValues(_.size).toJSDictionary ) ) }.orUndefined val ply = game.turns } } private def possibleDests(game: Game): js.Dictionary[js.Array[String]] = { game.situation.destinations.map { case (pos, dests) => (pos.toString -> dests.map(_.toString).toJSArray) }.toJSDictionary } private def possibleDrops(game: Game): js.UndefOr[js.Array[String]] = { game.situation.drops.map { drops => drops.map(_.toString).toJSArray }.orUndefined } } @js.native trait Message extends js.Object { val topic: String val payload: js.Any val reqid: js.UndefOr[String] } object Message { def apply(topic: String, payload: js.Any, reqid: Option[String]): Message = js.Dynamic.literal(topic = topic, payload = payload, reqid = reqid.orUndefined).asInstanceOf[Message] } @ScalaJSDefined trait VariantInfo extends js.Object { val key: String val name: String val shortName: String val title: String } @ScalaJSDefined trait Situation extends js.Object { val id: String val ply: Int val variant: String val fen: String val player: String val dests: js.Dictionary[js.Array[String]] val drops: js.UndefOr[js.Array[String]] val end: Boolean val playable: Boolean val status: js.UndefOr[js.Object] val winner: js.UndefOr[String] val check: Boolean val checkCount: js.UndefOr[js.Object] val pgnMoves: js.Array[String] val uciMoves: js.Array[String] val san: js.UndefOr[String] val uci: js.UndefOr[String] val promotion: js.UndefOr[String] val crazyhouse: js.UndefOr[js.Object] }
veloce/scalachessjs
src/main/scala/scalachessjs/Main.scala
Scala
mit
13,204
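For orientation, a short sketch of how a request for the web-worker code above could be built. It reuses the Message facade and the jsobj literal alias defined in that file; the wrapper object, the reqid value, and the idea of handing the object to postMessage from a hosting page are assumptions made for illustration.

// Sketch only: assumes the Message trait/object from scalachessjs Main.scala
// (the record above) is on the classpath.
object InitRequestSketch {
  import scala.scalajs.js
  import js.Dynamic.{ literal => jsobj }

  val initRequest: Message = Message(
    topic = "init",
    payload = jsobj("variant" -> "standard"), // read as payload.variant by the "init" branch
    reqid = Some("req-1")                     // echoed back so the caller can match the reply
  )
  // A hosting page would typically pass this to the worker via postMessage;
  // the worker answers on the "init" topic with the variant info and initial situation.
}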
package com.rasterfoundry.database

import com.rasterfoundry.common.Generators.Implicits._
import com.rasterfoundry.datamodel._

import doobie.implicits._
import org.scalacheck.Prop.forAll
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
import org.scalatestplus.scalacheck.Checkers

class UserIntercomConversationDaoSpec
    extends AnyFunSuite
    with Matchers
    with Checkers
    with DBTestConfig
    with PropTestHelpers
    with ConnectionIOLogger {

  test("insert a user intercom conversation and then get it") {
    check {
      forAll(
        (
            userCreate: User.Create,
            conversationId: String
        ) => {
          val insertAndGetIO = for {
            user <- UserDao.create(userCreate)
            inserted <- UserIntercomConversationDao.insertUserConversation(
              user.id,
              conversationId
            )
            dbConvo <- UserIntercomConversationDao.getByUserId(user.id)
          } yield (inserted, user, dbConvo)

          val (insertedConversation, insertedUser, dbConversation) =
            insertAndGetIO.transact(xa).unsafeRunSync

          assert(
            insertedConversation.userId == insertedUser.id,
            "Inserted user conversation's user ID is correct"
          )
          assert(
            insertedConversation.conversationId == conversationId,
            "Inserted user conversation's conversation ID is correct"
          )
          assert(
            dbConversation == Some(
              UserIntercomConversation(insertedUser.id, conversationId)
            ),
            "Fetched user conversation object matches what was inserted"
          )
          true
        }
      )
    }
  }
}
raster-foundry/raster-foundry
app-backend/db/src/test/scala/com/azavea/rf/database/UserIntercomConversationDaoSpec.scala
Scala
apache-2.0
1,742
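A hedged companion property check sketched against the same DAO methods exercised above. It assumes it would sit inside the same test suite, with the same transactor `xa` and generators in scope; the test name is invented.

  test("getByUserId returns None when no conversation was inserted") {
    check {
      forAll { (userCreate: User.Create) =>
        val io = for {
          user  <- UserDao.create(userCreate)
          convo <- UserIntercomConversationDao.getByUserId(user.id)
        } yield convo
        io.transact(xa).unsafeRunSync.isEmpty
      }
    }
  }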
package colang.backend import colang.ast.parsed.RootNamespace /** * Represents a compiler component that transforms intermediate code representation (after semantic analysis) to * the target representation. */ trait Backend { /** * Perform the transformation. * @param rootNamespace populated root namespace after analysis */ def process(rootNamespace: RootNamespace): Unit }
merkispavel/colang
src/main/scala/colang/backend/Backend.scala
Scala
mit
401
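An illustrative implementation of the Backend trait above; a no-op backend like this (hypothetical name) can stand in while a real code generator is being developed.

package colang.backend

import colang.ast.parsed.RootNamespace

/**
 * A placeholder backend that performs no code generation.
 */
class NoOpBackend extends Backend {
  def process(rootNamespace: RootNamespace): Unit = {
    // Intentionally empty: accepts the analyzed namespace and emits nothing.
  }
}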
//: ---------------------------------------------------------------------------- //: Copyright (C) 2015 Verizon. All Rights Reserved. //: //: Licensed under the Apache License, Version 2.0 (the "License"); //: you may not use this file except in compliance with the License. //: You may obtain a copy of the License at //: //: http://www.apache.org/licenses/LICENSE-2.0 //: //: Unless required by applicable law or agreed to in writing, software //: distributed under the License is distributed on an "AS IS" BASIS, //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //: See the License for the specific language governing permissions and //: limitations under the License. //: //: ---------------------------------------------------------------------------- package funnel package agent import funnel.zeromq._, sockets._ import scalaz.concurrent.{Task,Strategy} import scalaz.stream.{Process,time} import scala.concurrent.duration._ object TestingMultiJvmPusher1 extends ApplicationPusher("push-1") object TestingMultiJvmPusher2 extends ApplicationPusher("push-2") object TestingMultiJvmPublisher { def main(args: Array[String]): Unit = { val (i,o) = (for { a <- Endpoint(pull &&& bind, Settings.uri) b <- Endpoint(publish &&& bind, Settings.tcp) } yield (a,b)).getOrElse(sys.error("Unable to configure the endpoints for the agent.")) new zeromq.Proxy(i,o).task.run } } object TestingMultiJvmSubscriber { import scalaz.stream.io import scalaz.stream.async.signalOf val S = Strategy.Executor(Monitoring.defaultPool) def main(args: Array[String]): Unit = { val E = Endpoint(subscribe &&& (connect ~ topics.all), Settings.tcp ).getOrElse(sys.error("Unable to configure the TCP subscriber endpoint")) Ø.link(E)(Fixtures.signal)(Ø.receive).map(t => new String(t.bytes)).to(io.stdOut).run.runAsync(_ => ()) time.sleep(10.seconds)(S, Monitoring.schedulingPool) .onComplete(Process.eval_(Fixtures.signal.get)).run.run println("Subscriber - Stopping the task...") } }
neigor/funnel
agent/src/multi-jvm/scala/Push+Pull+Pub+Sub.scala
Scala
apache-2.0
2,089
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // scalastyle:off println package org.apache.spark.examples.ml // $example on$ import org.apache.spark.ml.feature.FValueSelector import org.apache.spark.ml.linalg.Vectors // $example off$ import org.apache.spark.sql.SparkSession /** * An example for FValueSelector. * Run with * {{{ * bin/run-example ml.FValueSelectorExample * }}} */ object FValueSelectorExample { def main(args: Array[String]): Unit = { val spark = SparkSession .builder .appName("FValueSelectorExample") .getOrCreate() import spark.implicits._ // $example on$ val data = Seq( (1, Vectors.dense(6.0, 7.0, 0.0, 7.0, 6.0, 0.0), 4.6), (2, Vectors.dense(0.0, 9.0, 6.0, 0.0, 5.0, 9.0), 6.6), (3, Vectors.dense(0.0, 9.0, 3.0, 0.0, 5.0, 5.0), 5.1), (4, Vectors.dense(0.0, 9.0, 8.0, 5.0, 6.0, 4.0), 7.6), (5, Vectors.dense(8.0, 9.0, 6.0, 5.0, 4.0, 4.0), 9.0), (6, Vectors.dense(8.0, 9.0, 6.0, 4.0, 0.0, 0.0), 9.0) ) val df = spark.createDataset(data).toDF("id", "features", "label") val selector = new FValueSelector() .setNumTopFeatures(1) .setFeaturesCol("features") .setLabelCol("label") .setOutputCol("selectedFeatures") val result = selector.fit(df).transform(df) println(s"FValueSelector output with top ${selector.getNumTopFeatures} features selected") result.show() // $example off$ spark.stop() } } // scalastyle:on println
dbtsai/spark
examples/src/main/scala/org/apache/spark/examples/ml/FValueSelectorExample.scala
Scala
apache-2.0
2,240
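A small, hedged variation on the example above: the same selector run as a single-stage ML Pipeline, which is how it would normally be chained with other feature transformers. The snippet assumes the `selector` and `df` values from the example are still in scope.

    import org.apache.spark.ml.Pipeline

    val pipeline = new Pipeline().setStages(Array(selector))
    val pipelineModel = pipeline.fit(df)
    pipelineModel.transform(df).select("id", "selectedFeatures").show()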
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.status import java.util.Date import java.util.concurrent.ConcurrentHashMap import java.util.function.Function import scala.collection.JavaConverters._ import scala.collection.mutable.HashMap import org.apache.spark._ import org.apache.spark.executor.{ExecutorMetrics, TaskMetrics} import org.apache.spark.internal.Logging import org.apache.spark.scheduler._ import org.apache.spark.status.api.v1 import org.apache.spark.storage._ import org.apache.spark.ui.SparkUI import org.apache.spark.ui.scope._ /** * A Spark listener that writes application information to a data store. The types written to the * store are defined in the `storeTypes.scala` file and are based on the public REST API. * * @param lastUpdateTime When replaying logs, the log's last update time, so that the duration of * unfinished tasks can be more accurately calculated (see SPARK-21922). */ private[spark] class AppStatusListener( kvstore: ElementTrackingStore, conf: SparkConf, live: Boolean, appStatusSource: Option[AppStatusSource] = None, lastUpdateTime: Option[Long] = None) extends SparkListener with Logging { import config._ private var sparkVersion = SPARK_VERSION private var appInfo: v1.ApplicationInfo = null private var appSummary = new AppSummary(0, 0) private var coresPerTask: Int = 1 // How often to update live entities. -1 means "never update" when replaying applications, // meaning only the last write will happen. For live applications, this avoids a few // operations that we can live without when rapidly processing incoming task events. private val liveUpdatePeriodNs = if (live) conf.get(LIVE_ENTITY_UPDATE_PERIOD) else -1L private val maxTasksPerStage = conf.get(MAX_RETAINED_TASKS_PER_STAGE) private val maxGraphRootNodes = conf.get(MAX_RETAINED_ROOT_NODES) // Keep track of live entities, so that task metrics can be efficiently updated (without // causing too many writes to the underlying store, and other expensive operations). private val liveStages = new ConcurrentHashMap[(Int, Int), LiveStage]() private val liveJobs = new HashMap[Int, LiveJob]() private val liveExecutors = new HashMap[String, LiveExecutor]() private val deadExecutors = new HashMap[String, LiveExecutor]() private val liveTasks = new HashMap[Long, LiveTask]() private val liveRDDs = new HashMap[Int, LiveRDD]() private val pools = new HashMap[String, SchedulerPool]() // Keep the active executor count as a separate variable to avoid having to do synchronization // around liveExecutors. 
@volatile private var activeExecutorCount = 0 kvstore.addTrigger(classOf[ExecutorSummaryWrapper], conf.get(MAX_RETAINED_DEAD_EXECUTORS)) { count => cleanupExecutors(count) } kvstore.addTrigger(classOf[JobDataWrapper], conf.get(MAX_RETAINED_JOBS)) { count => cleanupJobs(count) } kvstore.addTrigger(classOf[StageDataWrapper], conf.get(MAX_RETAINED_STAGES)) { count => cleanupStages(count) } kvstore.onFlush { if (!live) { flush() } } override def onOtherEvent(event: SparkListenerEvent): Unit = event match { case SparkListenerLogStart(version) => sparkVersion = version case _ => } override def onApplicationStart(event: SparkListenerApplicationStart): Unit = { assert(event.appId.isDefined, "Application without IDs are not supported.") val attempt = v1.ApplicationAttemptInfo( event.appAttemptId, new Date(event.time), new Date(-1), new Date(event.time), -1L, event.sparkUser, false, sparkVersion) appInfo = v1.ApplicationInfo( event.appId.get, event.appName, None, None, None, None, Seq(attempt)) kvstore.write(new ApplicationInfoWrapper(appInfo)) kvstore.write(appSummary) // Update the driver block manager with logs from this event. The SparkContext initialization // code registers the driver before this event is sent. event.driverLogs.foreach { logs => val driver = liveExecutors.get(SparkContext.DRIVER_IDENTIFIER) .orElse(liveExecutors.get(SparkContext.LEGACY_DRIVER_IDENTIFIER)) driver.foreach { d => d.executorLogs = logs.toMap update(d, System.nanoTime()) } } } override def onEnvironmentUpdate(event: SparkListenerEnvironmentUpdate): Unit = { val details = event.environmentDetails val jvmInfo = Map(details("JVM Information"): _*) val runtime = new v1.RuntimeInfo( jvmInfo.get("Java Version").orNull, jvmInfo.get("Java Home").orNull, jvmInfo.get("Scala Version").orNull) val envInfo = new v1.ApplicationEnvironmentInfo( runtime, details.getOrElse("Spark Properties", Nil), details.getOrElse("System Properties", Nil), details.getOrElse("Classpath Entries", Nil)) coresPerTask = envInfo.sparkProperties.toMap.get("spark.task.cpus").map(_.toInt) .getOrElse(coresPerTask) kvstore.write(new ApplicationEnvironmentInfoWrapper(envInfo)) } override def onApplicationEnd(event: SparkListenerApplicationEnd): Unit = { val old = appInfo.attempts.head val attempt = v1.ApplicationAttemptInfo( old.attemptId, old.startTime, new Date(event.time), new Date(event.time), event.time - old.startTime.getTime(), old.sparkUser, true, old.appSparkVersion) appInfo = v1.ApplicationInfo( appInfo.id, appInfo.name, None, None, None, None, Seq(attempt)) kvstore.write(new ApplicationInfoWrapper(appInfo)) } override def onExecutorAdded(event: SparkListenerExecutorAdded): Unit = { // This needs to be an update in case an executor re-registers after the driver has // marked it as "dead". 
val exec = getOrCreateExecutor(event.executorId, event.time) exec.host = event.executorInfo.executorHost exec.isActive = true exec.totalCores = event.executorInfo.totalCores exec.maxTasks = event.executorInfo.totalCores / coresPerTask exec.executorLogs = event.executorInfo.logUrlMap liveUpdate(exec, System.nanoTime()) } override def onExecutorRemoved(event: SparkListenerExecutorRemoved): Unit = { liveExecutors.remove(event.executorId).foreach { exec => val now = System.nanoTime() activeExecutorCount = math.max(0, activeExecutorCount - 1) exec.isActive = false exec.removeTime = new Date(event.time) exec.removeReason = event.reason update(exec, now, last = true) // Remove all RDD distributions that reference the removed executor, in case there wasn't // a corresponding event. liveRDDs.values.foreach { rdd => if (rdd.removeDistribution(exec)) { update(rdd, now) } } if (isExecutorActiveForLiveStages(exec)) { // the executor was running for a currently active stage, so save it for now in // deadExecutors, and remove when there are no active stages overlapping with the // executor. deadExecutors.put(event.executorId, exec) } } } /** Was the specified executor active for any currently live stages? */ private def isExecutorActiveForLiveStages(exec: LiveExecutor): Boolean = { liveStages.values.asScala.exists { stage => stage.info.submissionTime.getOrElse(0L) < exec.removeTime.getTime } } override def onExecutorBlacklisted(event: SparkListenerExecutorBlacklisted): Unit = { updateBlackListStatus(event.executorId, true) } override def onExecutorBlacklistedForStage( event: SparkListenerExecutorBlacklistedForStage): Unit = { val now = System.nanoTime() Option(liveStages.get((event.stageId, event.stageAttemptId))).foreach { stage => setStageBlackListStatus(stage, now, event.executorId) } liveExecutors.get(event.executorId).foreach { exec => addBlackListedStageTo(exec, event.stageId, now) } } override def onNodeBlacklistedForStage(event: SparkListenerNodeBlacklistedForStage): Unit = { val now = System.nanoTime() // Implicitly blacklist every available executor for the stage associated with this node Option(liveStages.get((event.stageId, event.stageAttemptId))).foreach { stage => val executorIds = liveExecutors.values.filter(_.host == event.hostId).map(_.executorId).toSeq setStageBlackListStatus(stage, now, executorIds: _*) } liveExecutors.values.filter(_.hostname == event.hostId).foreach { exec => addBlackListedStageTo(exec, event.stageId, now) } } private def addBlackListedStageTo(exec: LiveExecutor, stageId: Int, now: Long): Unit = { exec.blacklistedInStages += stageId liveUpdate(exec, now) } private def setStageBlackListStatus(stage: LiveStage, now: Long, executorIds: String*): Unit = { executorIds.foreach { executorId => val executorStageSummary = stage.executorSummary(executorId) executorStageSummary.isBlacklisted = true maybeUpdate(executorStageSummary, now) } stage.blackListedExecutors ++= executorIds maybeUpdate(stage, now) } override def onExecutorUnblacklisted(event: SparkListenerExecutorUnblacklisted): Unit = { updateBlackListStatus(event.executorId, false) } override def onNodeBlacklisted(event: SparkListenerNodeBlacklisted): Unit = { updateNodeBlackList(event.hostId, true) } override def onNodeUnblacklisted(event: SparkListenerNodeUnblacklisted): Unit = { updateNodeBlackList(event.hostId, false) } private def updateBlackListStatus(execId: String, blacklisted: Boolean): Unit = { liveExecutors.get(execId).foreach { exec => exec.isBlacklisted = blacklisted if (blacklisted) { 
appStatusSource.foreach(_.BLACKLISTED_EXECUTORS.inc()) } else { appStatusSource.foreach(_.UNBLACKLISTED_EXECUTORS.inc()) } liveUpdate(exec, System.nanoTime()) } } private def updateNodeBlackList(host: String, blacklisted: Boolean): Unit = { val now = System.nanoTime() // Implicitly (un)blacklist every executor associated with the node. liveExecutors.values.foreach { exec => if (exec.hostname == host) { exec.isBlacklisted = blacklisted liveUpdate(exec, now) } } } override def onJobStart(event: SparkListenerJobStart): Unit = { val now = System.nanoTime() // Compute (a potential over-estimate of) the number of tasks that will be run by this job. // This may be an over-estimate because the job start event references all of the result // stages' transitive stage dependencies, but some of these stages might be skipped if their // output is available from earlier runs. // See https://github.com/apache/spark/pull/3009 for a more extensive discussion. val numTasks = { val missingStages = event.stageInfos.filter(_.completionTime.isEmpty) missingStages.map(_.numTasks).sum } val lastStageInfo = event.stageInfos.sortBy(_.stageId).lastOption val lastStageName = lastStageInfo.map(_.name).getOrElse("(Unknown Stage Name)") val jobGroup = Option(event.properties) .flatMap { p => Option(p.getProperty(SparkContext.SPARK_JOB_GROUP_ID)) } val job = new LiveJob( event.jobId, lastStageName, if (event.time > 0) Some(new Date(event.time)) else None, event.stageIds, jobGroup, numTasks) liveJobs.put(event.jobId, job) liveUpdate(job, now) event.stageInfos.foreach { stageInfo => // A new job submission may re-use an existing stage, so this code needs to do an update // instead of just a write. val stage = getOrCreateStage(stageInfo) stage.jobs :+= job stage.jobIds += event.jobId liveUpdate(stage, now) } // Create the graph data for all the job's stages. event.stageInfos.foreach { stage => val graph = RDDOperationGraph.makeOperationGraph(stage, maxGraphRootNodes) val uigraph = new RDDOperationGraphWrapper( stage.stageId, graph.edges, graph.outgoingEdges, graph.incomingEdges, newRDDOperationCluster(graph.rootCluster)) kvstore.write(uigraph) } } private def newRDDOperationCluster(cluster: RDDOperationCluster): RDDOperationClusterWrapper = { new RDDOperationClusterWrapper( cluster.id, cluster.name, cluster.childNodes, cluster.childClusters.map(newRDDOperationCluster)) } override def onJobEnd(event: SparkListenerJobEnd): Unit = { liveJobs.remove(event.jobId).foreach { job => val now = System.nanoTime() // Check if there are any pending stages that match this job; mark those as skipped. 
val it = liveStages.entrySet.iterator() while (it.hasNext()) { val e = it.next() if (job.stageIds.contains(e.getKey()._1)) { val stage = e.getValue() if (v1.StageStatus.PENDING.equals(stage.status)) { stage.status = v1.StageStatus.SKIPPED job.skippedStages += stage.info.stageId job.skippedTasks += stage.info.numTasks job.activeStages -= 1 pools.get(stage.schedulingPool).foreach { pool => pool.stageIds = pool.stageIds - stage.info.stageId update(pool, now) } it.remove() update(stage, now, last = true) } } } job.status = event.jobResult match { case JobSucceeded => appStatusSource.foreach{_.SUCCEEDED_JOBS.inc()} JobExecutionStatus.SUCCEEDED case JobFailed(_) => appStatusSource.foreach{_.FAILED_JOBS.inc()} JobExecutionStatus.FAILED } job.completionTime = if (event.time > 0) Some(new Date(event.time)) else None for { source <- appStatusSource submissionTime <- job.submissionTime completionTime <- job.completionTime } { source.JOB_DURATION.value.set(completionTime.getTime() - submissionTime.getTime()) } // update global app status counters appStatusSource.foreach { source => source.COMPLETED_STAGES.inc(job.completedStages.size) source.FAILED_STAGES.inc(job.failedStages) source.COMPLETED_TASKS.inc(job.completedTasks) source.FAILED_TASKS.inc(job.failedTasks) source.KILLED_TASKS.inc(job.killedTasks) source.SKIPPED_TASKS.inc(job.skippedTasks) source.SKIPPED_STAGES.inc(job.skippedStages.size) } update(job, now, last = true) if (job.status == JobExecutionStatus.SUCCEEDED) { appSummary = new AppSummary(appSummary.numCompletedJobs + 1, appSummary.numCompletedStages) kvstore.write(appSummary) } } } override def onStageSubmitted(event: SparkListenerStageSubmitted): Unit = { val now = System.nanoTime() val stage = getOrCreateStage(event.stageInfo) stage.status = v1.StageStatus.ACTIVE stage.schedulingPool = Option(event.properties).flatMap { p => Option(p.getProperty("spark.scheduler.pool")) }.getOrElse(SparkUI.DEFAULT_POOL_NAME) // Look at all active jobs to find the ones that mention this stage. 
stage.jobs = liveJobs.values .filter(_.stageIds.contains(event.stageInfo.stageId)) .toSeq stage.jobIds = stage.jobs.map(_.jobId).toSet stage.description = Option(event.properties).flatMap { p => Option(p.getProperty(SparkContext.SPARK_JOB_DESCRIPTION)) } stage.jobs.foreach { job => job.completedStages = job.completedStages - event.stageInfo.stageId job.activeStages += 1 liveUpdate(job, now) } val pool = pools.getOrElseUpdate(stage.schedulingPool, new SchedulerPool(stage.schedulingPool)) pool.stageIds = pool.stageIds + event.stageInfo.stageId update(pool, now) event.stageInfo.rddInfos.foreach { info => if (info.storageLevel.isValid) { liveUpdate(liveRDDs.getOrElseUpdate(info.id, new LiveRDD(info)), now) } } liveUpdate(stage, now) } override def onTaskStart(event: SparkListenerTaskStart): Unit = { val now = System.nanoTime() val task = new LiveTask(event.taskInfo, event.stageId, event.stageAttemptId, lastUpdateTime) liveTasks.put(event.taskInfo.taskId, task) liveUpdate(task, now) Option(liveStages.get((event.stageId, event.stageAttemptId))).foreach { stage => stage.activeTasks += 1 stage.firstLaunchTime = math.min(stage.firstLaunchTime, event.taskInfo.launchTime) val locality = event.taskInfo.taskLocality.toString() val count = stage.localitySummary.getOrElse(locality, 0L) + 1L stage.localitySummary = stage.localitySummary ++ Map(locality -> count) maybeUpdate(stage, now) stage.jobs.foreach { job => job.activeTasks += 1 maybeUpdate(job, now) } if (stage.savedTasks.incrementAndGet() > maxTasksPerStage && !stage.cleaning) { stage.cleaning = true kvstore.doAsync { cleanupTasks(stage) } } } liveExecutors.get(event.taskInfo.executorId).foreach { exec => exec.activeTasks += 1 exec.totalTasks += 1 maybeUpdate(exec, now) } } override def onTaskGettingResult(event: SparkListenerTaskGettingResult): Unit = { // Call update on the task so that the "getting result" time is written to the store; the // value is part of the mutable TaskInfo state that the live entity already references. liveTasks.get(event.taskInfo.taskId).foreach { task => maybeUpdate(task, System.nanoTime()) } } override def onTaskEnd(event: SparkListenerTaskEnd): Unit = { // TODO: can this really happen? 
if (event.taskInfo == null) { return } val now = System.nanoTime() val metricsDelta = liveTasks.remove(event.taskInfo.taskId).map { task => task.info = event.taskInfo val errorMessage = event.reason match { case Success => None case k: TaskKilled => Some(k.reason) case e: ExceptionFailure => // Handle ExceptionFailure because we might have accumUpdates Some(e.toErrorString) case e: TaskFailedReason => // All other failure cases Some(e.toErrorString) case other => logInfo(s"Unhandled task end reason: $other") None } task.errorMessage = errorMessage val delta = task.updateMetrics(event.taskMetrics) update(task, now, last = true) delta }.orNull val (completedDelta, failedDelta, killedDelta) = event.reason match { case Success => (1, 0, 0) case _: TaskKilled => (0, 0, 1) case _: TaskCommitDenied => (0, 0, 1) case _ => (0, 1, 0) } Option(liveStages.get((event.stageId, event.stageAttemptId))).foreach { stage => if (metricsDelta != null) { stage.metrics = LiveEntityHelpers.addMetrics(stage.metrics, metricsDelta) } stage.activeTasks -= 1 stage.completedTasks += completedDelta if (completedDelta > 0) { stage.completedIndices.add(event.taskInfo.index) } stage.failedTasks += failedDelta stage.killedTasks += killedDelta if (killedDelta > 0) { stage.killedSummary = killedTasksSummary(event.reason, stage.killedSummary) } // [SPARK-24415] Wait for all tasks to finish before removing stage from live list val removeStage = stage.activeTasks == 0 && (v1.StageStatus.COMPLETE.equals(stage.status) || v1.StageStatus.FAILED.equals(stage.status)) if (removeStage) { update(stage, now, last = true) } else { maybeUpdate(stage, now) } // Store both stage ID and task index in a single long variable for tracking at job level. val taskIndex = (event.stageId.toLong << Integer.SIZE) | event.taskInfo.index stage.jobs.foreach { job => job.activeTasks -= 1 job.completedTasks += completedDelta if (completedDelta > 0) { job.completedIndices.add(taskIndex) } job.failedTasks += failedDelta job.killedTasks += killedDelta if (killedDelta > 0) { job.killedSummary = killedTasksSummary(event.reason, job.killedSummary) } conditionalLiveUpdate(job, now, removeStage) } val esummary = stage.executorSummary(event.taskInfo.executorId) esummary.taskTime += event.taskInfo.duration esummary.succeededTasks += completedDelta esummary.failedTasks += failedDelta esummary.killedTasks += killedDelta if (metricsDelta != null) { esummary.metrics = LiveEntityHelpers.addMetrics(esummary.metrics, metricsDelta) } conditionalLiveUpdate(esummary, now, removeStage) if (!stage.cleaning && stage.savedTasks.get() > maxTasksPerStage) { stage.cleaning = true kvstore.doAsync { cleanupTasks(stage) } } if (removeStage) { liveStages.remove((event.stageId, event.stageAttemptId)) } } liveExecutors.get(event.taskInfo.executorId).foreach { exec => exec.activeTasks -= 1 exec.completedTasks += completedDelta exec.failedTasks += failedDelta exec.totalDuration += event.taskInfo.duration // Note: For resubmitted tasks, we continue to use the metrics that belong to the // first attempt of this task. This may not be 100% accurate because the first attempt // could have failed half-way through. The correct fix would be to keep track of the // metrics added by each attempt, but this is much more complicated. 
if (event.reason != Resubmitted) { if (event.taskMetrics != null) { val readMetrics = event.taskMetrics.shuffleReadMetrics exec.totalGcTime += event.taskMetrics.jvmGCTime exec.totalInputBytes += event.taskMetrics.inputMetrics.bytesRead exec.totalShuffleRead += readMetrics.localBytesRead + readMetrics.remoteBytesRead exec.totalShuffleWrite += event.taskMetrics.shuffleWriteMetrics.bytesWritten } } // Force an update on live applications when the number of active tasks reaches 0. This is // checked in some tests (e.g. SQLTestUtilsBase) so it needs to be reliably up to date. conditionalLiveUpdate(exec, now, exec.activeTasks == 0) } } override def onStageCompleted(event: SparkListenerStageCompleted): Unit = { val maybeStage = Option(liveStages.get((event.stageInfo.stageId, event.stageInfo.attemptNumber))) maybeStage.foreach { stage => val now = System.nanoTime() stage.info = event.stageInfo // Because of SPARK-20205, old event logs may contain valid stages without a submission time // in their start event. In those cases, we can only detect whether a stage was skipped by // waiting until the completion event, at which point the field would have been set. stage.status = event.stageInfo.failureReason match { case Some(_) => v1.StageStatus.FAILED case _ if event.stageInfo.submissionTime.isDefined => v1.StageStatus.COMPLETE case _ => v1.StageStatus.SKIPPED } stage.jobs.foreach { job => stage.status match { case v1.StageStatus.COMPLETE => job.completedStages += event.stageInfo.stageId case v1.StageStatus.SKIPPED => job.skippedStages += event.stageInfo.stageId job.skippedTasks += event.stageInfo.numTasks case _ => job.failedStages += 1 } job.activeStages -= 1 liveUpdate(job, now) } pools.get(stage.schedulingPool).foreach { pool => pool.stageIds = pool.stageIds - event.stageInfo.stageId update(pool, now) } stage.executorSummaries.values.foreach(update(_, now)) val executorIdsForStage = stage.blackListedExecutors executorIdsForStage.foreach { executorId => liveExecutors.get(executorId).foreach { exec => removeBlackListedStageFrom(exec, event.stageInfo.stageId, now) } } // Remove stage only if there are no active tasks remaining val removeStage = stage.activeTasks == 0 update(stage, now, last = removeStage) if (removeStage) { liveStages.remove((event.stageInfo.stageId, event.stageInfo.attemptNumber)) } if (stage.status == v1.StageStatus.COMPLETE) { appSummary = new AppSummary(appSummary.numCompletedJobs, appSummary.numCompletedStages + 1) kvstore.write(appSummary) } } // remove any dead executors that were not running for any currently active stages deadExecutors.retain((execId, exec) => isExecutorActiveForLiveStages(exec)) } private def removeBlackListedStageFrom(exec: LiveExecutor, stageId: Int, now: Long) = { exec.blacklistedInStages -= stageId liveUpdate(exec, now) } override def onBlockManagerAdded(event: SparkListenerBlockManagerAdded): Unit = { // This needs to set fields that are already set by onExecutorAdded because the driver is // considered an "executor" in the UI, but does not have a SparkListenerExecutorAdded event. val exec = getOrCreateExecutor(event.blockManagerId.executorId, event.time) exec.hostPort = event.blockManagerId.hostPort event.maxOnHeapMem.foreach { _ => exec.totalOnHeap = event.maxOnHeapMem.get exec.totalOffHeap = event.maxOffHeapMem.get } exec.isActive = true exec.maxMemory = event.maxMem liveUpdate(exec, System.nanoTime()) } override def onBlockManagerRemoved(event: SparkListenerBlockManagerRemoved): Unit = { // Nothing to do here. Covered by onExecutorRemoved. 
} override def onUnpersistRDD(event: SparkListenerUnpersistRDD): Unit = { liveRDDs.remove(event.rddId).foreach { liveRDD => val storageLevel = liveRDD.info.storageLevel // Use RDD partition info to update executor block info. liveRDD.getPartitions().foreach { case (_, part) => part.executors.foreach { executorId => liveExecutors.get(executorId).foreach { exec => exec.rddBlocks = exec.rddBlocks - 1 } } } val now = System.nanoTime() // Use RDD distribution to update executor memory and disk usage info. liveRDD.getDistributions().foreach { case (executorId, rddDist) => liveExecutors.get(executorId).foreach { exec => if (exec.hasMemoryInfo) { if (storageLevel.useOffHeap) { exec.usedOffHeap = addDeltaToValue(exec.usedOffHeap, -rddDist.offHeapUsed) } else { exec.usedOnHeap = addDeltaToValue(exec.usedOnHeap, -rddDist.onHeapUsed) } } exec.memoryUsed = addDeltaToValue(exec.memoryUsed, -rddDist.memoryUsed) exec.diskUsed = addDeltaToValue(exec.diskUsed, -rddDist.diskUsed) maybeUpdate(exec, now) } } } kvstore.delete(classOf[RDDStorageInfoWrapper], event.rddId) } override def onExecutorMetricsUpdate(event: SparkListenerExecutorMetricsUpdate): Unit = { val now = System.nanoTime() event.accumUpdates.foreach { case (taskId, sid, sAttempt, accumUpdates) => liveTasks.get(taskId).foreach { task => val metrics = TaskMetrics.fromAccumulatorInfos(accumUpdates) val delta = task.updateMetrics(metrics) maybeUpdate(task, now) Option(liveStages.get((sid, sAttempt))).foreach { stage => stage.metrics = LiveEntityHelpers.addMetrics(stage.metrics, delta) maybeUpdate(stage, now) val esummary = stage.executorSummary(event.execId) esummary.metrics = LiveEntityHelpers.addMetrics(esummary.metrics, delta) maybeUpdate(esummary, now) } } } // check if there is a new peak value for any of the executor level memory metrics // for the live UI. SparkListenerExecutorMetricsUpdate events are only processed // for the live UI. event.executorUpdates.foreach { updates => liveExecutors.get(event.execId).foreach { exec => if (exec.peakExecutorMetrics.compareAndUpdatePeakValues(updates)) { maybeUpdate(exec, now) } } } } override def onStageExecutorMetrics(executorMetrics: SparkListenerStageExecutorMetrics): Unit = { val now = System.nanoTime() // check if there is a new peak value for any of the executor level memory metrics, // while reading from the log. SparkListenerStageExecutorMetrics are only processed // when reading logs. liveExecutors.get(executorMetrics.execId) .orElse(deadExecutors.get(executorMetrics.execId)).map { exec => if (exec.peakExecutorMetrics.compareAndUpdatePeakValues(executorMetrics.executorMetrics)) { update(exec, now) } } } override def onBlockUpdated(event: SparkListenerBlockUpdated): Unit = { event.blockUpdatedInfo.blockId match { case block: RDDBlockId => updateRDDBlock(event, block) case stream: StreamBlockId => updateStreamBlock(event, stream) case _ => } } /** Flush all live entities' data to the underlying store. */ private def flush(): Unit = { val now = System.nanoTime() liveStages.values.asScala.foreach { stage => update(stage, now) stage.executorSummaries.values.foreach(update(_, now)) } liveJobs.values.foreach(update(_, now)) liveExecutors.values.foreach(update(_, now)) liveTasks.values.foreach(update(_, now)) liveRDDs.values.foreach(update(_, now)) pools.values.foreach(update(_, now)) } /** * Shortcut to get active stages quickly in a live application, for use by the console * progress bar. 
*/ def activeStages(): Seq[v1.StageData] = { liveStages.values.asScala .filter(_.info.submissionTime.isDefined) .map(_.toApi()) .toList .sortBy(_.stageId) } /** * Apply a delta to a value, but ensure that it doesn't go negative. */ private def addDeltaToValue(old: Long, delta: Long): Long = math.max(0, old + delta) private def updateRDDBlock(event: SparkListenerBlockUpdated, block: RDDBlockId): Unit = { val now = System.nanoTime() val executorId = event.blockUpdatedInfo.blockManagerId.executorId // Whether values are being added to or removed from the existing accounting. val storageLevel = event.blockUpdatedInfo.storageLevel val diskDelta = event.blockUpdatedInfo.diskSize * (if (storageLevel.useDisk) 1 else -1) val memoryDelta = event.blockUpdatedInfo.memSize * (if (storageLevel.useMemory) 1 else -1) val updatedStorageLevel = if (storageLevel.isValid) { Some(storageLevel.description) } else { None } // We need information about the executor to update some memory accounting values in the // RDD info, so read that beforehand. val maybeExec = liveExecutors.get(executorId) var rddBlocksDelta = 0 // Update the executor stats first, since they are used to calculate the free memory // on tracked RDD distributions. maybeExec.foreach { exec => if (exec.hasMemoryInfo) { if (storageLevel.useOffHeap) { exec.usedOffHeap = addDeltaToValue(exec.usedOffHeap, memoryDelta) } else { exec.usedOnHeap = addDeltaToValue(exec.usedOnHeap, memoryDelta) } } exec.memoryUsed = addDeltaToValue(exec.memoryUsed, memoryDelta) exec.diskUsed = addDeltaToValue(exec.diskUsed, diskDelta) } // Update the block entry in the RDD info, keeping track of the deltas above so that we // can update the executor information too. liveRDDs.get(block.rddId).foreach { rdd => if (updatedStorageLevel.isDefined) { rdd.setStorageLevel(updatedStorageLevel.get) } val partition = rdd.partition(block.name) val executors = if (updatedStorageLevel.isDefined) { val current = partition.executors if (current.contains(executorId)) { current } else { rddBlocksDelta = 1 current :+ executorId } } else { rddBlocksDelta = -1 partition.executors.filter(_ != executorId) } // Only update the partition if it's still stored in some executor, otherwise get rid of it. if (executors.nonEmpty) { partition.update(executors, rdd.storageLevel, addDeltaToValue(partition.memoryUsed, memoryDelta), addDeltaToValue(partition.diskUsed, diskDelta)) } else { rdd.removePartition(block.name) } maybeExec.foreach { exec => if (exec.rddBlocks + rddBlocksDelta > 0) { val dist = rdd.distribution(exec) dist.memoryUsed = addDeltaToValue(dist.memoryUsed, memoryDelta) dist.diskUsed = addDeltaToValue(dist.diskUsed, diskDelta) if (exec.hasMemoryInfo) { if (storageLevel.useOffHeap) { dist.offHeapUsed = addDeltaToValue(dist.offHeapUsed, memoryDelta) } else { dist.onHeapUsed = addDeltaToValue(dist.onHeapUsed, memoryDelta) } } dist.lastUpdate = null } else { rdd.removeDistribution(exec) } // Trigger an update on other RDDs so that the free memory information is updated. liveRDDs.values.foreach { otherRdd => if (otherRdd.info.id != block.rddId) { otherRdd.distributionOpt(exec).foreach { dist => dist.lastUpdate = null update(otherRdd, now) } } } } rdd.memoryUsed = addDeltaToValue(rdd.memoryUsed, memoryDelta) rdd.diskUsed = addDeltaToValue(rdd.diskUsed, diskDelta) update(rdd, now) } // Finish updating the executor now that we know the delta in the number of blocks. 
maybeExec.foreach { exec => exec.rddBlocks += rddBlocksDelta maybeUpdate(exec, now) } } private def getOrCreateExecutor(executorId: String, addTime: Long): LiveExecutor = { liveExecutors.getOrElseUpdate(executorId, { activeExecutorCount += 1 new LiveExecutor(executorId, addTime) }) } private def updateStreamBlock(event: SparkListenerBlockUpdated, stream: StreamBlockId): Unit = { val storageLevel = event.blockUpdatedInfo.storageLevel if (storageLevel.isValid) { val data = new StreamBlockData( stream.name, event.blockUpdatedInfo.blockManagerId.executorId, event.blockUpdatedInfo.blockManagerId.hostPort, storageLevel.description, storageLevel.useMemory, storageLevel.useDisk, storageLevel.deserialized, event.blockUpdatedInfo.memSize, event.blockUpdatedInfo.diskSize) kvstore.write(data) } else { kvstore.delete(classOf[StreamBlockData], Array(stream.name, event.blockUpdatedInfo.blockManagerId.executorId)) } } private def getOrCreateStage(info: StageInfo): LiveStage = { val stage = liveStages.computeIfAbsent((info.stageId, info.attemptNumber), new Function[(Int, Int), LiveStage]() { override def apply(key: (Int, Int)): LiveStage = new LiveStage() }) stage.info = info stage } private def killedTasksSummary( reason: TaskEndReason, oldSummary: Map[String, Int]): Map[String, Int] = { reason match { case k: TaskKilled => oldSummary.updated(k.reason, oldSummary.getOrElse(k.reason, 0) + 1) case denied: TaskCommitDenied => val reason = denied.toErrorString oldSummary.updated(reason, oldSummary.getOrElse(reason, 0) + 1) case _ => oldSummary } } private def update(entity: LiveEntity, now: Long, last: Boolean = false): Unit = { entity.write(kvstore, now, checkTriggers = last) } /** Update a live entity only if it hasn't been updated in the last configured period. */ private def maybeUpdate(entity: LiveEntity, now: Long): Unit = { if (live && liveUpdatePeriodNs >= 0 && now - entity.lastWriteTime > liveUpdatePeriodNs) { update(entity, now) } } /** Update an entity only if in a live app; avoids redundant writes when replaying logs. */ private def liveUpdate(entity: LiveEntity, now: Long): Unit = { if (live) { update(entity, now) } } private def conditionalLiveUpdate(entity: LiveEntity, now: Long, condition: Boolean): Unit = { if (condition) { liveUpdate(entity, now) } else { maybeUpdate(entity, now) } } private def cleanupExecutors(count: Long): Unit = { // Because the limit is on the number of *dead* executors, we need to calculate whether // there are actually enough dead executors to be deleted. 
val threshold = conf.get(MAX_RETAINED_DEAD_EXECUTORS) val dead = count - activeExecutorCount if (dead > threshold) { val countToDelete = calculateNumberToRemove(dead, threshold) val toDelete = kvstore.view(classOf[ExecutorSummaryWrapper]).index("active") .max(countToDelete).first(false).last(false).asScala.toSeq toDelete.foreach { e => kvstore.delete(e.getClass(), e.info.id) } } } private def cleanupJobs(count: Long): Unit = { val countToDelete = calculateNumberToRemove(count, conf.get(MAX_RETAINED_JOBS)) if (countToDelete <= 0L) { return } val view = kvstore.view(classOf[JobDataWrapper]).index("completionTime").first(0L) val toDelete = KVUtils.viewToSeq(view, countToDelete.toInt) { j => j.info.status != JobExecutionStatus.RUNNING && j.info.status != JobExecutionStatus.UNKNOWN } toDelete.foreach { j => kvstore.delete(j.getClass(), j.info.jobId) } } private def cleanupStages(count: Long): Unit = { val countToDelete = calculateNumberToRemove(count, conf.get(MAX_RETAINED_STAGES)) if (countToDelete <= 0L) { return } // As the completion time of a skipped stage is always -1, we will remove skipped stages first. // This is safe since the job itself contains enough information to render skipped stages in the // UI. val view = kvstore.view(classOf[StageDataWrapper]).index("completionTime") val stages = KVUtils.viewToSeq(view, countToDelete.toInt) { s => s.info.status != v1.StageStatus.ACTIVE && s.info.status != v1.StageStatus.PENDING } stages.foreach { s => val key = Array(s.info.stageId, s.info.attemptId) kvstore.delete(s.getClass(), key) val execSummaries = kvstore.view(classOf[ExecutorStageSummaryWrapper]) .index("stage") .first(key) .last(key) .asScala .toSeq execSummaries.foreach { e => kvstore.delete(e.getClass(), e.id) } // Check whether there are remaining attempts for the same stage. If there aren't, then // also delete the RDD graph data. val remainingAttempts = kvstore.view(classOf[StageDataWrapper]) .index("stageId") .first(s.info.stageId) .last(s.info.stageId) .closeableIterator() val hasMoreAttempts = try { remainingAttempts.asScala.exists { other => other.info.attemptId != s.info.attemptId } } finally { remainingAttempts.close() } if (!hasMoreAttempts) { kvstore.delete(classOf[RDDOperationGraphWrapper], s.info.stageId) } cleanupCachedQuantiles(key) } // Delete tasks for all stages in one pass, as deleting them for each stage individually is slow val tasks = kvstore.view(classOf[TaskDataWrapper]).asScala val keys = stages.map { s => (s.info.stageId, s.info.attemptId) }.toSet tasks.foreach { t => if (keys.contains((t.stageId, t.stageAttemptId))) { kvstore.delete(t.getClass(), t.taskId) } } } private def cleanupTasks(stage: LiveStage): Unit = { val countToDelete = calculateNumberToRemove(stage.savedTasks.get(), maxTasksPerStage).toInt if (countToDelete > 0) { val stageKey = Array(stage.info.stageId, stage.info.attemptNumber) val view = kvstore.view(classOf[TaskDataWrapper]) .index(TaskIndexNames.COMPLETION_TIME) .parent(stageKey) // Try to delete finished tasks only. val toDelete = KVUtils.viewToSeq(view, countToDelete) { t => !live || t.status != TaskState.RUNNING.toString() } toDelete.foreach { t => kvstore.delete(t.getClass(), t.taskId) } stage.savedTasks.addAndGet(-toDelete.size) // If there are more running tasks than the configured limit, delete running tasks. This // should be extremely rare since the limit should generally far exceed the number of tasks // that can run in parallel. 
val remaining = countToDelete - toDelete.size if (remaining > 0) { val runningTasksToDelete = view.max(remaining).iterator().asScala.toList runningTasksToDelete.foreach { t => kvstore.delete(t.getClass(), t.taskId) } stage.savedTasks.addAndGet(-remaining) } // On live applications, cleanup any cached quantiles for the stage. This makes sure that // quantiles will be recalculated after tasks are replaced with newer ones. // // This is not needed in the SHS since caching only happens after the event logs are // completely processed. if (live) { cleanupCachedQuantiles(stageKey) } } stage.cleaning = false } private def cleanupCachedQuantiles(stageKey: Array[Int]): Unit = { val cachedQuantiles = kvstore.view(classOf[CachedQuantile]) .index("stage") .first(stageKey) .last(stageKey) .asScala .toList cachedQuantiles.foreach { q => kvstore.delete(q.getClass(), q.id) } } /** * Remove at least (retainedSize / 10) items to reduce friction. Because tracking may be done * asynchronously, this method may return 0 in case enough items have been deleted already. */ private def calculateNumberToRemove(dataSize: Long, retainedSize: Long): Long = { if (dataSize > retainedSize) { math.max(retainedSize / 10L, dataSize - retainedSize) } else { 0L } } }
ahnqirage/spark
core/src/main/scala/org/apache/spark/status/AppStatusListener.scala
Scala
apache-2.0
43,021
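AppStatusListener above is internal to Spark, but the SparkListener callbacks it overrides are public API. A minimal, hedged sketch of an application-level listener that counts finished task attempts; the class and field names are invented.

import java.util.concurrent.atomic.AtomicLong

import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskEnd}

class TaskCountListener extends SparkListener {
  val finishedTasks = new AtomicLong(0L)

  override def onTaskEnd(event: SparkListenerTaskEnd): Unit = {
    // Called once per completed (or failed/killed) task attempt.
    finishedTasks.incrementAndGet()
  }
}

// Registration on an existing SparkContext `sc`:
// sc.addSparkListener(new TaskCountListener())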
package org.jetbrains.plugins.scala.testingSupport.scalatest.generators import org.jetbrains.plugins.scala.testingSupport.scalatest.ScalaTestTestCase trait WordSpecGenerator extends ScalaTestTestCase { val wordSpecClassName = "WordSpecTest" val wordSpecFileName: String = wordSpecClassName + ".scala" addSourceFile(wordSpecFileName, s""" |import org.scalatest._ | |class $wordSpecClassName extends WordSpec { | "WordSpecTest" should { | "Run single test" in { | print("$TestOutputPrefix OK $TestOutputSuffix") | } | | "ignore other tests" in { | print("$TestOutputPrefix FAILED $TestOutputSuffix") | } | } | | "empty" should {} | | "outer" should { | "inner" in {} | } | | "tagged" should { | "be tagged" taggedAs(WordSpecTag) in {} | } |} | |object WordSpecTag extends Tag("MyTag") """.stripMargin.trim() ) }
JetBrains/intellij-scala
scala/scala-impl/test/org/jetbrains/plugins/scala/testingSupport/scalatest/generators/WordSpecGenerator.scala
Scala
apache-2.0
1,041
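For reference, a stand-alone WordSpec suite in the same shape as the source generated above; this version is illustrative only, since the real suite is produced by addSourceFile at test time.

import org.scalatest._

class StandaloneWordSpecTest extends WordSpec {
  "WordSpecTest" should {
    "Run single test" in {
      assert(1 + 1 == 2)
    }
  }
}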
package ru.maizy.cheesecake.server /** * Copyright (c) Nikita Kovaliov, maizy.ru, 2016 * See LICENSE.txt for details. */ import java.io.File case class ServerAppOptions( port: Int = ServerAppOptions.DEFAULT_PORT, host: String = ServerAppOptions.DEFAULT_HOST, config: Option[File] = None ) object ServerAppOptions { val DEFAULT_PORT = 52022 val DEFAULT_HOST = "localhost" } object OptionParser { private val parser = new scopt.OptionParser[ServerAppOptions](s"java -jar ${BuildInfo.projectName}.jar") { head("Cheesecake server", Version.toString) help("help") version("version") opt[String]('h', "host") .text(s"host to listen on, default: ${ServerAppOptions.DEFAULT_HOST}") .action { (value, c) => c.copy(host = value) } opt[Int]('p', "port") .text(s"port to listen on, default: ${ServerAppOptions.DEFAULT_PORT}") .action { (value, c) => c.copy(port = value) } opt[File]('c', "config") .text("additional config") .valueName("<file>") .action { (value, c) => c.copy(config = Some(value)) } } def parse(args: Seq[String]): Option[ServerAppOptions] = { val opts = parser.parse(args, ServerAppOptions()) val fails = Seq[Option[ServerAppOptions] => Boolean]( _.isEmpty ) if (fails.exists(_ (opts))) { parser.showUsageAsError None } else { opts } } }
maizy/cheesecake
server/src/main/scala/ru/maizy/cheesecake/server/Options.scala
Scala
apache-2.0
1,394
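An illustrative call to the parser defined above; flags follow scopt's long-option form, and omitted options keep their defaults.

  val parsed: Option[ServerAppOptions] = OptionParser.parse(Seq("--port", "8080"))

  parsed.foreach { opts =>
    // host falls back to ServerAppOptions.DEFAULT_HOST ("localhost")
    println(s"listening on ${opts.host}:${opts.port}")
  }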
package com.twitter.finagle.partitioning import com.twitter.finagle.Addr /** * Cache node metadata consumed by clients that implement sharding (see * [[com.twitter.finagle.memcached.KetamaPartitionedClient]]). * * This class and its companion object are private because they are only an implementation detail for * converting between [[com.twitter.finagle.partitioning.CacheNode]] and * [[com.twitter.finagle.Address]]. We need to convert between these types for backwards * compatibility: [[com.twitter.finagle.KetamaPartitionedClient]] consumes * [[CacheNode]]s but [[com.twitter.finagle.Resolver]]s return * [[com.twitter.finagle.Address]]s. * * @param weight The weight of the cache node. Default value is 1. Note that this determines where * data is stored in the Ketama ring and is not interchangable with the notion of weight in * [[com.twitter.finagle.addr.WeightedAddress]], which pertains to load balancing. * @param key An optional unique identifier for the cache node (e.g. shard ID). */ private[finagle] case class CacheNodeMetadata(weight: Int, key: Option[String]) private[finagle] object CacheNodeMetadata { private val key = "cache_node_metadata" /** * Convert [[CacheNodeMetadata]] to an instance of * [[com.twitter.finagle.Addr.Metadata]]. */ def toAddrMetadata(metadata: CacheNodeMetadata): Addr.Metadata = Addr.Metadata(key -> metadata) /** * Convert [[com.twitter.finagle.Addr.Metadata]] to an instance of * [[CacheNodeMetadata]]. */ def fromAddrMetadata(metadata: Addr.Metadata): Option[CacheNodeMetadata] = metadata.get(key) match { case some @ Some(metadata: CacheNodeMetadata) => some.asInstanceOf[Option[CacheNodeMetadata]] case _ => None } /** * A variant of `fromAddrMetadata` that pattern matches against weight * and key of the [[CacheNodeMetadata]]. */ def unapply(metadata: Addr.Metadata): Option[(Int, Option[String])] = fromAddrMetadata(metadata).map { case CacheNodeMetadata(weight, key) => (weight, key) } }
luciferous/finagle
finagle-partitioning/src/main/scala/com/twitter/finagle/partitioning/CacheNodeMetadata.scala
Scala
apache-2.0
2,062
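A hedged round-trip sketch for the helpers above. Because CacheNodeMetadata is private[finagle], code like this could only live inside the com.twitter.finagle namespace; the weight and key values are illustrative.

import com.twitter.finagle.Addr

val meta: Addr.Metadata =
  CacheNodeMetadata.toAddrMetadata(CacheNodeMetadata(weight = 2, key = Some("shard-1")))

meta match {
  case CacheNodeMetadata(weight, key) => println(s"weight=$weight key=$key")
  case _                              => println("no cache node metadata attached")
}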
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.catalyst.analysis import org.apache.spark.api.python.PythonEvalType import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.expressions.SubExprUtils._ import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression import org.apache.spark.sql.catalyst.optimizer.BooleanSimplification import org.apache.spark.sql.catalyst.plans._ import org.apache.spark.sql.catalyst.plans.logical._ import org.apache.spark.sql.types._ /** * Throws user facing errors when passed invalid queries that fail to analyze. */ trait CheckAnalysis extends PredicateHelper { /** * Override to provide additional checks for correct analysis. * These rules will be evaluated after our built-in check rules. */ val extendedCheckRules: Seq[LogicalPlan => Unit] = Nil protected def failAnalysis(msg: String): Nothing = { throw new AnalysisException(msg) } protected def containsMultipleGenerators(exprs: Seq[Expression]): Boolean = { exprs.flatMap(_.collect { case e: Generator => e }).length > 1 } protected def hasMapType(dt: DataType): Boolean = { dt.existsRecursively(_.isInstanceOf[MapType]) } protected def mapColumnInSetOperation(plan: LogicalPlan): Option[Attribute] = plan match { case _: Intersect | _: Except | _: Distinct => plan.output.find(a => hasMapType(a.dataType)) case d: Deduplicate => d.keys.find(a => hasMapType(a.dataType)) case _ => None } private def checkLimitClause(limitExpr: Expression): Unit = { limitExpr match { case e if !e.foldable => failAnalysis( "The limit expression must evaluate to a constant value, but got " + limitExpr.sql) case e if e.dataType != IntegerType => failAnalysis( s"The limit expression must be integer type, but got " + e.dataType.catalogString) case e => e.eval() match { case null => failAnalysis( s"The evaluated limit expression must not be null, but got ${limitExpr.sql}") case v: Int if v < 0 => failAnalysis( s"The limit expression must be equal to or greater than 0, but got $v") case _ => // OK } } } def checkAnalysis(plan: LogicalPlan): Unit = { // We transform up and order the rules so as to catch the first possible failure instead // of the result of cascading resolution failures. plan.foreachUp { case p if p.analyzed => // Skip already analyzed sub-plans case u: UnresolvedRelation => u.failAnalysis(s"Table or view not found: ${u.tableIdentifier}") case operator: LogicalPlan => // Check argument data types of higher-order functions downwards first. // If the arguments of the higher-order functions are resolved but the type check fails, // the argument functions will not get resolved, but we should report the argument type // check failure instead of claiming the argument functions are unresolved. 
operator transformExpressionsDown { case hof: HigherOrderFunction if hof.argumentsResolved && hof.checkArgumentDataTypes().isFailure => hof.checkArgumentDataTypes() match { case TypeCheckResult.TypeCheckFailure(message) => hof.failAnalysis( s"cannot resolve '${hof.sql}' due to argument data type mismatch: $message") } } operator transformExpressionsUp { case a: Attribute if !a.resolved => val from = operator.inputSet.map(_.qualifiedName).mkString(", ") a.failAnalysis(s"cannot resolve '${a.sql}' given input columns: [$from]") case e: Expression if e.checkInputDataTypes().isFailure => e.checkInputDataTypes() match { case TypeCheckResult.TypeCheckFailure(message) => e.failAnalysis( s"cannot resolve '${e.sql}' due to data type mismatch: $message") } case c: Cast if !c.resolved => failAnalysis(s"invalid cast from ${c.child.dataType.catalogString} to " + c.dataType.catalogString) case g: Grouping => failAnalysis("grouping() can only be used with GroupingSets/Cube/Rollup") case g: GroupingID => failAnalysis("grouping_id() can only be used with GroupingSets/Cube/Rollup") case w @ WindowExpression(AggregateExpression(_, _, true, _), _) => failAnalysis(s"Distinct window functions are not supported: $w") case w @ WindowExpression(_: OffsetWindowFunction, WindowSpecDefinition(_, order, frame: SpecifiedWindowFrame)) if order.isEmpty || !frame.isOffset => failAnalysis("An offset window function can only be evaluated in an ordered " + s"row-based window frame with a single offset: $w") case w @ WindowExpression(e, s) => // Only allow window functions with an aggregate expression or an offset window // function or a Pandas window UDF. e match { case _: AggregateExpression | _: OffsetWindowFunction | _: AggregateWindowFunction => w case f: PythonUDF if PythonUDF.isWindowPandasUDF(f) => w case _ => failAnalysis(s"Expression '$e' not supported within a window function.") } case s: SubqueryExpression => checkSubqueryExpression(operator, s) s } operator match { case etw: EventTimeWatermark => etw.eventTime.dataType match { case s: StructType if s.find(_.name == "end").map(_.dataType) == Some(TimestampType) => case _: TimestampType => case _ => failAnalysis( s"Event time must be defined on a window or a timestamp, but " + s"${etw.eventTime.name} is of type ${etw.eventTime.dataType.catalogString}") } case f: Filter if f.condition.dataType != BooleanType => failAnalysis( s"filter expression '${f.condition.sql}' " + s"of type ${f.condition.dataType.catalogString} is not a boolean.") case Filter(condition, _) if hasNullAwarePredicateWithinNot(condition) => failAnalysis("Null-aware predicate sub-queries cannot be used in nested " + s"conditions: $condition") case j @ Join(_, _, _, Some(condition)) if condition.dataType != BooleanType => failAnalysis( s"join condition '${condition.sql}' " + s"of type ${condition.dataType.catalogString} is not a boolean.") case Aggregate(groupingExprs, aggregateExprs, child) => def isAggregateExpression(expr: Expression) = { expr.isInstanceOf[AggregateExpression] || PythonUDF.isGroupedAggPandasUDF(expr) } def checkValidAggregateExpression(expr: Expression): Unit = expr match { case expr: Expression if isAggregateExpression(expr) => val aggFunction = expr match { case agg: AggregateExpression => agg.aggregateFunction case udf: PythonUDF => udf } aggFunction.children.foreach { child => child.foreach { case expr: Expression if isAggregateExpression(expr) => failAnalysis( s"It is not allowed to use an aggregate function in the argument of " + s"another aggregate function. 
Please use the inner aggregate function " + s"in a sub-query.") case other => // OK } if (!child.deterministic) { failAnalysis( s"nondeterministic expression ${expr.sql} should not " + s"appear in the arguments of an aggregate function.") } } case e: Attribute if groupingExprs.isEmpty => // Collect all [[AggregateExpressions]]s. val aggExprs = aggregateExprs.filter(_.collect { case a: AggregateExpression => a }.nonEmpty) failAnalysis( s"grouping expressions sequence is empty, " + s"and '${e.sql}' is not an aggregate function. " + s"Wrap '${aggExprs.map(_.sql).mkString("(", ", ", ")")}' in windowing " + s"function(s) or wrap '${e.sql}' in first() (or first_value) " + s"if you don't care which value you get." ) case e: Attribute if !groupingExprs.exists(_.semanticEquals(e)) => failAnalysis( s"expression '${e.sql}' is neither present in the group by, " + s"nor is it an aggregate function. " + "Add to group by or wrap in first() (or first_value) if you don't care " + "which value you get.") case e if groupingExprs.exists(_.semanticEquals(e)) => // OK case e => e.children.foreach(checkValidAggregateExpression) } def checkValidGroupingExprs(expr: Expression): Unit = { if (expr.find(_.isInstanceOf[AggregateExpression]).isDefined) { failAnalysis( "aggregate functions are not allowed in GROUP BY, but found " + expr.sql) } // Check if the data type of expr is orderable. if (!RowOrdering.isOrderable(expr.dataType)) { failAnalysis( s"expression ${expr.sql} cannot be used as a grouping expression " + s"because its data type ${expr.dataType.catalogString} is not an orderable " + s"data type.") } if (!expr.deterministic) { // This is just a sanity check, our analysis rule PullOutNondeterministic should // already pull out those nondeterministic expressions and evaluate them in // a Project node. failAnalysis(s"nondeterministic expression ${expr.sql} should not " + s"appear in grouping expression.") } } groupingExprs.foreach(checkValidGroupingExprs) aggregateExprs.foreach(checkValidAggregateExpression) case Sort(orders, _, _) => orders.foreach { order => if (!RowOrdering.isOrderable(order.dataType)) { failAnalysis( s"sorting is not supported for columns of type ${order.dataType.catalogString}") } } case GlobalLimit(limitExpr, _) => checkLimitClause(limitExpr) case LocalLimit(limitExpr, _) => checkLimitClause(limitExpr) case _: Union | _: SetOperation if operator.children.length > 1 => def dataTypes(plan: LogicalPlan): Seq[DataType] = plan.output.map(_.dataType) def ordinalNumber(i: Int): String = i match { case 0 => "first" case 1 => "second" case i => s"${i}th" } val ref = dataTypes(operator.children.head) operator.children.tail.zipWithIndex.foreach { case (child, ti) => // Check the number of columns if (child.output.length != ref.length) { failAnalysis( s""" |${operator.nodeName} can only be performed on tables with the same number |of columns, but the first table has ${ref.length} columns and |the ${ordinalNumber(ti + 1)} table has ${child.output.length} columns """.stripMargin.replace("\\n", " ").trim()) } // Check if the data types match. dataTypes(child).zip(ref).zipWithIndex.foreach { case ((dt1, dt2), ci) => // SPARK-18058: we shall not care about the nullability of columns if (TypeCoercion.findWiderTypeForTwo(dt1.asNullable, dt2.asNullable).isEmpty) { failAnalysis( s""" |${operator.nodeName} can only be performed on tables with the compatible |column types. 
${dt1.catalogString} <> ${dt2.catalogString} at the |${ordinalNumber(ci)} column of the ${ordinalNumber(ti + 1)} table """.stripMargin.replace("\\n", " ").trim()) } } } case _ => // Fallbacks to the following checks } operator match { case o if o.children.nonEmpty && o.missingInput.nonEmpty => val missingAttributes = o.missingInput.mkString(",") val input = o.inputSet.mkString(",") val msgForMissingAttributes = s"Resolved attribute(s) $missingAttributes missing " + s"from $input in operator ${operator.simpleString}." val resolver = plan.conf.resolver val attrsWithSameName = o.missingInput.filter { missing => o.inputSet.exists(input => resolver(missing.name, input.name)) } val msg = if (attrsWithSameName.nonEmpty) { val sameNames = attrsWithSameName.map(_.name).mkString(",") s"$msgForMissingAttributes Attribute(s) with the same name appear in the " + s"operation: $sameNames. Please check if the right attribute(s) are used." } else { msgForMissingAttributes } failAnalysis(msg) case p @ Project(exprs, _) if containsMultipleGenerators(exprs) => failAnalysis( s"""Only a single table generating function is allowed in a SELECT clause, found: | ${exprs.map(_.sql).mkString(",")}""".stripMargin) case j: Join if !j.duplicateResolved => val conflictingAttributes = j.left.outputSet.intersect(j.right.outputSet) failAnalysis( s""" |Failure when resolving conflicting references in Join: |$plan |Conflicting attributes: ${conflictingAttributes.mkString(",")} |""".stripMargin) case i: Intersect if !i.duplicateResolved => val conflictingAttributes = i.left.outputSet.intersect(i.right.outputSet) failAnalysis( s""" |Failure when resolving conflicting references in Intersect: |$plan |Conflicting attributes: ${conflictingAttributes.mkString(",")} """.stripMargin) case e: Except if !e.duplicateResolved => val conflictingAttributes = e.left.outputSet.intersect(e.right.outputSet) failAnalysis( s""" |Failure when resolving conflicting references in Except: |$plan |Conflicting attributes: ${conflictingAttributes.mkString(",")} """.stripMargin) // TODO: although map type is not orderable, technically map type should be able to be // used in equality comparison, remove this type check once we support it. case o if mapColumnInSetOperation(o).isDefined => val mapCol = mapColumnInSetOperation(o).get failAnalysis("Cannot have map type columns in DataFrame which calls " + s"set operations(intersect, except, etc.), but the type of column ${mapCol.name} " + "is " + mapCol.dataType.catalogString) case o if o.expressions.exists(!_.deterministic) && !o.isInstanceOf[Project] && !o.isInstanceOf[Filter] && !o.isInstanceOf[Aggregate] && !o.isInstanceOf[Window] => // The rule above is used to check Aggregate operator. failAnalysis( s"""nondeterministic expressions are only allowed in |Project, Filter, Aggregate or Window, found: | ${o.expressions.map(_.sql).mkString(",")} |in operator ${operator.simpleString} """.stripMargin) case _: UnresolvedHint => throw new IllegalStateException( "Internal error: logical hint operator should have been removed during analysis") case _ => // Analysis successful! } } extendedCheckRules.foreach(_(plan)) plan.foreachUp { case o if !o.resolved => failAnalysis(s"unresolved operator ${o.simpleString}") case _ => } plan.setAnalyzed() } /** * Validates subquery expressions in the plan. Upon failure, returns an user facing error. 
*/ private def checkSubqueryExpression(plan: LogicalPlan, expr: SubqueryExpression): Unit = { def checkAggregateInScalarSubquery( conditions: Seq[Expression], query: LogicalPlan, agg: Aggregate): Unit = { // Make sure correlated scalar subqueries contain one row for every outer row by // enforcing that they are aggregates containing exactly one aggregate expression. val aggregates = agg.expressions.flatMap(_.collect { case a: AggregateExpression => a }) if (aggregates.isEmpty) { failAnalysis("The output of a correlated scalar subquery must be aggregated") } // SPARK-18504/SPARK-18814: Block cases where GROUP BY columns // are not part of the correlated columns. val groupByCols = AttributeSet(agg.groupingExpressions.flatMap(_.references)) // Collect the local references from the correlated predicate in the subquery. val subqueryColumns = getCorrelatedPredicates(query).flatMap(_.references) .filterNot(conditions.flatMap(_.references).contains) val correlatedCols = AttributeSet(subqueryColumns) val invalidCols = groupByCols -- correlatedCols // GROUP BY columns must be a subset of columns in the predicates if (invalidCols.nonEmpty) { failAnalysis( "A GROUP BY clause in a scalar correlated subquery " + "cannot contain non-correlated columns: " + invalidCols.mkString(",")) } } // Skip subquery aliases added by the Analyzer. // For projects, do the necessary mapping and skip to its child. def cleanQueryInScalarSubquery(p: LogicalPlan): LogicalPlan = p match { case s: SubqueryAlias => cleanQueryInScalarSubquery(s.child) case p: Project => cleanQueryInScalarSubquery(p.child) case child => child } // Validate the subquery plan. checkAnalysis(expr.plan) expr match { case ScalarSubquery(query, conditions, _) => // Scalar subquery must return one column as output. if (query.output.size != 1) { failAnalysis( s"Scalar subquery must return only one column, but got ${query.output.size}") } if (conditions.nonEmpty) { cleanQueryInScalarSubquery(query) match { case a: Aggregate => checkAggregateInScalarSubquery(conditions, query, a) case Filter(_, a: Aggregate) => checkAggregateInScalarSubquery(conditions, query, a) case fail => failAnalysis(s"Correlated scalar subqueries must be aggregated: $fail") } // Only certain operators are allowed to host subquery expression containing // outer references. plan match { case _: Filter | _: Aggregate | _: Project => // Ok case other => failAnalysis( "Correlated scalar sub-queries can only be used in a " + s"Filter/Aggregate/Project: $plan") } } case inSubqueryOrExistsSubquery => plan match { case _: Filter => // Ok case _ => failAnalysis(s"IN/EXISTS predicate sub-queries can only be used in a Filter: $plan") } } // Validate to make sure the correlations appearing in the query are valid and // allowed by spark. checkCorrelationsInSubquery(expr.plan) } /** * Validates to make sure the outer references appearing inside the subquery * are allowed. */ private def checkCorrelationsInSubquery(sub: LogicalPlan): Unit = { // Validate that correlated aggregate expression do not contain a mixture // of outer and local references. def checkMixedReferencesInsideAggregateExpr(expr: Expression): Unit = { expr.foreach { case a: AggregateExpression if containsOuter(a) => val outer = a.collect { case OuterReference(e) => e.toAttribute } val local = a.references -- outer if (local.nonEmpty) { val msg = s""" |Found an aggregate expression in a correlated predicate that has both |outer and local references, which is not supported yet. 
|Aggregate expression: ${SubExprUtils.stripOuterReference(a).sql}, |Outer references: ${outer.map(_.sql).mkString(", ")}, |Local references: ${local.map(_.sql).mkString(", ")}. """.stripMargin.replace("\\n", " ").trim() failAnalysis(msg) } case _ => } } // Make sure a plan's subtree does not contain outer references def failOnOuterReferenceInSubTree(p: LogicalPlan): Unit = { if (hasOuterReferences(p)) { failAnalysis(s"Accessing outer query column is not allowed in:\\n$p") } } // Make sure a plan's expressions do not contain : // 1. Aggregate expressions that have mixture of outer and local references. // 2. Expressions containing outer references on plan nodes other than Filter. def failOnInvalidOuterReference(p: LogicalPlan): Unit = { p.expressions.foreach(checkMixedReferencesInsideAggregateExpr) if (!p.isInstanceOf[Filter] && p.expressions.exists(containsOuter)) { failAnalysis( "Expressions referencing the outer query are not supported outside of WHERE/HAVING " + s"clauses:\\n$p") } } // SPARK-17348: A potential incorrect result case. // When a correlated predicate is a non-equality predicate, // certain operators are not permitted from the operator // hosting the correlated predicate up to the operator on the outer table. // Otherwise, the pull up of the correlated predicate // will generate a plan with a different semantics // which could return incorrect result. // Currently we check for Aggregate and Window operators // // Below shows an example of a Logical Plan during Analyzer phase that // show this problem. Pulling the correlated predicate [outer(c2#77) >= ..] // through the Aggregate (or Window) operator could alter the result of // the Aggregate. // // Project [c1#76] // +- Project [c1#87, c2#88] // : (Aggregate or Window operator) // : +- Filter [outer(c2#77) >= c2#88)] // : +- SubqueryAlias t2, `t2` // : +- Project [_1#84 AS c1#87, _2#85 AS c2#88] // : +- LocalRelation [_1#84, _2#85] // +- SubqueryAlias t1, `t1` // +- Project [_1#73 AS c1#76, _2#74 AS c2#77] // +- LocalRelation [_1#73, _2#74] def failOnNonEqualCorrelatedPredicate(found: Boolean, p: LogicalPlan): Unit = { if (found) { // Report a non-supported case as an exception failAnalysis(s"Correlated column is not allowed in a non-equality predicate:\\n$p") } } var foundNonEqualCorrelatedPred: Boolean = false // Simplify the predicates before validating any unsupported correlation patterns in the plan. AnalysisHelper.allowInvokingTransformsInAnalyzer { BooleanSimplification(sub).foreachUp { // Whitelist operators allowed in a correlated subquery // There are 4 categories: // 1. Operators that are allowed anywhere in a correlated subquery, and, // by definition of the operators, they either do not contain // any columns or cannot host outer references. // 2. Operators that are allowed anywhere in a correlated subquery // so long as they do not host outer references. // 3. Operators that need special handlings. These operators are // Filter, Join, Aggregate, and Generate. // // Any operators that are not in the above list are allowed // in a correlated subquery only if they are not on a correlation path. // In other word, these operators are allowed only under a correlation point. // // A correlation path is defined as the sub-tree of all the operators that // are on the path from the operator hosting the correlated expressions // up to the operator producing the correlated values. 
// Category 1: // ResolvedHint, Distinct, LeafNode, Repartition, and SubqueryAlias case _: ResolvedHint | _: Distinct | _: LeafNode | _: Repartition | _: SubqueryAlias => // Category 2: // These operators can be anywhere in a correlated subquery. // so long as they do not host outer references in the operators. case p: Project => failOnInvalidOuterReference(p) case s: Sort => failOnInvalidOuterReference(s) case r: RepartitionByExpression => failOnInvalidOuterReference(r) // Category 3: // Filter is one of the two operators allowed to host correlated expressions. // The other operator is Join. Filter can be anywhere in a correlated subquery. case f: Filter => val (correlated, _) = splitConjunctivePredicates(f.condition).partition(containsOuter) // Find any non-equality correlated predicates foundNonEqualCorrelatedPred = foundNonEqualCorrelatedPred || correlated.exists { case _: EqualTo | _: EqualNullSafe => false case _ => true } failOnInvalidOuterReference(f) // Aggregate cannot host any correlated expressions // It can be on a correlation path if the correlation contains // only equality correlated predicates. // It cannot be on a correlation path if the correlation has // non-equality correlated predicates. case a: Aggregate => failOnInvalidOuterReference(a) failOnNonEqualCorrelatedPredicate(foundNonEqualCorrelatedPred, a) // Join can host correlated expressions. case j @ Join(left, right, joinType, _) => joinType match { // Inner join, like Filter, can be anywhere. case _: InnerLike => failOnInvalidOuterReference(j) // Left outer join's right operand cannot be on a correlation path. // LeftAnti and ExistenceJoin are special cases of LeftOuter. // Note that ExistenceJoin cannot be expressed externally in both SQL and DataFrame // so it should not show up here in Analysis phase. This is just a safety net. // // LeftSemi does not allow output from the right operand. // Any correlated references in the subplan // of the right operand cannot be pulled up. case LeftOuter | LeftSemi | LeftAnti | ExistenceJoin(_) => failOnInvalidOuterReference(j) failOnOuterReferenceInSubTree(right) // Likewise, Right outer join's left operand cannot be on a correlation path. case RightOuter => failOnInvalidOuterReference(j) failOnOuterReferenceInSubTree(left) // Any other join types not explicitly listed above, // including Full outer join, are treated as Category 4. case _ => failOnOuterReferenceInSubTree(j) } // Generator with join=true, i.e., expressed with // LATERAL VIEW [OUTER], similar to inner join, // allows to have correlation under it // but must not host any outer references. // Note: // Generator with requiredChildOutput.isEmpty is treated as Category 4. case g: Generate if g.requiredChildOutput.nonEmpty => failOnInvalidOuterReference(g) // Category 4: Any other operators not in the above 3 categories // cannot be on a correlation path, that is they are allowed only // under a correlation point but they and their descendant operators // are not allowed to have any correlated expressions. case p => failOnOuterReferenceInSubTree(p) }} } }
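// Illustrative sketch, not part of CheckAnalysis.scala: two queries that the checks above are
// expected to reject at analysis time. Assumes a local SparkSession; the table/column names are
// made up and the exact error wording can differ between Spark versions.
import org.apache.spark.sql.{AnalysisException, SparkSession}

object CheckAnalysisSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[1]").appName("check-analysis-sketch").getOrCreate()
    spark.range(10).selectExpr("id AS a", "id % 3 AS b").createOrReplaceTempView("t1")
    spark.range(10).selectExpr("id AS c", "id % 3 AS d").createOrReplaceTempView("t2")

    // 'a' is neither grouped nor aggregated: rejected by checkValidAggregateExpression.
    try spark.sql("SELECT a, sum(b) FROM t1 GROUP BY b").collect()
    catch { case e: AnalysisException => println(e.getMessage) }

    // Correlated scalar subquery with a non-equality correlated predicate under an Aggregate:
    // rejected by checkSubqueryExpression / checkCorrelationsInSubquery.
    try spark.sql("SELECT a, (SELECT max(c) FROM t2 WHERE t2.c > t1.a GROUP BY d) FROM t1").collect()
    catch { case e: AnalysisException => println(e.getMessage) }

    spark.stop()
  }
}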
mdespriee/spark
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/CheckAnalysis.scala
Scala
apache-2.0
29,544
/* sbt -- Simple Build Tool * Copyright 2010 Mark Harrah */ package sbt import std._ import xsbt.api.{ Discovered, Discovery } import inc.Analysis import TaskExtra._ import Types._ import xsbti.api.Definition import ConcurrentRestrictions.Tag import testing.{ AnnotatedFingerprint, Fingerprint, Framework, SubclassFingerprint, Runner, TaskDef, SuiteSelector, Task => TestTask } import scala.annotation.tailrec import java.io.File sealed trait TestOption object Tests { /** * The result of a test run. * * @param overall The overall result of execution across all tests for all test frameworks in this test run. * @param events The result of each test group (suite) executed during this test run. * @param summaries Explicit summaries directly provided by test frameworks. This may be empty, in which case a default summary will be generated. */ final case class Output(overall: TestResult.Value, events: Map[String, SuiteResult], summaries: Iterable[Summary]) /** * Summarizes a test run. * * @param name The name of the test framework providing this summary. * @param summaryText The summary message for tests run by the test framework. */ final case class Summary(name: String, summaryText: String) /** * Defines a TestOption that will evaluate `setup` before any tests execute. * The ClassLoader provided to `setup` is the loader containing the test classes that will be run. * Setup is not currently performed for forked tests. */ final case class Setup(setup: ClassLoader => Unit) extends TestOption /** * Defines a TestOption that will evaluate `setup` before any tests execute. * Setup is not currently performed for forked tests. */ def Setup(setup: () => Unit) = new Setup(_ => setup()) /** * Defines a TestOption that will evaluate `cleanup` after all tests execute. * The ClassLoader provided to `cleanup` is the loader containing the test classes that ran. * Cleanup is not currently performed for forked tests. */ final case class Cleanup(cleanup: ClassLoader => Unit) extends TestOption /** * Defines a TestOption that will evaluate `cleanup` after all tests execute. * Cleanup is not currently performed for forked tests. */ def Cleanup(cleanup: () => Unit) = new Cleanup(_ => cleanup()) /** The names of tests to explicitly exclude from execution. */ final case class Exclude(tests: Iterable[String]) extends TestOption final case class Listeners(listeners: Iterable[TestReportListener]) extends TestOption /** Selects tests by name to run. Only tests for which `filterTest` returns true will be run. */ final case class Filter(filterTest: String => Boolean) extends TestOption /** Test execution will be ordered by the position of the matching filter. */ final case class Filters(filterTest: Seq[String => Boolean]) extends TestOption /** Defines a TestOption that passes arguments `args` to all test frameworks. */ def Argument(args: String*): Argument = Argument(None, args.toList) /** Defines a TestOption that passes arguments `args` to only the test framework `tf`. */ def Argument(tf: TestFramework, args: String*): Argument = Argument(Some(tf), args.toList) /** * Defines arguments to pass to test frameworks. * * @param framework The test framework the arguments apply to if one is specified in Some. * If None, the arguments will apply to all test frameworks. * @param args The list of arguments to pass to the selected framework(s). */ final case class Argument(framework: Option[TestFramework], args: List[String]) extends TestOption /** * Configures test execution. 
* * @param options The options to apply to this execution, including test framework arguments, filters, * and setup and cleanup work. * @param parallel If true, execute each unit of work returned by the test frameworks in separate sbt.Tasks. * If false, execute all work in a single sbt.Task. * @param tags The tags that should be added to each test task. These can be used to apply restrictions on * concurrent execution. */ final case class Execution(options: Seq[TestOption], parallel: Boolean, tags: Seq[(Tag, Int)]) /** Configures whether a group of tests runs in the same JVM or are forked. */ sealed trait TestRunPolicy /** Configures a group of tests to run in the same JVM. */ case object InProcess extends TestRunPolicy /** Configures a group of tests to be forked in a new JVM with forking options specified by `config`. */ final case class SubProcess(config: ForkOptions) extends TestRunPolicy object SubProcess { @deprecated("Construct SubProcess with a ForkOptions argument.", "0.13.0") def apply(javaOptions: Seq[String]): SubProcess = SubProcess(ForkOptions(runJVMOptions = javaOptions)) } /** A named group of tests configured to run in the same JVM or be forked. */ final case class Group(name: String, tests: Seq[TestDefinition], runPolicy: TestRunPolicy) private[sbt] final class ProcessedOptions( val tests: Seq[TestDefinition], val setup: Seq[ClassLoader => Unit], val cleanup: Seq[ClassLoader => Unit], val testListeners: Seq[TestReportListener]) private[sbt] def processOptions(config: Execution, discovered: Seq[TestDefinition], log: Logger): ProcessedOptions = { import collection.mutable.{ HashSet, ListBuffer, Map, Set } val testFilters = new ListBuffer[String => Boolean] var orderedFilters = Seq[String => Boolean]() val excludeTestsSet = new HashSet[String] val setup, cleanup = new ListBuffer[ClassLoader => Unit] val testListeners = new ListBuffer[TestReportListener] val undefinedFrameworks = new ListBuffer[String] for (option <- config.options) { option match { case Filter(include) => testFilters += include case Filters(includes) => if (!orderedFilters.isEmpty) sys.error("Cannot define multiple ordered test filters.") else orderedFilters = includes case Exclude(exclude) => excludeTestsSet ++= exclude case Listeners(listeners) => testListeners ++= listeners case Setup(setupFunction) => setup += setupFunction case Cleanup(cleanupFunction) => cleanup += cleanupFunction case a: Argument => // now handled by whatever constructs `runners` } } if (excludeTestsSet.size > 0) log.debug(excludeTestsSet.mkString("Excluding tests: \\n\\t", "\\n\\t", "")) if (undefinedFrameworks.size > 0) log.warn("Arguments defined for test frameworks that are not present:\\n\\t" + undefinedFrameworks.mkString("\\n\\t")) def includeTest(test: TestDefinition) = !excludeTestsSet.contains(test.name) && testFilters.forall(filter => filter(test.name)) val filtered0 = discovered.filter(includeTest).toList.distinct val tests = if (orderedFilters.isEmpty) filtered0 else orderedFilters.flatMap(f => filtered0.filter(d => f(d.name))).toList.distinct new ProcessedOptions(tests, setup.toList, cleanup.toList, testListeners.toList) } def apply(frameworks: Map[TestFramework, Framework], testLoader: ClassLoader, runners: Map[TestFramework, Runner], discovered: Seq[TestDefinition], config: Execution, log: Logger): Task[Output] = { val o = processOptions(config, discovered, log) testTask(testLoader, frameworks, runners, o.tests, o.setup, o.cleanup, log, o.testListeners, config) } def testTask(loader: ClassLoader, frameworks: 
Map[TestFramework, Framework], runners: Map[TestFramework, Runner], tests: Seq[TestDefinition], userSetup: Iterable[ClassLoader => Unit], userCleanup: Iterable[ClassLoader => Unit], log: Logger, testListeners: Seq[TestReportListener], config: Execution): Task[Output] = { def fj(actions: Iterable[() => Unit]): Task[Unit] = nop.dependsOn(actions.toSeq.fork(_()): _*) def partApp(actions: Iterable[ClassLoader => Unit]) = actions.toSeq map { a => () => a(loader) } val (frameworkSetup, runnables, frameworkCleanup) = TestFramework.testTasks(frameworks, runners, loader, tests, log, testListeners) val setupTasks = fj(partApp(userSetup) :+ frameworkSetup) val mainTasks = if (config.parallel) makeParallel(loader, runnables, setupTasks, config.tags) //.toSeq.join else makeSerial(loader, runnables, setupTasks, config.tags) val taggedMainTasks = mainTasks.tagw(config.tags: _*) taggedMainTasks map processResults flatMap { results => val cleanupTasks = fj(partApp(userCleanup) :+ frameworkCleanup(results.overall)) cleanupTasks map { _ => results } } } type TestRunnable = (String, TestFunction) private def createNestedRunnables(loader: ClassLoader, testFun: TestFunction, nestedTasks: Seq[TestTask]): Seq[(String, TestFunction)] = nestedTasks.view.zipWithIndex map { case (nt, idx) => val testFunDef = testFun.taskDef (testFunDef.fullyQualifiedName, TestFramework.createTestFunction(loader, new TaskDef(testFunDef.fullyQualifiedName + "-" + idx, testFunDef.fingerprint, testFunDef.explicitlySpecified, testFunDef.selectors), testFun.runner, nt)) } def makeParallel(loader: ClassLoader, runnables: Iterable[TestRunnable], setupTasks: Task[Unit], tags: Seq[(Tag, Int)]): Task[Map[String, SuiteResult]] = toTasks(loader, runnables.toSeq, tags).dependsOn(setupTasks) def toTasks(loader: ClassLoader, runnables: Seq[TestRunnable], tags: Seq[(Tag, Int)]): Task[Map[String, SuiteResult]] = { val tasks = runnables.map { case (name, test) => toTask(loader, name, test, tags) } tasks.join.map(_.foldLeft(Map.empty[String, SuiteResult]) { case (sum, e) => val merged = sum.toSeq ++ e.toSeq val grouped = merged.groupBy(_._1) grouped.mapValues(_.map(_._2).foldLeft(SuiteResult.Empty) { case (resultSum, result) => resultSum + result }) }) } def toTask(loader: ClassLoader, name: String, fun: TestFunction, tags: Seq[(Tag, Int)]): Task[Map[String, SuiteResult]] = { val base = task { (name, fun.apply()) } val taggedBase = base.tagw(tags: _*).tag(fun.tags.map(ConcurrentRestrictions.Tag(_)): _*) taggedBase flatMap { case (name, (result, nested)) => val nestedRunnables = createNestedRunnables(loader, fun, nested) toTasks(loader, nestedRunnables, tags).map { currentResultMap => val newResult = currentResultMap.get(name) match { case Some(currentResult) => currentResult + result case None => result } currentResultMap.updated(name, newResult) } } } def makeSerial(loader: ClassLoader, runnables: Seq[TestRunnable], setupTasks: Task[Unit], tags: Seq[(Tag, Int)]): Task[List[(String, SuiteResult)]] = { @tailrec def processRunnable(runnableList: List[TestRunnable], acc: List[(String, SuiteResult)]): List[(String, SuiteResult)] = runnableList match { case hd :: rst => val testFun = hd._2 val (result, nestedTasks) = testFun.apply() val nestedRunnables = createNestedRunnables(loader, testFun, nestedTasks) processRunnable(nestedRunnables.toList ::: rst, (hd._1, result) :: acc) case Nil => acc } task { processRunnable(runnables.toList, List.empty) } dependsOn (setupTasks) } def processResults(results: Iterable[(String, SuiteResult)]): Output = 
Output(overall(results.map(_._2.result)), results.toMap, Iterable.empty) def foldTasks(results: Seq[Task[Output]], parallel: Boolean): Task[Output] = if (results.isEmpty) task { Output(TestResult.Passed, Map.empty, Nil) } else if (parallel) reduced(results.toIndexedSeq, { case (Output(v1, m1, _), Output(v2, m2, _)) => Output(if (v1.id < v2.id) v2 else v1, m1 ++ m2, Iterable.empty) }) else { def sequence(tasks: List[Task[Output]], acc: List[Output]): Task[List[Output]] = tasks match { case Nil => task(acc.reverse) case hd :: tl => hd flatMap { out => sequence(tl, out :: acc) } } sequence(results.toList, List()) map { ress => val (rs, ms) = ress.unzip { e => (e.overall, e.events) } Output(overall(rs), ms reduce (_ ++ _), Iterable.empty) } } def overall(results: Iterable[TestResult.Value]): TestResult.Value = (TestResult.Passed /: results) { (acc, result) => if (acc.id < result.id) result else acc } def discover(frameworks: Seq[Framework], analysis: Analysis, log: Logger): (Seq[TestDefinition], Set[String]) = discover(frameworks flatMap TestFramework.getFingerprints, allDefs(analysis), log) def allDefs(analysis: Analysis) = analysis.apis.internal.values.flatMap(_.api.definitions).toSeq def discover(fingerprints: Seq[Fingerprint], definitions: Seq[Definition], log: Logger): (Seq[TestDefinition], Set[String]) = { val subclasses = fingerprints collect { case sub: SubclassFingerprint => (sub.superclassName, sub.isModule, sub) }; val annotations = fingerprints collect { case ann: AnnotatedFingerprint => (ann.annotationName, ann.isModule, ann) }; log.debug("Subclass fingerprints: " + subclasses) log.debug("Annotation fingerprints: " + annotations) def firsts[A, B, C](s: Seq[(A, B, C)]): Set[A] = s.map(_._1).toSet def defined(in: Seq[(String, Boolean, Fingerprint)], names: Set[String], IsModule: Boolean): Seq[Fingerprint] = in collect { case (name, IsModule, print) if names(name) => print } def toFingerprints(d: Discovered): Seq[Fingerprint] = defined(subclasses, d.baseClasses, d.isModule) ++ defined(annotations, d.annotations, d.isModule) val discovered = Discovery(firsts(subclasses), firsts(annotations))(definitions) // TODO: To pass in correct explicitlySpecified and selectors val tests = for ((df, di) <- discovered; fingerprint <- toFingerprints(di)) yield new TestDefinition(df.name, fingerprint, false, Array(new SuiteSelector)) val mains = discovered collect { case (df, di) if di.hasMain => df.name } (tests, mains.toSet) } @deprecated("Tests.showResults() has been superseded with TestResultLogger and setting 'testResultLogger'.", "0.13.5") def showResults(log: Logger, results: Output, noTestsMessage: => String): Unit = TestResultLogger.Default.copy(printNoTests = TestResultLogger.const(_ info noTestsMessage)) .run(log, results, "") } final class TestsFailedException extends RuntimeException("Tests unsuccessful") with FeedbackProvidedException
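// Illustrative sketch, not part of Tests.scala: wiring the TestOption values defined above into
// sbt settings, as one would in a build definition. testOptions, Test and TestFrameworks come from
// sbt itself, not from this file; the suite name and the ScalaTest "-oD" flag are made-up values.
import sbt._
import sbt.Keys._

object TestOptionsSketch {
  val sketchSettings: Seq[Def.Setting[_]] = Seq(
    testOptions in Test ++= Seq(
      Tests.Filter(name => name.endsWith("Spec")),          // run only *Spec suites
      Tests.Exclude(Seq("com.example.SlowSpec")),           // skip one suite by name
      Tests.Argument(TestFrameworks.ScalaTest, "-oD"),      // arguments passed to ScalaTest only
      Tests.Setup(() => println("before any tests run")),   // in-process setup (not for forked runs)
      Tests.Cleanup(() => println("after all tests ran"))   // in-process cleanup
    )
  )
}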
niktrop/sbt
main/actions/src/main/scala/sbt/Tests.scala
Scala
bsd-3-clause
14,782
package debox import scala.reflect.ClassTag import scala.{specialized => sp} import spire.algebra._ import spire.math.QuickSort import spire.syntax.all._ /** * Buffer is a mutable, indexed sequence of values. * * Buffer wraps an underlying array, which provides constant-time * lookups, updates, and length checks. Values can be appended to or * popped from the end of the buffer in amortized constant time. Other * operations, such as insert, prepend, and will be linear. * * In cases where the type A is known (or the caller is specialized on * A), Buffer[A] will store the values in an unboxed array, and will * not box values on access or update. To aid in specialization and to * avoid inheriting bogus methods, Buffer intentionally does not * implement any of Scala's collection traits. * * For interop purposes, the toIterable method wraps a buffer in a * collections-compatible Iterable[A]. Buffer's iterator method * returns an Iterator[A], and the conversion methods .toArray, * toVector, and toList are also available. * * To facilitate inlining, Buffer's internals are public. However, you * should refrain from accessing or modifying these values unless you * know what you are doing. * * Furthermore, since Buffer is really only useful in cases where you * care about space efficiency and performance, Buffer declines to do * error-checking above what is provided by the underlying array, or * which is necessary to avoid corruption. This means that if you try * to access an element beyond the Buffer's length, you are not * guaranteed to get an exception (although you will in many * cases). This is by design. However, calls which modify the buffer * using an invalid index are guaranteed not to corrupt the buffer. * * Finally, there is no attempt made to provide any kind of thread * safety or protection against concurrent updates. Modify a Buffer * during foreach, map, iterator, etc will produce undefined results. */ final class Buffer[@sp A](arr: Array[A], n: Int)(implicit val ct: ClassTag[A]) extends Serializable { lhs => var elems: Array[A] = arr var len: Int = n /** * Check if two Buffers are equal. * * Equal means the buffers have the same type (which is checked * using the ClassTag instances) and the same contents. * * Comparing Buffers with any of Scala's collection types will * return false. */ override def equals(that: Any): Boolean = that match { case b: Buffer[_] => if (length != b.length || ct != b.ct) return false val buf = b.asInstanceOf[Buffer[A]] val limit = len cfor(0)(_ < limit, _ + 1) { i => if (elems(i) != buf.elems(i)) return false } true case _ => false } /** * Hash the contents of the buffer to an Int value. */ override def hashCode: Int = { var code: Int = 0xf457f00d val limit = len cfor(0)(_ < limit, _ + 1) { i => code = (code * 19) + elems(i).## } code } /** * Return a string representation of the contents of the buffer. */ override def toString = if (length == 0) { "Buffer()" } else { val limit = len val sb = new StringBuilder() sb.append("Buffer(") sb.append(apply(0)) cfor(1)(_ < limit, _ + 1) { i => sb.append(", ") sb.append(elems(i)) } sb.append(")") sb.toString } /** * Copy the buffer's contents to a new buffer. */ final def copy(): Buffer[A] = new Buffer(elems.clone, len) /** * Aborb the given buffer's contents into this buffer. * * This method does not copy the other buffer's contents. Thus, this * should only be used when there are no saved references to the * other Buffer. It is private, and exists primarily to simplify the * implementation of certain methods. 
*/ private[this] def absorb(that: Buffer[A]): Unit = { elems = that.elems len = that.len } /** * Given delta, a change in the buffer's length, determine if the * underlying array needs to be grown. If this is necessary, do * it. Otherwise, return. * * This is an amortized O(1) operation; most calls will simply * return without growing. */ private[this] def growIfNecessary(delta: Int): Unit1[A] = { val goal = len + delta val n = elems.length if (n >= goal) return null var x = if (n == 0) 8 else Util.nextPowerOfTwo(n + 1) while (x >= 0 && x < goal) x = Util.nextPowerOfTwo(x + 1) if (x < 0) throw DeboxOverflowError(x) grow(x) null } /** * Grow the underlying array to accomodate n elements. * * In order to amortize the cost of growth, we need to double the * size of the underlying array each time, so that additional * resizes become less and less frequent as the buffer is added to. * * Growing is an O(n) operation, where n is buffer.length. */ private[this] def grow(n: Int): Unit1[A] = { val arr = new Array[A](n) System.arraycopy(elems, 0, arr, 0, len) elems = arr null } /** * Return the length of this Buffer as an Int. * * Since Buffers wrap arrays, their size is limited to what a 32-bit * signed integer can represent. In general Buffer should only be * used for sequences that are small enough to easily fit into * contiguous memory--larger sequences would benefit from block * layouts, serialization to/from disk, and other strategies that * Buffer does not provide. * * This is an O(1) operation. */ def length: Int = len /** * Return true if the Buffer is empty, false otherwise. * * This is an O(1) operation. */ def isEmpty: Boolean = len == 0 /** * Return true if the Buffer is non-empty, false otherwise. * * This is an O(1) operation. */ def nonEmpty: Boolean = len > 0 /** * Return the value at element i. * * As noted above, this method may throw an * ArrayIndexOutOfBoundsException if i is too large. However, if i * is larger than the buffer's length, but fits in the underlying * array, a garbage value will be returned instead. Be careful! * * This is an O(1) operation. */ def apply(i: Int): A = elems(i) /** * Update the value of element i. * * This method has similar caveats to apply. If an illegal i value * is used, an ArrayIndexOutOfBoundsException may be thrown. If no * exception is thrown, the update will have been ignored. Under no * circumstances will an invalid index corrupt the buffer. * * This is an O(1) operation. */ def update(i: Int, a: A): Unit = elems(i) = a /** * This method is a synonym for append. */ def append(a: A): Unit = this += a /** * Append a new value to the end of the buffer. * * If there is no space left in the underlying array this method * will trigger a grow, increasing the underlying storage * capacity. * * This is an amortized O(1) operation. */ def +=(a: A): Unit = { val n = len if (n >= elems.length) grow(Util.nextPowerOfTwo(n + 1)) elems(n) = a len = n + 1 } /** * Insert a new value at index i. * * For i values that are negative, or greater than the length of the * buffer, an exception will be thrown. If i == buffer.length, the * value will be appended to the end. Otherwise, this method will * shift the values at i and beyond forward to make room. * * This is an O(n) operation, where n is buffer.length. 
*/ def insert(i: Int, a: A): Unit = if (i < 0 || i > len) { throw new IllegalArgumentException(i.toString) } else if (i == len) { append(a) } else { growIfNecessary(1) System.arraycopy(elems, i, elems, i + 1, len - i) elems(i) = a len += 1 } /** * Insert a new value at the beginning of the buffer. * * This method will shift the contents of the buffer forward to make * space for the new value. * * This is an O(n) operation, where n is buffer.length. */ def prepend(a: A): Unit = insert(0, a) /** * This is a synonym for ++. */ def concat(buf: Buffer[A]): Buffer[A] = this ++ buf /** * Concatenate two buffers, returning a new buffer. * * This method does not modify either input buffer, but allocates * and returns a new one. * * This is an O(n+m) operation, where n and m are the lengths of the * input buffers. */ def ++(buf: Buffer[A]): Buffer[A] = { val result = this.copy; result ++= buf; result } /** * This is a synonym for ++=. */ def extend(arr: Array[A]): Unit = this ++= arr /** * This is a synonym for extend. */ def extend(buf: Buffer[A]): Unit = this ++= buf /** * This is a synonym for extend. */ def extend(items: Iterable[A]): Unit = this ++= items /** * Append the values in arr to the end of the buffer. * * This method is an O(m) operation, where m is the length of arr. */ def ++=(arr: Array[A]): Unit = splice(len, arr) /** * Append the values in buf to the end of the buffer. * * This method is an O(m) operation, where m is the length of buf. */ def ++=(buf: Buffer[A]): Unit = splice(len, buf) /** * Append the values in elems to the end of the buffer. * * This method is an O(m) operation, where m is the length of items. */ def ++=(items: Iterable[A]): Unit = items.foreach(append) /** * Splice the values in arr into the buffer at index i. * * If i is negative or greater than buffer.length, an exception will * be thrown. If i is equal to buffer.length, the buffer will be * extended with arr. Otherwise, this method will shift the elements * at i and beyond forward to make room for arr's elements. Thus, * the size of the buffer will increase by arr.length. * * This method is an O(m+n) operation, where m is the length of arr, * and n is the length of the buffer. */ def splice(i: Int, arr: Array[A]): Unit = if (i < 0 || i > len) { throw new IllegalArgumentException(i.toString) } else { val n = arr.length growIfNecessary(n) if (i < len) System.arraycopy(elems, i, elems, i + n, len - i) System.arraycopy(arr, 0, elems, i, n) len += n } /** * Splice the values in buf into the buffer at index i. * * If i is negative or greater than buffer.length, an exception will * be thrown. If i is equal to buffer.length, the buffer will be * extended with buf. Otherwise, this method will shift the elements * at i and beyond forward to make room for buf's elements. Thus, * the size of the buffer will increase by buf.length. * * This method is an O(m+n) operation, where m is the length of buf, * and n is the length of the buffer. */ def splice(i: Int, buf: Buffer[A]): Unit = if (i < 0 || i > len) { throw new IllegalArgumentException(i.toString) } else { val n = buf.length growIfNecessary(n) if (i < len) System.arraycopy(elems, i, elems, i + n, len - i) System.arraycopy(buf.elems, 0, elems, i, n) len += n } /** * Prepend the values from arr into the beginning of the buffer. * * Like splice, this method will shift all the buffer's values back * to make room. * * This method is an O(m+n) operation, where m is the length of arr, * and n is the lenght of the buffer. 
*/ def prependAll(arr: Array[A]): Unit = splice(0, arr) /** * Prepend the values from arr into the beginning of the buffer. * * Like splice, this method will shift all the buffer's values back * to make room. * * This method is an O(m+n) operation, where m is the length of arr, * and n is the lenght of the buffer. */ def prependAll(buf: Buffer[A]): Unit = splice(0, buf) /** * Remove the element at i, returning the value removed. * * This method verifies that the index i is valid; if not, it will * throw an exception. * * This method is an O(n) operation, where n is buffer.length. * Removing the last element of the buffer is O(1) operation, and * can also be accomplished with pop. */ def remove(i: Int): A = { val last = len - 1 if (i < 0) { throw new IndexOutOfBoundsException(i.toString) } else if (i < last) { System.arraycopy(elems, i + 1, elems, i, last - i) val a = elems(last) elems(last) = null.asInstanceOf[A] len = last a } else if (i == last) { pop } else { throw new IndexOutOfBoundsException(i.toString) } } /** * Remove the last element, returning the value returned. * * If the buffer is empty, this method throws an exception. * * This method is an O(1) operation. */ def pop(): A = if (len > 0) { val last = len - 1 val a = elems(last) len = last a } else { throw new IndexOutOfBoundsException("0") } /** * Clears the buffer's internal state. * * After calling this method, the buffer's state is identical to * that obtained by calling Buffer.empty[A]. * * The previous array is not retained, and will become available for * garbage collection. * * This is an O(1) operation. */ def clear(): Unit1[A] = { absorb(Buffer.empty[A]); null } /** * Compacts the buffer's internal array to remove extra free space. * * This operation should be used it a buffer is not likely to grow * again, and the user wants to free any additional memory that may * be available. * * In general, a buffer that has only grown will use 1-2x of its * apparent size. Buffers that have been reduced in size may be * using up to 4x the apparent size. */ def compact(): Unit1[A] = { if (len < elems.length) { val arr = new Array[A](len) System.arraycopy(elems, 0, arr, 0, len) elems = arr } null } /** * Return a new buffer which consists of the elements [i, j). * * The slice is half-open: the resulting buffer will include element * i but not element j. In other words, the new buffer will have * length (j - i). * * If i and j are not valid indices in the buffer, or if i > j, this * method will throw an exception. * * This is an O(j - i) operation. */ def slice(i: Int, j: Int): Buffer[A] = { if (0 > i || i > j || j > len) throw new IllegalArgumentException("(%s, %s)" format (i, j)) val n = j - i val arr = new Array[A](n) System.arraycopy(elems, i, arr, 0, n) new Buffer(arr, n) } /** * Return a new buffer with this buffer's elements in reverse order. * * This is an O(n) method, where n is buffer.length. */ def reverse(): Buffer[A] = { val arr = new Array[A](elems.length) var i = 0 var j = len - 1 val limit = len while (i < limit) { arr(j) = elems(i) i += 1 j -= 1 } new Buffer(arr, len) } /** * Return an iterator over this buffer's contents. * * This method does not do any copying or locking. Thus, if the * buffer is modified while the iterator is "live" the results will * be undefined and probably bad. * * Use this.copy.iterator to get a "clean" iterator if needed. * * Creating the iterator is an O(1) operation. */ def iterator(): Iterator[A] = elems.iterator.take(len) /** * Loop over the buffer's contents, appying f to each element. 
* * This is an O(n) operation, where n is the length of the buffer. */ def foreach(f: Function[A, Unit]): Unit = { val limit = len cfor(0)(_ < limit, _ + 1) { i => f(elems(i)) } } /** * Map this buffer's contents into a new buffer using f. * * This is an O(n) operation, where n is the length of the buffer. */ def map[@sp B: ClassTag](f: A => B): Buffer[B] = { val arr = new Array[B](len) val limit = len cfor(0)(_ < limit, _ + 1) { i => arr(i) = f(elems(i)) } new Buffer(arr, len) } /** * Add the buffer contents together, returning their sum. */ def sum(implicit ev: AdditiveMonoid[A]): A = { val limit = len var result: A = ev.zero cfor(0)(_ < limit, _ + 1) { i => result += elems(i) } result } /** * Multiply the buffer contents together, returning their product. */ def product(implicit ev: MultiplicativeMonoid[A]): A = { val limit = len var result: A = ev.one cfor(0)(_ < limit, _ + 1) { i => result *= elems(i) } result } /** * Find the p-norm of the buffer's contents. * * The p-norm generalizes notion of a length function. */ def norm(p: Int)(implicit ev: Field[A], s: Signed[A], nr: NRoot[A]): A = { val limit = len var result: A = ev.one cfor(0)(_ < limit, _ + 1) { i => result += elems(i).abs ** p } result nroot p } /** * Find the minimum value in this buffer. * * This method uses an instance of Spire's Order[A] type class to * compare the elements, to avoid boxing. If you want to use Scala's * Ordering, you can use compatibility code in Spire, or call * toIterable.min. */ def min(implicit o: Order[A]): A = { if (isEmpty) throw new UnsupportedOperationException() var result: A = elems(0) val limit = len cfor(1)(_ < limit, _ + 1) { i => result = result min elems(i) } result } /** * Find the maximum value in this buffer. * * This method uses an instance of Spire's Order[A] type class to * compare the elements, to avoid boxing. If you want to use Scala's * Ordering, you can use compatibility code in Spire, or call * toIterable.min. */ def max(implicit o: Order[A]): A = { if (isEmpty) throw new UnsupportedOperationException() var result: A = elems(0) val limit = len cfor(1)(_ < limit, _ + 1) { i => result = result max elems(i) } result } /** * Find the mean (average) value of this buffer's contents. */ def mean(implicit ev: Field[A]): A = { if (isEmpty) throw new UnsupportedOperationException() var result: A = ev.zero val limit = len cfor(0)(_ < limit, _ + 1) { i => result = (result * i / (i + 1)) + (elems(i) / (i + 1)) } result } /** * Sort the contents of the buffer. * * This method uses an instance of Spire's Order[A] type class to * compare the elements, to avoid boxing. If you want to use Scala's * Ordering, you can use compatibility code in Spire, or call * toIterable.min. */ def sort(implicit o: Order[A]): Unit = QuickSort.qsort(elems, 0, len - 1) /** * Create an array out of the elements in the buffer. * * This is an O(n) operation, where n is the length of the buffer. */ def toArray(): Array[A] = Util.alloc(elems, 0, len) /** * Wrap this buffer in an Iterable[A] instance. * * This method exists as a cheap way to get compatibility with Scala * collections without copying/conversion. Note that since Scala * collections are not specialized, using this iterable will box * values as they are accessed (although the underlying array will * still be unboxed). * * Like iterator, this method directly wraps the buffer. Thus, you * should not mutate the buffer while using the resulting iterable, * or risk corruption and undefined behavior. 
* * To get a "safe" value that is compatible with Scala collections, * consider using toVector, toList, or copy.toIterable. * * Creating the Iterable[A] instance is an O(1) operation. */ def toIterable(): Iterable[A] = new Iterable[A] { override def size: Int = lhs.length def iterator: Iterator[A] = lhs.iterator override def foreach[U](f: A => U): Unit = lhs.foreach(a => f(a)) } /** * Create a Vector[A] from this buffer's elements. * * This is an O(n) operation. */ def toVector(): Vector[A] = { import scala.collection.immutable.VectorBuilder val b = new VectorBuilder[A] b.sizeHint(len) cfor(0)(_ < len, _ + 1) { i => b += elems(i) } b.result } /** * Create a List[A] from this buffer's elements. * * This is an O(n) operation. */ def toList(): List[A] = { import scala.collection.mutable.ListBuffer val b = new ListBuffer[A] cfor(0)(_ < len, _ + 1) { i => b += elems(i) } b.toList } } object Buffer extends LowPriorityBufferImplicits { /** * Allocate an empty Buffer. */ def empty[@sp A: ClassTag]: Buffer[A] = ofSize[A](8) /** * Allocate an empty Buffer, capable of holding n items without * resizing itself. * * This method is useful if you know you'll be adding a large number * of elements in advance and you want to save a few resizes. */ def ofSize[@sp A: ClassTag](n: Int): Buffer[A] = new Buffer(new Array[A](Util.nextPowerOfTwo(n)), 0) /** * Fill a length-n Buffer with a constant value. * * If A is a reference type, all the elements in the Buffer will * point to the same 'a' instance. If it is known to be a value type * (e.g. Int) then all the values will be primitives. */ def fill[@sp A: ClassTag](n: Int)(a: A): Buffer[A] = unsafe(Array.fill(n)(a)) /** * Wrap an array instance directly in a Buffer. * * This method is named 'unsafe' because the underlying array could * potentially be modified somewhere else, changing or corrupting * the Buffer. You should only use this method when you know that * the array will not be stored or modified externally. */ def unsafe[@sp A: ClassTag](arr: Array[A]): Buffer[A] = new Buffer(arr, arr.length) /** * Build a Buffer instance from the provided values. */ def apply[A: ClassTag](args: A*): Buffer[A] = unsafe(args.toArray) /** * Build a Buffer from the provided array. * * Unlike 'unsafe' this method clones the given array, to prevent * possible corruption later. */ def fromArray[@sp A: ClassTag](arr: Array[A]): Buffer[A] = new Buffer(arr.clone, arr.length) /** * Build a Buffer from the provided iterable object. */ def fromIterable[@sp A: ClassTag](items: Iterable[A]): Buffer[A] = unsafe(items.toArray) /** * Provide an Order[Buffer[A]] instance. * * The empty buffer is considered "less-than" any non-empty buffer, * and non-empty buffers are compared lexicographically. Elemens are * compared using the given Order[A]. */ implicit def order[@sp A: Order]: Order[Buffer[A]] = new Order[Buffer[A]] { def compare(lhs: Buffer[A], rhs: Buffer[A]): Int = { val (minLength, lastResult) = if (lhs.length < rhs.length) (lhs.length, -1) else if (lhs.length == rhs.length) (lhs.length, 0) else (rhs.length, 1) cfor(0)(_ < minLength, _ + 1) { i => val n = lhs.elems(i) compare rhs.elems(i) if (n != 0) return n } lastResult } } /** * Provides a Monoid[Buffer[A]] instance. * * The identity value is an empty buffer, and the ++ operator is * used to concatenate two buffers without modifying their contents. 
*/ implicit def monoid[@sp A: ClassTag]: Monoid[Buffer[A]] = new Monoid[Buffer[A]] { def empty: Buffer[A] = Buffer.empty[A] def combine(lhs: Buffer[A], rhs: Buffer[A]): Buffer[A] = lhs ++ rhs } /** * Alternative Monoid[Buffer[A]] which combines buffers in a * pairwise fashion. */ def pairwiseMonoid[@sp A: ClassTag: Monoid]: Monoid[Buffer[A]] = new Monoid[Buffer[A]] { def empty: Buffer[A] = Buffer.empty[A] def combine(lhs: Buffer[A], rhs: Buffer[A]): Buffer[A] = if (lhs.length >= rhs.length) { val out = lhs.copy cfor(0)(_ < rhs.elems.length, _ + 1) { i => out(i) = out(i) |+| rhs.elems(i) } out } else { val out = rhs.copy cfor(0)(_ < lhs.elems.length, _ + 1) { i => out(i) = lhs.elems(i) |+| out(i) } out } } } trait LowPriorityBufferImplicits { /** * Provide an Eq[Buffer[A]] instance. * * This method uses the given Eq[A] to compare each element * pairwise. Buffers are required to be the same length. */ implicit def eqv[@sp A: Eq]: Eq[Buffer[A]] = new Eq[Buffer[A]] { def eqv(lhs: Buffer[A], rhs: Buffer[A]): Boolean = { if (lhs.length != rhs.length) return false cfor(0)(_ < lhs.elems.length, _ + 1) { i => if (lhs.elems(i) =!= rhs.elems(i)) return false } true } } }
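// Illustrative sketch, not part of Buffer.scala: typical unboxed use of the API above. Assumes
// debox and spire are on the classpath; spire.implicits._ supplies the Order and AdditiveMonoid
// instances needed by sort, min and sum.
import debox.Buffer
import spire.implicits._

object BufferSketch {
  def main(args: Array[String]): Unit = {
    val buf = Buffer.empty[Int]        // backed by an unboxed Array[Int]
    buf += 3; buf += 1; buf += 2       // amortized O(1) appends
    buf.insert(0, 9)                   // O(n): shifts elements to make room at index 0
    val doubled = buf.map(_ * 2)       // specialized map, no boxing of Int
    buf.sort                           // in-place quicksort using Order[Int]
    println((buf.toList, buf.min, doubled.sum, buf.pop()))  // pop removes and returns the last element
  }
}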
non/debox
src/main/scala/debox/Buffer.scala
Scala
mit
24,556
package com.sksamuel.elastic4s import org.elasticsearch.script.Script import org.elasticsearch.script.ScriptService.{ScriptType => ESScriptType} import scala.language.implicitConversions trait ScriptDsl { def script(script: String): ScriptDefinition = ScriptDefinition(script) def script(name: String, script: String) = ScriptDefinition(script) } case class ScriptDefinition(script: String, lang: Option[String] = None, scriptType: ESScriptType = ESScriptType.INLINE, params: Map[String, Any] = Map.empty) { import scala.collection.JavaConverters._ def lang(lang: String): ScriptDefinition = copy(lang = Option(lang)) def param(name: String, value: Any): ScriptDefinition = copy(params = params + (name -> value)) def params(first: (String, Any), rest: (String, AnyRef)*): ScriptDefinition = params(first +: rest) def params(seq: Seq[(String, Any)]): ScriptDefinition = params(seq.toMap) def params(map: Map[String, Any]): ScriptDefinition = copy(params = params ++ map) def scriptType(scriptType: ESScriptType): ScriptDefinition = copy(scriptType = scriptType) def toJavaAPI: Script = { if(params.isEmpty) { new Script(script, scriptType, lang.orNull, null) } else { val mappedParams = FieldsMapper.mapper(params).asJava new Script(script, scriptType, lang.orNull, mappedParams) } } } object ScriptDefinition { implicit def string2Script(script: String): ScriptDefinition = ScriptDefinition(script) }
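// Illustrative sketch, not part of ScriptDsl.scala: building a parameterised inline script and
// converting it to the Java-client Script via toJavaAPI. The field name, lang and params are
// made-up example values.
import com.sksamuel.elastic4s.ScriptDefinition
import org.elasticsearch.script.Script

object ScriptSketch {
  def main(args: Array[String]): Unit = {
    // The implicit string2Script conversion also lets a plain String stand in for a ScriptDefinition.
    val defn: ScriptDefinition = ScriptDefinition("doc['price'].value * factor")
      .lang("expression")
      .param("factor", 1.2)
    val javaScript: Script = defn.toJavaAPI   // INLINE script with params mapped for the Java API
    println(javaScript)
  }
}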
muuki88/elastic4s
elastic4s-core/src/main/scala/com/sksamuel/elastic4s/ScriptDsl.scala
Scala
apache-2.0
1,548
package io.github.junheng.akka.hbase.protocol object HScannerProtocol { case class HNext(step: Int) }
junheng/akka-hbase
protocol/src/main/scala/io/github/junheng/akka/hbase/protocol/HScannerProtocol.scala
Scala
mit
105
/* * NodeImpl.scala * (LucreEvent) * * Copyright (c) 2011-2015 Hanns Holger Rutz. All rights reserved. * * This software is published under the GNU Lesser General Public License v2.1+ * * * For further information, please contact Hanns Holger Rutz at * [email protected] */ package de.sciss.lucre package expr package impl import event.{impl => eimpl, Event, InvariantSelector} import de.sciss.model.Change import expr.{String => _String} trait NodeImpl[S <: event.Sys[S], A] extends Expr.Node[S, A] with eimpl.StandaloneLike[S, Change[A], Expr[S, A]] with InvariantSelector[S] { final def changed: Event[S, Change[A], Expr[S, A]] = this final def disposeData()(implicit tx: S#Tx) = () override def toString = s"Expr$id" }
Sciss/LucreEvent
expr/src/main/scala/de/sciss/lucre/expr/impl/NodeImpl.scala
Scala
lgpl-2.1
754
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.executor import java.io.File import java.net.URL import java.nio.ByteBuffer import java.util.Properties import scala.collection.mutable import scala.concurrent.duration._ import org.json4s.{DefaultFormats, Extraction} import org.json4s.JsonAST.{JArray, JObject} import org.json4s.JsonDSL._ import org.mockito.Mockito.when import org.scalatest.concurrent.Eventually.{eventually, timeout} import org.scalatestplus.mockito.MockitoSugar import org.apache.spark._ import org.apache.spark.TestUtils._ import org.apache.spark.resource._ import org.apache.spark.resource.ResourceUtils._ import org.apache.spark.resource.TestResourceIDs._ import org.apache.spark.rpc.RpcEnv import org.apache.spark.scheduler.TaskDescription import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages.LaunchTask import org.apache.spark.serializer.JavaSerializer import org.apache.spark.util.{SerializableBuffer, Utils} class CoarseGrainedExecutorBackendSuite extends SparkFunSuite with LocalSparkContext with MockitoSugar { implicit val formats = DefaultFormats test("parsing no resources") { val conf = new SparkConf val resourceProfile = ResourceProfile.getOrCreateDefaultProfile(conf) val serializer = new JavaSerializer(conf) val env = createMockEnv(conf, serializer) // we don't really use this, just need it to get at the parser function val backend = new CoarseGrainedExecutorBackend( env.rpcEnv, "driverurl", "1", "host1", "host1", 4, Seq.empty[URL], env, None, resourceProfile) withTempDir { tmpDir => val testResourceArgs: JObject = ("" -> "") val ja = JArray(List(testResourceArgs)) val f1 = createTempJsonFile(tmpDir, "resources", ja) var error = intercept[SparkException] { val parsedResources = backend.parseOrFindResources(Some(f1)) }.getMessage() assert(error.contains("Error parsing resources file"), s"Calling with no resources didn't error as expected, error: $error") } } test("parsing one resource") { val conf = new SparkConf conf.set(EXECUTOR_GPU_ID.amountConf, "2") val serializer = new JavaSerializer(conf) val env = createMockEnv(conf, serializer) // we don't really use this, just need it to get at the parser function val backend = new CoarseGrainedExecutorBackend( env.rpcEnv, "driverurl", "1", "host1", "host1", 4, Seq.empty[URL], env, None, ResourceProfile.getOrCreateDefaultProfile(conf)) withTempDir { tmpDir => val ra = ResourceAllocation(EXECUTOR_GPU_ID, Seq("0", "1")) val ja = Extraction.decompose(Seq(ra)) val f1 = createTempJsonFile(tmpDir, "resources", ja) val parsedResources = backend.parseOrFindResources(Some(f1)) assert(parsedResources.size === 1) assert(parsedResources.get(GPU).nonEmpty) assert(parsedResources.get(GPU).get.name === GPU) assert(parsedResources.get(GPU).get.addresses.sameElements(Array("0", "1"))) } 
} test("parsing multiple resources resource profile") { val rpBuilder = new ResourceProfileBuilder val ereqs = new ExecutorResourceRequests().resource(GPU, 2) ereqs.resource(FPGA, 3) val rp = rpBuilder.require(ereqs).build testParsingMultipleResources(new SparkConf, rp) } test("parsing multiple resources") { val conf = new SparkConf conf.set(EXECUTOR_GPU_ID.amountConf, "2") conf.set(EXECUTOR_FPGA_ID.amountConf, "3") testParsingMultipleResources(conf, ResourceProfile.getOrCreateDefaultProfile(conf)) } def testParsingMultipleResources(conf: SparkConf, resourceProfile: ResourceProfile) { val serializer = new JavaSerializer(conf) val env = createMockEnv(conf, serializer) // we don't really use this, just need it to get at the parser function val backend = new CoarseGrainedExecutorBackend( env.rpcEnv, "driverurl", "1", "host1", "host1", 4, Seq.empty[URL], env, None, resourceProfile) withTempDir { tmpDir => val gpuArgs = ResourceAllocation(EXECUTOR_GPU_ID, Seq("0", "1")) val fpgaArgs = ResourceAllocation(EXECUTOR_FPGA_ID, Seq("f1", "f2", "f3")) val ja = Extraction.decompose(Seq(gpuArgs, fpgaArgs)) val f1 = createTempJsonFile(tmpDir, "resources", ja) val parsedResources = backend.parseOrFindResources(Some(f1)) assert(parsedResources.size === 2) assert(parsedResources.get(GPU).nonEmpty) assert(parsedResources.get(GPU).get.name === GPU) assert(parsedResources.get(GPU).get.addresses.sameElements(Array("0", "1"))) assert(parsedResources.get(FPGA).nonEmpty) assert(parsedResources.get(FPGA).get.name === FPGA) assert(parsedResources.get(FPGA).get.addresses.sameElements(Array("f1", "f2", "f3"))) } } test("error checking parsing resources and executor and task configs") { val conf = new SparkConf conf.set(EXECUTOR_GPU_ID.amountConf, "2") val serializer = new JavaSerializer(conf) val env = createMockEnv(conf, serializer) // we don't really use this, just need it to get at the parser function val backend = new CoarseGrainedExecutorBackend(env.rpcEnv, "driverurl", "1", "host1", "host1", 4, Seq.empty[URL], env, None, ResourceProfile.getOrCreateDefaultProfile(conf)) // not enough gpu's on the executor withTempDir { tmpDir => val gpuArgs = ResourceAllocation(EXECUTOR_GPU_ID, Seq("0")) val ja = Extraction.decompose(Seq(gpuArgs)) val f1 = createTempJsonFile(tmpDir, "resources", ja) var error = intercept[IllegalArgumentException] { val parsedResources = backend.parseOrFindResources(Some(f1)) }.getMessage() assert(error.contains("Resource: gpu, with addresses: 0 is less than what the " + "user requested: 2")) } // missing resource on the executor withTempDir { tmpDir => val fpga = ResourceAllocation(EXECUTOR_FPGA_ID, Seq("0")) val ja = Extraction.decompose(Seq(fpga)) val f1 = createTempJsonFile(tmpDir, "resources", ja) var error = intercept[SparkException] { val parsedResources = backend.parseOrFindResources(Some(f1)) }.getMessage() assert(error.contains("User is expecting to use resource: gpu, but didn't " + "specify a discovery script!")) } } test("executor resource found less than required resource profile") { val rpBuilder = new ResourceProfileBuilder val ereqs = new ExecutorResourceRequests().resource(GPU, 4) val treqs = new TaskResourceRequests().resource(GPU, 1) val rp = rpBuilder.require(ereqs).require(treqs).build testExecutorResourceFoundLessThanRequired(new SparkConf, rp) } test("executor resource found less than required") { val conf = new SparkConf() conf.set(EXECUTOR_GPU_ID.amountConf, "4") conf.set(TASK_GPU_ID.amountConf, "1") testExecutorResourceFoundLessThanRequired(conf, 
ResourceProfile.getOrCreateDefaultProfile(conf)) } private def testExecutorResourceFoundLessThanRequired( conf: SparkConf, resourceProfile: ResourceProfile) = { val serializer = new JavaSerializer(conf) val env = createMockEnv(conf, serializer) // we don't really use this, just need it to get at the parser function val backend = new CoarseGrainedExecutorBackend(env.rpcEnv, "driverurl", "1", "host1", "host1", 4, Seq.empty[URL], env, None, resourceProfile) // executor resources < required withTempDir { tmpDir => val gpuArgs = ResourceAllocation(EXECUTOR_GPU_ID, Seq("0", "1")) val ja = Extraction.decompose(Seq(gpuArgs)) val f1 = createTempJsonFile(tmpDir, "resources", ja) var error = intercept[IllegalArgumentException] { val parsedResources = backend.parseOrFindResources(Some(f1)) }.getMessage() assert(error.contains("Resource: gpu, with addresses: 0,1 is less than what the " + "user requested: 4")) } } test("use resource discovery") { val conf = new SparkConf conf.set(EXECUTOR_FPGA_ID.amountConf, "3") assume(!(Utils.isWindows)) withTempDir { dir => val scriptPath = createTempScriptWithExpectedOutput(dir, "fpgaDiscoverScript", """{"name": "fpga","addresses":["f1", "f2", "f3"]}""") conf.set(EXECUTOR_FPGA_ID.discoveryScriptConf, scriptPath) val serializer = new JavaSerializer(conf) val env = createMockEnv(conf, serializer) // we don't really use this, just need it to get at the parser function val backend = new CoarseGrainedExecutorBackend(env.rpcEnv, "driverurl", "1", "host1", "host1", 4, Seq.empty[URL], env, None, ResourceProfile.getOrCreateDefaultProfile(conf)) val parsedResources = backend.parseOrFindResources(None) assert(parsedResources.size === 1) assert(parsedResources.get(FPGA).nonEmpty) assert(parsedResources.get(FPGA).get.name === FPGA) assert(parsedResources.get(FPGA).get.addresses.sameElements(Array("f1", "f2", "f3"))) } } test("use resource discovery and allocated file option with resource profile") { assume(!(Utils.isWindows)) withTempDir { dir => val scriptPath = createTempScriptWithExpectedOutput(dir, "fpgaDiscoverScript", """{"name": "fpga","addresses":["f1", "f2", "f3"]}""") val rpBuilder = new ResourceProfileBuilder val ereqs = new ExecutorResourceRequests().resource(FPGA, 3, scriptPath) ereqs.resource(GPU, 2) val rp = rpBuilder.require(ereqs).build allocatedFileAndConfigsResourceDiscoveryTestFpga(dir, new SparkConf, rp) } } test("use resource discovery and allocated file option") { assume(!(Utils.isWindows)) withTempDir { dir => val scriptPath = createTempScriptWithExpectedOutput(dir, "fpgaDiscoverScript", """{"name": "fpga","addresses":["f1", "f2", "f3"]}""") val conf = new SparkConf conf.set(EXECUTOR_FPGA_ID.amountConf, "3") conf.set(EXECUTOR_FPGA_ID.discoveryScriptConf, scriptPath) conf.set(EXECUTOR_GPU_ID.amountConf, "2") val rp = ResourceProfile.getOrCreateDefaultProfile(conf) allocatedFileAndConfigsResourceDiscoveryTestFpga(dir, conf, rp) } } private def allocatedFileAndConfigsResourceDiscoveryTestFpga( dir: File, conf: SparkConf, resourceProfile: ResourceProfile) = { val serializer = new JavaSerializer(conf) val env = createMockEnv(conf, serializer) // we don't really use this, just need it to get at the parser function val backend = new CoarseGrainedExecutorBackend(env.rpcEnv, "driverurl", "1", "host1", "host1", 4, Seq.empty[URL], env, None, resourceProfile) val gpuArgs = ResourceAllocation(EXECUTOR_GPU_ID, Seq("0", "1")) val ja = Extraction.decompose(Seq(gpuArgs)) val f1 = createTempJsonFile(dir, "resources", ja) val parsedResources = 
backend.parseOrFindResources(Some(f1)) assert(parsedResources.size === 2) assert(parsedResources.get(GPU).nonEmpty) assert(parsedResources.get(GPU).get.name === GPU) assert(parsedResources.get(GPU).get.addresses.sameElements(Array("0", "1"))) assert(parsedResources.get(FPGA).nonEmpty) assert(parsedResources.get(FPGA).get.name === FPGA) assert(parsedResources.get(FPGA).get.addresses.sameElements(Array("f1", "f2", "f3"))) } test("track allocated resources by taskId") { val conf = new SparkConf val securityMgr = new SecurityManager(conf) val serializer = new JavaSerializer(conf) var backend: CoarseGrainedExecutorBackend = null try { val rpcEnv = RpcEnv.create("1", "localhost", 0, conf, securityMgr) val env = createMockEnv(conf, serializer, Some(rpcEnv)) backend = new CoarseGrainedExecutorBackend(env.rpcEnv, rpcEnv.address.hostPort, "1", "host1", "host1", 4, Seq.empty[URL], env, None, resourceProfile = ResourceProfile.getOrCreateDefaultProfile(conf)) assert(backend.taskResources.isEmpty) val taskId = 1000000 // We don't really verify the data, just pass it around. val data = ByteBuffer.wrap(Array[Byte](1, 2, 3, 4)) val taskDescription = new TaskDescription(taskId, 2, "1", "TASK 1000000", 19, 1, mutable.Map.empty, mutable.Map.empty, new Properties, Map(GPU -> new ResourceInformation(GPU, Array("0", "1"))), data) val serializedTaskDescription = TaskDescription.encode(taskDescription) backend.executor = mock[Executor] backend.rpcEnv.setupEndpoint("Executor 1", backend) // Launch a new task shall add an entry to `taskResources` map. backend.self.send(LaunchTask(new SerializableBuffer(serializedTaskDescription))) eventually(timeout(10.seconds)) { assert(backend.taskResources.size == 1) val resources = backend.taskResources(taskId) assert(resources(GPU).addresses sameElements Array("0", "1")) } // Update the status of a running task shall not affect `taskResources` map. backend.statusUpdate(taskId, TaskState.RUNNING, data) assert(backend.taskResources.size == 1) val resources = backend.taskResources(taskId) assert(resources(GPU).addresses sameElements Array("0", "1")) // Update the status of a finished task shall remove the entry from `taskResources` map. backend.statusUpdate(taskId, TaskState.FINISHED, data) assert(backend.taskResources.isEmpty) } finally { if (backend != null) { backend.rpcEnv.shutdown() } } } test("SPARK-24203 when bindAddress is not set, it defaults to hostname") { val args1 = Array( "--driver-url", "driverurl", "--executor-id", "1", "--hostname", "host1", "--cores", "1", "--app-id", "app1") val arg = CoarseGrainedExecutorBackend.parseArguments(args1, "") assert(arg.bindAddress == "host1") } test("SPARK-24203 when bindAddress is different, it does not default to hostname") { val args1 = Array( "--driver-url", "driverurl", "--executor-id", "1", "--hostname", "host1", "--bind-address", "bindaddress1", "--cores", "1", "--app-id", "app1") val arg = CoarseGrainedExecutorBackend.parseArguments(args1, "") assert(arg.bindAddress == "bindaddress1") } private def createMockEnv(conf: SparkConf, serializer: JavaSerializer, rpcEnv: Option[RpcEnv] = None): SparkEnv = { val mockEnv = mock[SparkEnv] val mockRpcEnv = mock[RpcEnv] when(mockEnv.conf).thenReturn(conf) when(mockEnv.serializer).thenReturn(serializer) when(mockEnv.closureSerializer).thenReturn(serializer) when(mockEnv.rpcEnv).thenReturn(rpcEnv.getOrElse(mockRpcEnv)) SparkEnv.set(mockEnv) mockEnv } }
darionyaphet/spark
core/src/test/scala/org/apache/spark/executor/CoarseGrainedExecutorBackendSuite.scala
Scala
apache-2.0
15,461
package net.machinemuse.numina.network

import java.io.{DataInputStream, DataOutputStream}

import net.minecraft.item.ItemStack
import net.minecraft.nbt.CompressedStreamTools

/**
 * Author: MachineMuse (Claire Semple)
 * Created: 1:15 AM, 09/05/13
 */
object RichInputStream {
  implicit def toRichStream(in: DataInputStream): RichInputStream = new RichInputStream(in)

  class RichInputStream(in: DataInputStream) {
    def readIntArray = (for (k <- 0 until in.readInt) yield in.readInt).toArray

    /**
     * Reads an ItemStack from the InputStream
     */
    def readItemStack = {
      val tag = readNBTTagCompound
      if (tag != null) {
        ItemStack.loadItemStackFromNBT(tag)
      } else {
        null
      }
    }

    /**
     * Reads a compressed NBTTagCompound from the InputStream
     */
    def readNBTTagCompound = {
      val length = in.readShort()
      if (length != -1) {
        CompressedStreamTools.readCompressed(in)
      } else {
        null
      }
    }

    /**
     * Reads a string from a packet
     */
    def readString = {
      val builder: StringBuilder = StringBuilder.newBuilder
      for (i <- 0 until in.readShort) builder.append(in.readChar)
      builder.toString()
    }
  }
}

object RichOutputStream {
  implicit def toRichStream(out: DataOutputStream): RichOutputStream = new RichOutputStream(out)

  class RichOutputStream(out: DataOutputStream) {
  }
}
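// Hedged usage sketch (not part of the original Numina file): shows how the implicit
// enrichment above is brought into scope at a call site. The int-array wire format
// (a length prefix followed by that many ints) is taken from readIntArray itself;
// the payload bytes below are invented purely for illustration.
object RichInputStreamUsageSketch {
  import java.io.{ByteArrayInputStream, ByteArrayOutputStream, DataInputStream, DataOutputStream}

  import RichInputStream._

  def main(args: Array[String]): Unit = {
    val buffer = new ByteArrayOutputStream()
    val out = new DataOutputStream(buffer)
    out.writeInt(3)
    Seq(10, 20, 30).foreach(out.writeInt)

    val in = new DataInputStream(new ByteArrayInputStream(buffer.toByteArray))
    println(in.readIntArray.mkString(", ")) // prints: 10, 20, 30
  }
}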
MachineMuse/Numina
src/main/scala/net/machinemuse/numina/network/RichIOStreams.scala
Scala
bsd-2-clause
1,415
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.zeppelin.display.angular

import org.apache.zeppelin.display.{AngularObjectRegistry, GUI}
import org.apache.zeppelin.interpreter._
import org.apache.zeppelin.user.AuthenticationInfo
import org.scalatest.concurrent.Eventually
import org.scalatest.{BeforeAndAfter, BeforeAndAfterEach, FlatSpec, Matchers}

/**
 * Abstract Test for AngularModel
 */
trait AbstractAngularModelTest extends FlatSpec
  with BeforeAndAfter with BeforeAndAfterEach with Eventually with Matchers {

  override def beforeEach() {
    val intpGroup = new InterpreterGroup()
    val context = new InterpreterContext("note", "id", null, "title", "text",
      new AuthenticationInfo(), new java.util.HashMap[String, Object](), new GUI(),
      new AngularObjectRegistry(intpGroup.getId(), null),
      null,
      new java.util.LinkedList[InterpreterContextRunner](),
      new InterpreterOutput(new InterpreterOutputListener() {
        override def onAppend(out: InterpreterOutput, line: Array[Byte]): Unit = {
          // nothing to do
        }

        override def onUpdate(out: InterpreterOutput, output: Array[Byte]): Unit = {
          // nothing to do
        }
      }))
    InterpreterContext.set(context)

    super.beforeEach() // To be stackable, must call super.beforeEach
  }

  def angularModel(name: String): AbstractAngularModel

  def angularModel(name: String, value: Any): AbstractAngularModel

  "AngularModel" should "able to create AngularObject" in {
    val registry = InterpreterContext.get().getAngularObjectRegistry
    registrySize should be(0)

    angularModel("model1")() should be(None)
    registrySize should be(0)

    angularModel("model1", "value1")() should be("value1")
    registrySize should be(1)

    angularModel("model1")() should be("value1")
    registrySize should be(1)
  }

  "AngularModel" should "able to update AngularObject" in {
    val registry = InterpreterContext.get().getAngularObjectRegistry

    val model1 = angularModel("model1", "value1")
    model1() should be("value1")
    registrySize should be(1)

    model1.value("newValue1")
    model1() should be("newValue1")
    registrySize should be(1)

    angularModel("model1", "value2")() should be("value2")
    registrySize should be(1)
  }

  "AngularModel" should "able to remove AngularObject" in {
    angularModel("model1", "value1")
    registrySize should be(1)

    angularModel("model1").remove()
    registrySize should be(0)
  }

  def registry() = {
    InterpreterContext.get().getAngularObjectRegistry
  }

  def registrySize() = {
    registry().getAllWithGlobal(InterpreterContext.get().getNoteId).size
  }
}
vgmartinez/incubator-zeppelin
zeppelin-display/src/test/scala/org/apache/zeppelin/display/angular/AbstractAngularModelTest.scala
Scala
apache-2.0
3,439
package com.arcusys.valamis.slide.model

object SlideConstants {
  val PlainTextIdPrefix = "t_"
  val QuestionIdPrefix = "q_"
}
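// Hedged sketch (not part of the original Valamis file): illustrates how the prefixes
// above are meant to distinguish slide element ids; the numeric ids are invented.
object SlideConstantsUsageSketch {
  def plainTextElementId(id: Long): String = s"${SlideConstants.PlainTextIdPrefix}$id" // e.g. "t_42"

  def questionElementId(id: Long): String = s"${SlideConstants.QuestionIdPrefix}$id" // e.g. "q_42"
}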
arcusys/Valamis
valamis-slide/src/main/scala/com/arcusys/valamis/slide/model/SlideConstants.scala
Scala
gpl-3.0
129
package lang.akka.first

import akka.actor.{ActorSystem, Props}
import akka.dispatch.MessageDispatcher

object FirstApp extends App {
  println("------------------ hello actor ---------------------------------")

  val system = ActorSystem("myActorSystem")

  private val lookup: MessageDispatcher = system.dispatchers.defaultGlobalDispatcher
  println(lookup)

  private val firstActor = system.actorOf(Props[FirstActor], "FirstActor")

  for (i <- "123456789") {
    firstActor.toString()
    firstActor.tell("test", firstActor)
    firstActor.tell(i, firstActor)
  }
}
congdepeng/scalab
src/main/scala/lang/akka/first/FirstApp.scala
Scala
apache-2.0
573
package com.socrata.soql.functions

import com.socrata.soql.types._

object SoQLTypeClasses {
  val Ordered = Set[SoQLType](
    SoQLText,
    SoQLNumber,
    SoQLDouble,
    SoQLMoney,
    SoQLBoolean,
    SoQLFixedTimestamp,
    SoQLFloatingTimestamp,
    SoQLDate,
    SoQLTime,
    SoQLID,
    SoQLVersion,
    SoQLUrl
  )

  val GeospatialLike = Set[SoQLType](SoQLPoint, SoQLMultiPoint, SoQLLine, SoQLMultiLine, SoQLPolygon, SoQLMultiPolygon)

  val Equatable = Ordered ++ GeospatialLike ++ Set[SoQLType](
    SoQLBlob,
    SoQLPhone,
    SoQLPhoto,
    SoQLLocation,
    SoQLUrl,
    SoQLJson
  )

  val NumLike = Set[SoQLType](SoQLNumber, SoQLDouble, SoQLMoney)
  val RealNumLike = Set[SoQLType](SoQLNumber, SoQLDouble)
  val TimestampLike = Set[SoQLType](SoQLFixedTimestamp, SoQLFloatingTimestamp)
}
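// Hedged sketch (not part of the original soql-stdlib file): the sets above are used as
// simple type classes, so the typical call site is a membership test. The function name
// below is invented for illustration.
object SoQLTypeClassesUsageSketch {
  def supportsOrdering(tpe: SoQLType): Boolean = SoQLTypeClasses.Ordered(tpe)

  // e.g. supportsOrdering(SoQLNumber) == true, SoQLTypeClasses.NumLike(SoQLText) == false
}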
socrata-platform/soql-reference
soql-stdlib/src/main/scala/com/socrata/soql/functions/SoQLTypeClasses.scala
Scala
apache-2.0
808
package cortex.io

import java.io._
import java.net.{InetSocketAddress, ServerSocket, Socket}

import cortex.util.log

import scala.concurrent.ExecutionContext.global
import scala.concurrent.{ExecutionContext, Future}

/**
 * Essentially our server, this manages the input and output
 * to and from the server.
 */
abstract class IOManager(port: Int,
                         executionContext: ExecutionContext = global) {
  implicit val _executionContext = executionContext

  // open new server socket on selected port
  lazy val server = {
    val ss = new ServerSocket()
    try {
      ss.setReuseAddress(true)
      ss.bind(new InetSocketAddress(port))
    } catch {
      case e: Exception => e.printStackTrace()
    }
    ss
  }

  @volatile var isRunning = false

  protected def ioLoop(socket: Socket)

  @inline private def socketLoop(server: ServerSocket) = {
    var socket: Socket = null
    try {
      log trace "Awaiting request"

      // system-level wait while we literally wait on a request
      socket = server.accept()
      log trace "Socket accepted"

      // spawn off a new Future once a request has been accepted
      Future {
        ioLoop(socket)
      }
    } catch {
      case e: Exception =>
        log error e.getMessage
        e.printStackTrace()
        // accept() may have thrown before a socket was assigned, so guard against null
        if (socket != null) {
          socket.close()
        }
    }
  }

  def shutdown() = {
    isRunning = false
    try {
      server.close()
    } catch {
      case e: IOException => e.printStackTrace()
    }
  }

  /**
   * Main IO loop.
   */
  def loop() = {
    isRunning = true
    Future {
      // begin main loop
      while (isRunning) {
        socketLoop(server)
      }
    }
  }
}
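// Hedged sketch (not part of the original cortex source): a minimal concrete IOManager
// that echoes one line per connection, only to show how ioLoop is meant to be implemented.
// It relies on the java.io and java.net imports already at the top of this file; the
// protocol is invented for illustration.
class EchoLineManager(port: Int) extends IOManager(port) {
  protected def ioLoop(socket: Socket): Unit = {
    val reader = new BufferedReader(new InputStreamReader(socket.getInputStream))
    val writer = new PrintWriter(socket.getOutputStream, true)
    try {
      val line = reader.readLine()
      if (line != null) writer.println(line) // echo a single line back to the client
    } finally {
      socket.close()
    }
  }
}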
jsflax/cortex
src/main/scala/cortex/io/IOManager.scala
Scala
mit
1,656
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.carbondata

import java.io.IOException

import mockit.{Mock, MockUp}
import org.apache.spark.sql.CarbonEnv
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.BeforeAndAfterAll

import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.datastore.impl.FileFactory
import org.apache.carbondata.core.statusmanager.SegmentStatusManager
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.carbondata.core.util.path.CarbonTablePath

class TableStatusBackupTest extends QueryTest with BeforeAndAfterAll {
  override protected def beforeAll(): Unit = {
    CarbonProperties.getInstance().addProperty(
      CarbonCommonConstants.ENABLE_TABLE_STATUS_BACKUP, "true")
    sql("drop table if exists source")
    sql("create table source(a string) stored as carbondata")
  }

  override protected def afterAll(): Unit = {
    sql("drop table if exists source")
    CarbonProperties.getInstance().addProperty(
      CarbonCommonConstants.ENABLE_TABLE_STATUS_BACKUP, "false")
  }

  test("backup table status file") {
    sql("insert into source values ('A'), ('B')")
    val tablePath = CarbonEnv.getCarbonTable(None, "source")(sqlContext.sparkSession).getTablePath
    val tableStatusFilePath = CarbonTablePath.getTableStatusFilePath(tablePath)
    val oldTableStatus = SegmentStatusManager.readTableStatusFile(tableStatusFilePath)

    var mock = new MockUp[SegmentStatusManager]() {
      @Mock
      @throws[IOException]
      def mockForTest(): Unit = {
        throw new IOException("thrown in mock")
      }
    }

    val exception = intercept[IOException] {
      sql("insert into source values ('A'), ('B')")
    }
    assert(exception.getMessage.contains("thrown in mock"))

    val backupPath = tableStatusFilePath + ".backup"
    assert(FileFactory.isFileExist(backupPath))
    val backupTableStatus = SegmentStatusManager.readTableStatusFile(backupPath)
    assertResult(oldTableStatus)(backupTableStatus)

    mock = new MockUp[SegmentStatusManager]() {
      @Mock
      def mockForTest(): Unit = {
      }
    }
  }
}
zzcclp/carbondata
integration/spark/src/test/scala/org/apache/spark/carbondata/TableStatusBackupTest.scala
Scala
apache-2.0
2,928
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution import org.apache.spark.sql.{AnalysisException, QueryTest, Row} import org.apache.spark.sql.catalog.Table import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.analysis.NoSuchTableException import org.apache.spark.sql.test.SharedSQLContext import org.apache.spark.sql.types.StructType class GlobalTempViewSuite extends QueryTest with SharedSQLContext { import testImplicits._ override protected def beforeAll(): Unit = { super.beforeAll() globalTempDB = spark.sharedState.globalTempViewManager.database } private var globalTempDB: String = _ test("basic semantic") { sql("CREATE GLOBAL TEMP VIEW src AS SELECT 1, 'a'") // If there is no database in table name, we should try local temp view first, if not found, // try table/view in current database, which is "default" in this case. So we expect // NoSuchTableException here. intercept[NoSuchTableException](spark.table("src")) // Use qualified name to refer to the global temp view explicitly. checkAnswer(spark.table(s"$globalTempDB.src"), Row(1, "a")) // Table name without database will never refer to a global temp view. intercept[NoSuchTableException](sql("DROP VIEW src")) sql(s"DROP VIEW $globalTempDB.src") // The global temp view should be dropped successfully. intercept[NoSuchTableException](spark.table(s"$globalTempDB.src")) // We can also use Dataset API to create global temp view Seq(1 -> "a").toDF("i", "j").createGlobalTempView("src") checkAnswer(spark.table(s"$globalTempDB.src"), Row(1, "a")) // Use qualified name to rename a global temp view. sql(s"ALTER VIEW $globalTempDB.src RENAME TO src2") intercept[NoSuchTableException](spark.table(s"$globalTempDB.src")) checkAnswer(spark.table(s"$globalTempDB.src2"), Row(1, "a")) // Use qualified name to alter a global temp view. 
sql(s"ALTER VIEW $globalTempDB.src2 AS SELECT 2, 'b'") checkAnswer(spark.table(s"$globalTempDB.src2"), Row(2, "b")) // We can also use Catalog API to drop global temp view spark.catalog.dropGlobalTempView("src2") intercept[NoSuchTableException](spark.table(s"$globalTempDB.src2")) } test("global temp view is shared among all sessions") { try { sql("CREATE GLOBAL TEMP VIEW src AS SELECT 1, 2") checkAnswer(spark.table(s"$globalTempDB.src"), Row(1, 2)) val newSession = spark.newSession() checkAnswer(newSession.table(s"$globalTempDB.src"), Row(1, 2)) } finally { spark.catalog.dropGlobalTempView("src") } } test("global temp view database should be preserved") { val e = intercept[AnalysisException](sql(s"CREATE DATABASE $globalTempDB")) assert(e.message.contains("system preserved database")) val e2 = intercept[AnalysisException](sql(s"USE $globalTempDB")) assert(e2.message.contains("system preserved database")) } test("CREATE GLOBAL TEMP VIEW USING") { withTempPath { path => try { Seq(1 -> "a").toDF("i", "j").write.parquet(path.getAbsolutePath) sql(s"CREATE GLOBAL TEMP VIEW src USING parquet OPTIONS (PATH '${path.getAbsolutePath}')") checkAnswer(spark.table(s"$globalTempDB.src"), Row(1, "a")) sql(s"INSERT INTO $globalTempDB.src SELECT 2, 'b'") checkAnswer(spark.table(s"$globalTempDB.src"), Row(1, "a") :: Row(2, "b") :: Nil) } finally { spark.catalog.dropGlobalTempView("src") } } } test("CREATE TABLE LIKE should work for global temp view") { try { sql("CREATE GLOBAL TEMP VIEW src AS SELECT 1 AS a, '2' AS b") sql(s"CREATE TABLE cloned LIKE ${globalTempDB}.src") val tableMeta = spark.sessionState.catalog.getTableMetadata(TableIdentifier("cloned")) assert(tableMeta.schema == new StructType().add("a", "int", false).add("b", "string", false)) } finally { spark.catalog.dropGlobalTempView("src") sql("DROP TABLE default.cloned") } } test("list global temp views") { try { sql("CREATE GLOBAL TEMP VIEW v1 AS SELECT 3, 4") sql("CREATE TEMP VIEW v2 AS SELECT 1, 2") checkAnswer(sql(s"SHOW TABLES IN $globalTempDB"), Row(globalTempDB, "v1", true) :: Row("", "v2", true) :: Nil) assert(spark.catalog.listTables(globalTempDB).collect().toSeq.map(_.name) == Seq("v1", "v2")) } finally { spark.catalog.dropTempView("v1") spark.catalog.dropGlobalTempView("v2") } } test("should lookup global temp view if and only if global temp db is specified") { try { sql("CREATE GLOBAL TEMP VIEW same_name AS SELECT 3, 4") sql("CREATE TEMP VIEW same_name AS SELECT 1, 2") checkAnswer(sql("SELECT * FROM same_name"), Row(1, 2)) // we never lookup global temp views if database is not specified in table name spark.catalog.dropTempView("same_name") intercept[AnalysisException](sql("SELECT * FROM same_name")) // Use qualified name to lookup a global temp view. checkAnswer(sql(s"SELECT * FROM $globalTempDB.same_name"), Row(3, 4)) } finally { spark.catalog.dropTempView("same_name") spark.catalog.dropGlobalTempView("same_name") } } test("public Catalog should recognize global temp view") { try { sql("CREATE GLOBAL TEMP VIEW src AS SELECT 1, 2") assert(spark.catalog.tableExists(globalTempDB, "src")) assert(spark.catalog.getTable(globalTempDB, "src").toString == new Table( name = "src", database = globalTempDB, description = null, tableType = "TEMPORARY", isTemporary = true).toString) } finally { spark.catalog.dropGlobalTempView("src") } } }
javalovelinux/SparkGroovyScript
sql/core/src/test/scala/org/apache/spark/sql/execution/GlobalTempViewSuite.scala
Scala
apache-2.0
6,535
import sbt._
import sbt.Keys._
import net.virtualvoid.sbt.graph.Plugin._

object ProjectBuild extends Build {
  lazy val project = Project(
    id = "root",
    base = file("."),
    settings = Project.defaultSettings ++ graphSettings ++ Seq(
      name := "spark-kafka",
      organization := "com.tresata",
      version := "0.5.0-SNAPSHOT",
      scalaVersion := "2.10.4",
      crossScalaVersions := Seq("2.10.4", "2.11.6"),
      javacOptions ++= Seq("-Xlint:unchecked", "-source", "1.6", "-target", "1.6"),
      scalacOptions ++= Seq("-unchecked", "-deprecation", "-target:jvm-1.6"),
      libraryDependencies ++= Seq(
        "org.slf4j" % "slf4j-api" % "1.7.5" % "compile",
        "org.apache.kafka" %% "kafka" % "0.8.2.1" % "compile" exclude("org.jboss.netty", "netty") exclude("com.sun.jmx", "jmxri") exclude("com.sun.jdmk", "jmxtools") exclude("javax.jms", "jms") exclude("javax.mail", "mail") exclude("jline", "jline"),
        "org.slf4j" % "slf4j-api" % "1.6.1" % "provided",
        "org.apache.spark" %% "spark-core" % "1.4.0" % "provided",
        "org.slf4j" % "slf4j-log4j12" % "1.7.5" % "test",
        "org.scalatest" %% "scalatest" % "2.2.1" % "test"
      ),
      publishMavenStyle := true,
      pomIncludeRepository := { x => false },
      publishArtifact in Test := false,
      publishTo := {
        val nexus = "https://oss.sonatype.org/"
        if (isSnapshot.value)
          Some("snapshots" at nexus + "content/repositories/snapshots")
        else
          Some("releases" at nexus + "service/local/staging/deploy/maven2")
      },
      credentials += Credentials(Path.userHome / ".m2" / "credentials_sonatype"),
      pomExtra := (
        <url>https://github.com/tresata/spark-scalding</url>
        <licenses>
          <license>
            <name>Apache 2</name>
            <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
            <distribution>repo</distribution>
            <comments>A business-friendly OSS license</comments>
          </license>
        </licenses>
        <scm>
          <url>[email protected]:tresata/spark-scalding.git</url>
          <connection>scm:git:[email protected]:tresata/spark-scalding.git</connection>
        </scm>
        <developers>
          <developer>
            <id>koertkuipers</id>
            <name>Koert Kuipers</name>
            <url>https://github.com/koertkuipers</url>
          </developer>
        </developers>)
    )
  )
}
pronix/spark-kafka
project/Build.scala
Scala
apache-2.0
2,499
/*
Copyright 2016-17, Hasso-Plattner-Institut fuer Softwaresystemtechnik GmbH

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package de.hpi.ingestion.datamerge

import java.util.UUID

import de.hpi.ingestion.framework.SparkJob
import com.datastax.spark.connector._
import de.hpi.ingestion.dataimport.JSONParser
import de.hpi.ingestion.datalake.models.{Subject, Version}
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import play.api.libs.json.{JsValue, Json}

class MasterUpdate extends SparkJob {
  import MasterUpdate._

  appName = "Master Update"
  configFile = "master_update.xml"

  var subjects: RDD[Subject] = _
  var updatedMasters: RDD[Subject] = _

  // $COVERAGE-OFF$
  /**
   * Loads the Subjects from the Cassandra.
   * @param sc Spark Context used to load the RDDs
   */
  override def load(sc: SparkContext): Unit = {
    subjects = sc.cassandraTable[Subject](settings("subjectKeyspace"), settings("subjectTable"))
  }

  /**
   * Writes the updated master nodes to the Cassandra.
   * @param sc Spark Context used to connect to the Cassandra or the HDFS
   */
  override def save(sc: SparkContext): Unit = {
    updatedMasters.saveToCassandra(settings("subjectKeyspace"), settings("subjectTable"))
  }
  // $COVERAGE-ON$

  /**
   * Updates the master nodes by newly generating their data.
   * @param sc Spark Context used to e.g. broadcast variables
   */
  override def run(sc: SparkContext): Unit = {
    val version = Version(appName, List("master update"), sc, true, settings.get("subjectTable"))
    val masterGroups = updateSubjects(subjects, conf.commitJsonOpt)
      .map(subject => (subject.master, List(subject)))
      .reduceByKey(_ ++ _)
      .values
    updatedMasters = masterGroups.map { masterGroup =>
      val master = masterGroup.find(_.isMaster).get
      val slaves = masterGroup.filter(_.isSlave)
      Merging.mergeIntoMaster(master, slaves, version).head
    }
  }
}

object MasterUpdate extends JSONParser {
  /**
   * Parses the Commit JSON Object to extract the ids of the changed master Subjects.
   * @param commitJson the JSON of the Commit job. Contains all changes done in the commit
   * @return Set of master ids that will be updated
   */
  def getMastersFromCommit(commitJson: JsValue): Set[UUID] = {
    (extractMap(commitJson, List("created")).keySet ++
      extractMap(commitJson, List("updated")).keySet ++
      extractMap(commitJson, List("deleted")).keySet
    ).map(UUID.fromString(_))
  }

  /**
   * Filters the passed Subjects for only those that need to be updated, if this job is executed after a Commit job.
   * @param subjects RDD of Subjects that will be filtered
   * @param commitConfOption command line option of curation commit JSON object that contains the changed Subjects
   * @return the Subjects that need to be updated, if the JSON of a Commit job is provided. Otherwise the input
   *         Subjects are returned
   */
  def updateSubjects(subjects: RDD[Subject], commitConfOption: Option[String] = None): RDD[Subject] = {
    commitConfOption
      .map(Json.parse)
      .map(getMastersFromCommit)
      .map(masterIds => subjects.filter(subject => masterIds(subject.master)))
      .getOrElse(subjects)
  }
}
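// Hedged sketch (not part of the original ingestion source): roughly the commit JSON shape
// consumed by getMastersFromCommit; only the keys of "created", "updated" and "deleted"
// are read and parsed as UUIDs. The nested value objects are placeholders, since their
// real structure is an assumption here.
object MasterUpdateCommitSketch {
  import play.api.libs.json.Json

  val commitJson = Json.parse(
    """{
      |  "created": { "3254650c-b406-4a9e-9936-94483e7b4473": {} },
      |  "updated": {},
      |  "deleted": {}
      |}""".stripMargin)

  // expected: a Set containing the single UUID listed under "created"
  val changedMasters = MasterUpdate.getMastersFromCommit(commitJson)
}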
bpn1/ingestion
src/main/scala/de/hpi/ingestion/datamerge/MasterUpdate.scala
Scala
apache-2.0
3,905
package com.twitter.gizzard.scheduler

import scala.collection.mutable

import com.twitter.gizzard.shards.{ShardBlackHoleException, ShardOfflineException}

/**
 * A wrapper Job for a series of smaller jobs that should be executed together. It will attempt
 * to execute all the smaller jobs, regardless of individual failures. Failed jobs will remain
 * in our queue and can be retried later.
 */
abstract class NestedJob(val jobs: Iterable[JsonJob]) extends JsonJob {
  val taskQueue = {
    val q = new mutable.Queue[JsonJob]()
    q ++= jobs
    q
  }

  def apply() {
    val failedTasks = mutable.Buffer[JsonJob]()
    var lastNormalException: Option[Throwable] = None
    var lastOfflineException: Option[ShardOfflineException] = None
    var lastBlackHoleException: Option[ShardBlackHoleException] = None

    while (!taskQueue.isEmpty) {
      val task = taskQueue.head
      try {
        task.apply()
      } catch {
        case e: ShardBlackHoleException =>
          lastBlackHoleException = Some(e)
        case e: ShardOfflineException =>
          failedTasks += task
          lastOfflineException = Some(e)
        case e =>
          failedTasks += task
          lastNormalException = Some(e)
      }
      taskQueue.dequeue()
    }

    if (!failedTasks.isEmpty) {
      taskQueue ++= failedTasks
    }

    // Prioritize exceptions:
    // (1) ShardOfflineException - jobs with this exception should be retried forever
    // (2) Regular Exception
    // (3) ShardBlackHoleException - jobs with this exception will not be retried
    lastOfflineException orElse lastNormalException orElse lastBlackHoleException foreach { e => throw e }
  }

  override def loggingName = jobs.map { _.loggingName }.mkString(",")

  override def equals(other: Any) = {
    other match {
      case other: NestedJob if (other ne null) =>
        taskQueue.toList == other.taskQueue.toList
      case _ =>
        false
    }
  }

  override def toString = "<NestedJob: tasks=%d: %s>".format(taskQueue.size, jobs)
}
kmiku7/gizzard
src/main/scala/com/twitter/gizzard/scheduler/NestedJob.scala
Scala
apache-2.0
2,031
package com.atomist.rug.kind.pom import com.atomist.rug.kind.build.{BuildViewMutatingFunctions, BuildViewNonMutatingFunctions} import com.atomist.rug.kind.core.ProjectMutableView import com.atomist.rug.kind.xml.XmlMutableView import com.atomist.rug.spi.{ExportFunction, ExportFunctionParameterDescription, TerminalView} import com.atomist.source.FileArtifact object PomMutableView { val project = "project" val projectBaseXPath = s"/$project" val mavenGroupId = "groupId" val mavenArtifactId = "artifactId" val groupIdXPath = s"$projectBaseXPath/$mavenGroupId" val artifactIdXPath = s"$projectBaseXPath/$mavenArtifactId" val version = "version" val versionXPath = s"$projectBaseXPath/$version" val scope = "scope" val packagingXPath = s"$projectBaseXPath/packaging" val name = "name" val nameXPath = s"$projectBaseXPath/$name" val descriptionXPath = s"$projectBaseXPath/description" val parent = "parent" val parentBaseXPath = s"$projectBaseXPath/$parent" val parentGroupIdXPath = s"$parentBaseXPath/$mavenGroupId" val parentArtifactIdXPath = s"$parentBaseXPath/$mavenArtifactId" val parentVersionXPath = s"$parentBaseXPath/version" val projectPropertyBaseXPath = s"$projectBaseXPath/properties" val dependency = "dependency" val dependencies = "dependencies" val dependenciesBaseXPath = s"$projectBaseXPath/$dependencies" val dependencyBaseXPath = s"$projectBaseXPath/$dependencies/$dependency" val plugin = "plugin" val pluginManagement = "pluginManagement" val plugins = "plugins" val buildBaseXPath = s"$projectBaseXPath/build" val buildPluginManagementBaseXPath = s"$buildBaseXPath/$pluginManagement" val buildPluginsBaseXPath = s"$buildBaseXPath/$plugins" val dependencyManagement = "dependencyManagement" val dependencyManagementBaseXPath = s"$projectBaseXPath/$dependencyManagement" } trait PomMutableViewNonMutatingFunctions extends BuildViewNonMutatingFunctions { import PomMutableView._ def getTextContentFor(xpath: String): String def contains(xpath: String): Boolean @ExportFunction(readOnly = true, description = "Return the content of the groupId element") def groupId: String = getTextContentFor(groupIdXPath) @ExportFunction(readOnly = true, description = "Return the content of the artifactId element") def artifactId: String = getTextContentFor(artifactIdXPath) @ExportFunction(readOnly = true, description = "Return the content of the version element") def version: String = getTextContentFor(versionXPath) @ExportFunction(readOnly = true, description = "Return the content of the packaging element") def packaging: String = getTextContentFor(packagingXPath) @ExportFunction(readOnly = true, description = "Return the content of the name element") def name: String = getTextContentFor(nameXPath) @ExportFunction(readOnly = true, description = "Return the content of the description element") def description: String = getTextContentFor(descriptionXPath) @ExportFunction(readOnly = true, description = "Return the content of the parent groupId") def parentGroupId: String = getTextContentFor(parentGroupIdXPath) @ExportFunction(readOnly = true, description = "Return the content of the parent artifactId") def parentArtifactId: String = getTextContentFor(parentArtifactIdXPath) @ExportFunction(readOnly = true, description = "Return the content of the parent version") def parentVersion: String = getTextContentFor(parentVersionXPath) @ExportFunction(readOnly = true, description = "Return the value of a project property") def property(@ExportFunctionParameterDescription(name = "projectPropertyName", description = "The project 
property you are looking to inspect") projectPropertyName: String): String = getTextContentFor(s"$projectPropertyBaseXPath/$projectPropertyName") @ExportFunction(readOnly = true, description = "Return the value of a dependency's version as specified by artifactId") def dependencyVersion(@ExportFunctionParameterDescription(name = "groupId", description = "The groupId of the dependency you are looking to inspect") groupId: String, @ExportFunctionParameterDescription(name = "artifactId", description = "The artifactId of the dependency you are looking to inspect") artifactId: String): String = getTextContentFor(s"$dependencyBaseXPath/version[../$mavenArtifactId = '$artifactId'and ../$mavenGroupId [text() = '$groupId']]") @ExportFunction(readOnly = true, description = "Return the value of a dependency's scope as specified by artifactId") def dependencyScope(@ExportFunctionParameterDescription(name = "groupId", description = "The groupId of the dependency you are looking to inspect") groupId: String, @ExportFunctionParameterDescription(name = "artifactId", description = "The artifactId of the dependency you are looking to inspect") artifactId: String): String = getTextContentFor(s"$dependencyBaseXPath/$scope[../$mavenArtifactId = '$artifactId'and ../$mavenGroupId [text() = '$groupId']]") @ExportFunction(readOnly = true, description = "Return whether a dependency is present as specified by artifactId and groupId") def isDependencyPresent(@ExportFunctionParameterDescription(name = "groupId", description = "The groupId of the dependency you are looking to test the presence of") groupId: String, @ExportFunctionParameterDescription(name = "artifactId", description = "The artifactId of the dependency you are looking to test the presence of") artifactId: String): Boolean = contains(s"$dependencyBaseXPath/$mavenArtifactId[text() = '$artifactId' and ../$mavenGroupId [text() = '$groupId']]") @ExportFunction(readOnly = true, description = "Return whether a build plugin is present as specified by artifactId and groupId") def isBuildPluginPresent(@ExportFunctionParameterDescription(name = "groupId", description = "The groupId of the build plugin you are looking to test the presence of") groupId: String, @ExportFunctionParameterDescription(name = "artifactId", description = "The artifactId of the build plugin you are looking to test the presence of") artifactId: String): Boolean = contains(s"$buildPluginsBaseXPath/$plugin/$mavenArtifactId [text() = '$artifactId' and ../$mavenGroupId [text() = '$groupId']]") @ExportFunction(readOnly = true, description = "Return whether a build plugin management plugin is present as specified by artifactId and groupId") def isBuildPluginManagementPresent(@ExportFunctionParameterDescription(name = "groupId", description = "The groupId of the build plugin management plugin you are looking to test the presence of") groupId: String, @ExportFunctionParameterDescription(name = "artifactId", description = "The artifactId of the build plugin management plugin you are looking to test the presence of") artifactId: String): Boolean = contains(s"$buildPluginManagementBaseXPath/$plugins/$plugin/$mavenArtifactId [text() = '$artifactId' and ../$mavenGroupId [text() = '$groupId']]") @ExportFunction(readOnly = true, description = "Return whether a dependency management dependency is present as specified by artifactId and groupId") def isDependencyManagementDependencyPresent(@ExportFunctionParameterDescription(name = "groupId", description = "The groupId of the dependency management dependency 
you are looking to test the presence of") groupId: String, @ExportFunctionParameterDescription(name = "artifactId", description = "The artifactId of the dependency management dependency you are looking to test the presence of") artifactId: String): Boolean = contains(s"$dependencyManagementBaseXPath/$dependencies/$dependency/$mavenArtifactId [text() = '$artifactId' and ../$mavenGroupId [text() = '$groupId']]") @ExportFunction(readOnly = true, description = "") def isProfilePresent(@ExportFunctionParameterDescription(name = "id", description = "") id: String): Boolean = contains(s"$projectBaseXPath/profiles/profile/id [text() = '$id']") } trait PomMutableViewMutatingFunctions extends BuildViewMutatingFunctions { import PomMutableView._ def setTextContentFor(xpath: String, newContent: String): Unit def addOrReplaceNode(xpathToParent: String, xpathToNode: String, newNode: String, nodeContent: String): Unit def addNodeIfNotPresent(xpathToParent: String, xpathToNode: String, newNode: String, nodeContent: String): Unit def deleteNode(xpath: String): Unit @ExportFunction(readOnly = false, description = "Set the content of the groupId element") def setGroupId(@ExportFunctionParameterDescription(name = "newGroupId", description = "The groupId that you are trying to set") newGroupId: String): Unit = setTextContentFor(groupIdXPath, newGroupId) @ExportFunction(readOnly = false, description = "Set the content of the artifactId element") def setArtifactId(@ExportFunctionParameterDescription(name = "newArtifactId", description = "The artifactId that you are trying to set") newArtifactId: String): Unit = setTextContentFor(artifactIdXPath, newArtifactId) @ExportFunction(readOnly = false, description = "Set the content of the version element") def setVersion(@ExportFunctionParameterDescription(name = "newVersion", description = "The version that you are trying to set") newVersion: String): Unit = setTextContentFor(versionXPath, newVersion) @ExportFunction(readOnly = false, description = "Set the content of the packaging element") def setPackaging(@ExportFunctionParameterDescription(name = "newPackaging", description = "The packaging that you are trying to set") newPackaging: String): Unit = setTextContentFor(packagingXPath, newPackaging) @ExportFunction(readOnly = false, description = "Add or replace project name") def setProjectName(@ExportFunctionParameterDescription(name = "newName", description = "The name being set") newName: String): Unit = addOrReplaceNode(s"$projectPropertyBaseXPath", nameXPath, name, s"<$name>$newName</$name>") @ExportFunction(readOnly = false, description = "Set the content of the description element") def setDescription(@ExportFunctionParameterDescription(name = "newDescription", description = "The description that you are trying to set") newDescription: String): Unit = setTextContentFor(descriptionXPath, newDescription) @ExportFunction(readOnly = false, description = "Set the content of the parent groupId element") def setParentGroupId(@ExportFunctionParameterDescription(name = "newParentGroupId", description = "The parent groupId that you are trying to set") newParentGroupId: String): Unit = setTextContentFor(parentGroupIdXPath, newParentGroupId) @ExportFunction(readOnly = false, description = "Set the content of the parent artifactId element") def setParentArtifactId(@ExportFunctionParameterDescription(name = "newParentArtifactId", description = "The parent artifactId that you are trying to set") newParentArtifactId: String): Unit = setTextContentFor(parentArtifactIdXPath, 
newParentArtifactId) @ExportFunction(readOnly = false, description = "Set the content of the parent version element") def setParentVersion(@ExportFunctionParameterDescription(name = "newParentVersion", description = "The parent version that you are trying to set") newParentVersion: String): Unit = setTextContentFor(parentVersionXPath, newParentVersion) @ExportFunction(readOnly = false, description = "Set the content of the parent block") def replaceParent(@ExportFunctionParameterDescription(name = "newParentBlock", description = "The parent block that you are trying to set") newParentBlock: String): Unit = addOrReplaceNode(parentBaseXPath, parentBaseXPath, parent, newParentBlock) @ExportFunction(readOnly = false, description = "Add or replace a property") def addOrReplaceProperty(@ExportFunctionParameterDescription(name = "propertyName", description = "The name of the property being set") propertyName: String, @ExportFunctionParameterDescription(name = "propertyValue", description = "The value of the property being set") propertyValue: String): Unit = addOrReplaceNode(s"$projectPropertyBaseXPath", s"/project/properties/$propertyName", propertyName, s"<$propertyName>$propertyValue</$propertyName>") @ExportFunction(readOnly = false, description = "Remove a property") def removeProperty(@ExportFunctionParameterDescription(name = "propertyName", description = "The name of the project property being deleted") propertyName: String): Unit = deleteNode(s"$projectPropertyBaseXPath/$propertyName") @ExportFunction(readOnly = false, description = "Add or replace a dependency") def addOrReplaceDependency(@ExportFunctionParameterDescription(name = "groupId", description = "The value of the dependency's groupId") groupId: String, @ExportFunctionParameterDescription(name = "artifactId", description = "The value of the dependency's artifactId") artifactId: String): Unit = addOrReplaceNode(dependenciesBaseXPath, s"/project/dependencies/dependency/artifactId[text()='$artifactId' and ../groupId[text() = '$groupId']]/..", dependency, s"""<dependency><groupId>$groupId</groupId><artifactId>$artifactId</artifactId></dependency>""") @ExportFunction(readOnly = false, description = "Add or replace a dependency") def addOrReplaceDependencyOfScope(@ExportFunctionParameterDescription(name = "groupId", description = "The value of the dependency's groupId") groupId: String, @ExportFunctionParameterDescription(name = "artifactId", description = "The value of the dependency's artifactId") artifactId: String, @ExportFunctionParameterDescription(name = "scope", description = "The value of the dependency's scope") scope: String): Unit = addOrReplaceNode(dependenciesBaseXPath, s"/project/dependencies/dependency/artifactId[text()='$artifactId' and ../groupId[text() = '$groupId']]/..", dependency, s"""<dependency><groupId>$groupId</groupId><artifactId>$artifactId</artifactId><scope>$scope</scope></dependency>""") @ExportFunction(readOnly = false, description = "Add or replace a dependency, providing version and scope") def addOrReplaceDependencyOfVersion(@ExportFunctionParameterDescription(name = "groupId", description = "The value of the dependency's groupId") groupId: String, @ExportFunctionParameterDescription(name = "artifactId", description = "The value of the dependency's artifactId") artifactId: String, @ExportFunctionParameterDescription(name = "newVersion", description = "The value of the dependency's version to be set") version: String): Unit = addOrReplaceNode(dependenciesBaseXPath, 
s"/project/dependencies/dependency/artifactId[text()='$artifactId' and ../groupId[text() = '$groupId']]/..", dependency, s"""<dependency><groupId>$groupId</groupId><artifactId>$artifactId</artifactId><version>$version</version></dependency>""") @ExportFunction(readOnly = false, description = "Add or replace a dependency, providing version and scope") def addOrReplaceDependencyOfVersionAndScope(@ExportFunctionParameterDescription(name = "groupId", description = "The value of the dependency's groupId") groupId: String, @ExportFunctionParameterDescription(name = "artifactId", description = "The value of the dependency's artifactId") artifactId: String, @ExportFunctionParameterDescription(name = "newVersion", description = "The value of the dependency's version to be set") version: String, @ExportFunctionParameterDescription(name = "scope", description = "The value of the dependency's scope to be set") scope: String): Unit = addOrReplaceNode(dependenciesBaseXPath, s"/project/dependencies/dependency/artifactId[text()='$artifactId' and ../groupId[text() = '$groupId']]/..", dependency, s"""<dependency><groupId>$groupId</groupId><artifactId>$artifactId</artifactId><version>$version</version><scope>$scope</scope></dependency>""") @ExportFunction(readOnly = false, description = "Add or replace a dependency's version") def addOrReplaceDependencyVersion(@ExportFunctionParameterDescription(name = "groupId", description = "The value of dependency's groupId") groupId: String, @ExportFunctionParameterDescription(name = "artifactId", description = "The value of the dependency's artifactId") artifactId: String, @ExportFunctionParameterDescription(name = "newVersion", description = "The value of the dependency's version to be set") newVersion: String): Unit = addOrReplaceDependencySubNode(artifactId, groupId, version, newVersion) @ExportFunction(readOnly = false, description = "Remove a dependency's version") def removeDependencyVersion(@ExportFunctionParameterDescription(name = "groupId", description = "The value of the dependency's groupId") groupId: String, @ExportFunctionParameterDescription(name = "artifactId", description = "The value of the dependency's artifactId") artifactId: String): Unit = removeDependencySubNode(artifactId, groupId, version) @ExportFunction(readOnly = false, description = "Add or replace a dependency's scope") def addOrReplaceDependencyScope(@ExportFunctionParameterDescription(name = "groupId", description = "The value of the dependency's groupId") groupId: String, @ExportFunctionParameterDescription(name = "artifactId", description = "The value of the dependency's artifactId") artifactId: String, @ExportFunctionParameterDescription(name = "newScope", description = "The new value of the dependency's scope to be set") newScope: String): Unit = addOrReplaceDependencySubNode(artifactId, groupId, scope, newScope) @ExportFunction(readOnly = false, description = "Remove a dependency's scope") def removeDependencyScope(@ExportFunctionParameterDescription(name = "groupId", description = "The value of the dependency's groupId") groupId: String, @ExportFunctionParameterDescription(name = "artifactId", description = "The value of the dependency's artifactId") artifactId: String): Unit = removeDependencySubNode(artifactId, groupId, scope) @ExportFunction(readOnly = false, description = "Removes a dependency") def removeDependency(@ExportFunctionParameterDescription(name = "groupId", description = "The value of the dependency's groupId") groupId: String, 
@ExportFunctionParameterDescription(name = "artifactId", description = "The value of the dependency's artifactId") artifactId: String): Unit = deleteNode(s"/project/dependencies/dependency/artifactId[text()='$artifactId' and ../groupId[text() = '$groupId']]/..") private def removeDependencySubNode(artifactId: String, groupId: String, subnode: String): Unit = deleteNode(s"/project/dependencies/dependency/artifactId[text()='$artifactId' and ../groupId[text() = '$groupId']]/../$subnode") private def addOrReplaceDependencySubNode(artifactId: String, groupId: String, subnode: String, content: String): Unit = addOrReplaceNode(s"/project/dependencies/dependency/artifactId [text()='$artifactId' and ../groupId [text() = '$groupId']]/..", s"/project/dependencies/dependency/artifactId[text()='$artifactId' and ../groupId[text() = '$groupId']]/../$subnode", subnode, s"""<$subnode>$content</$subnode>""") @ExportFunction(readOnly = false, description = "Adds or replaces a build plugin") def addOrReplaceBuildPlugin(@ExportFunctionParameterDescription(name = "groupId", description = "The value of the build plugin's groupId") groupId: String, @ExportFunctionParameterDescription(name = "artifactId", description = "The value of the build plugin's artifactId") artifactId: String, @ExportFunctionParameterDescription(name = "pluginContent", description = "The XML content for the plugin") pluginContent: String): Unit = addOrReplaceNode(buildPluginsBaseXPath, s"/project/build/plugins/plugin/artifactId [text()='$artifactId' and ../groupId [text() = '$groupId']]/..", plugin, pluginContent) @ExportFunction(readOnly = false, description = "Adds or replaces a build plugin management plugin") def addOrReplaceBuildPluginManagementPlugin(@ExportFunctionParameterDescription(name = "groupId", description = "The value of the build plugin management plugins's groupId") groupId: String, @ExportFunctionParameterDescription(name = "artifactId", description = "The value of the build plugin management plugins's artifactId") artifactId: String, @ExportFunctionParameterDescription(name = "pluginContent", description = "The XML content for the plugin") pluginContent: String): Unit = { addBuildPluginManagementSectionIfNotPresent() addOrReplaceNode(buildPluginManagementBaseXPath + "/plugins", s"/project/build/pluginManagement/plugins/plugin/artifactId [text()='$artifactId' and ../groupId [text() = '$groupId']]/..", plugin, pluginContent) } @ExportFunction(readOnly = false, description = "Adds or replaces a dependency management dependency") def addOrReplaceDependencyManagementDependency(@ExportFunctionParameterDescription(name = "groupId", description = "The value of the dependency's groupId") groupId: String, @ExportFunctionParameterDescription(name = "artifactId", description = "The value of the dependency's artifactId") artifactId: String, @ExportFunctionParameterDescription(name = "dependencyContent", description = "The XML content for the dependency") dependencyContent: String): Unit = { addDependencyManagementSectionIfNotPresent() addOrReplaceNode(s"$dependencyManagementBaseXPath/$dependencies", s"/$dependencyManagementBaseXPath/$dependencies/$dependency/$mavenArtifactId [text()='$artifactId' and ../$mavenGroupId [text() = '$groupId']]/..", dependency, dependencyContent) } @ExportFunction(readOnly = false, description = "Adds or replaces a profile") def addOrReplaceProfile(@ExportFunctionParameterDescription(name = "id", description = "The value of the profile's id") id: String, @ExportFunctionParameterDescription(name = 
"profileContent", description = "The XML content for the profile") profileContent: String): Unit = { addProfilesSectionIfNotPresent() addOrReplaceNode(s"$projectBaseXPath/profiles", s"/$projectBaseXPath/profiles/profile/id [text()='$id']/..", "profile", profileContent) } private def addProfilesSectionIfNotPresent(): Unit = { addNodeIfNotPresent(projectBaseXPath, s"$projectBaseXPath/profiles", "profiles", "<profiles><profile></profile></profiles>") } private def addBuildPluginManagementSectionIfNotPresent(): Unit = { addNodeIfNotPresent(buildBaseXPath, buildPluginManagementBaseXPath, pluginManagement, s"<$pluginManagement><$plugins></$plugins></$pluginManagement>") } private def addDependencyManagementSectionIfNotPresent(): Unit = { addNodeIfNotPresent(projectBaseXPath, dependencyManagementBaseXPath, dependencyManagement, s"<$dependencyManagement><$dependencies></$dependencies></$dependencyManagement>") } } class PomMutableView( originalBackingObject: FileArtifact, parent: ProjectMutableView) extends XmlMutableView(originalBackingObject, parent) with TerminalView[FileArtifact] with PomMutableViewNonMutatingFunctions with PomMutableViewMutatingFunctions
atomist/rug
src/main/scala/com/atomist/rug/kind/pom/PomMutableView.scala
Scala
gpl-3.0
26,909
package com.atomist.rug.runtime

import com.atomist.param.Tag

/**
 * For things common to _all_ Rugs
 */
trait Rug {
  def name: String

  def description: String

  def tags: Seq[Tag]
}
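// Hedged sketch (not part of the original rug source): a minimal concrete Rug, showing that
// implementations only need to supply the three members above. The values are placeholders.
case class SampleRug(name: String, description: String, tags: Seq[Tag] = Nil) extends Rug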
atomist/rug
src/main/scala/com/atomist/rug/runtime/Rug.scala
Scala
gpl-3.0
191
package com.scalableminds.util.requestlogging

import com.typesafe.scalalogging.LazyLogging
import play.api.http.{HttpEntity, Status}
import play.api.mvc.{Request, Result}

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration._

trait AbstractRequestLogging extends LazyLogging {

  def logRequestFormatted(request: Request[_],
                          result: Result,
                          notifier: Option[String => Unit],
                          requesterId: Option[String] = None): Unit = {
    if (Status.isSuccessful(result.header.status)) return
    val userIdMsg = requesterId.map(id => s" for user $id").getOrElse("")
    val resultMsg = s": ${resultBody(result)}"
    val msg = s"Answering ${result.header.status} at ${request.uri}$userIdMsg$resultMsg"
    logger.warn(msg)
    notifier.foreach(_(msg))
  }

  private def resultBody(result: Result): String =
    result.body match {
      case HttpEntity.Strict(byteString, _) => byteString.take(20000).decodeString("utf-8")
      case _ => ""
    }
}

trait RequestLogging extends AbstractRequestLogging {
  // Hint: within webKnossos itself, UserAwareRequestLogging is available, which additionally logs the requester user id

  def log(notifier: Option[String => Unit] = None)(block: => Future[Result])(implicit request: Request[_],
                                                                             ec: ExecutionContext): Future[Result] =
    for {
      result: Result <- block
      _ = logRequestFormatted(request, result, notifier)
    } yield result

  def logTime(notifier: String => Unit, durationThreshold: FiniteDuration = 10 seconds)(
      block: => Future[Result])(implicit request: Request[_], ec: ExecutionContext): Future[Result] = {

    def logTimeFormatted(executionTime: Long, request: Request[_], result: Result): Unit = {
      val debugString = s"Request ${request.method} ${request.uri} took ${BigDecimal(executionTime / 1e9).setScale(2, BigDecimal.RoundingMode.HALF_UP)} seconds and was${if (result.header.status != 200) " not " else " "}successful"
      logger.info(debugString)
      notifier(debugString)
    }

    val start = System.nanoTime()
    for {
      result: Result <- block
      executionTime = System.nanoTime() - start
      _ = if (executionTime > durationThreshold.toNanos) logTimeFormatted(executionTime, request, result)
    } yield result
  }
}
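// Hedged sketch (not part of the original webknossos file): a minimal caller of `log`,
// assuming a Play Request and an ExecutionContext are supplied by the enclosing code
// (e.g. a controller action). The object and method names are invented for illustration.
object RequestLoggingUsageSketch extends RequestLogging {
  def handled(block: => Future[Result])(implicit request: Request[_], ec: ExecutionContext): Future[Result] =
    log(notifier = None)(block)
}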
scalableminds/webknossos
util/src/main/scala/com/scalableminds/util/requestlogging/RequestLogging.scala
Scala
agpl-3.0
2,502
package streaming

import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkContext, SparkConf}

object Windowing {
  def main(args: Array[String]) {
    val conf = new SparkConf().setAppName("Windowing").setMaster("local[4]")
    val sc = new SparkContext(conf)

    // streams will produce data every second
    val ssc = new StreamingContext(sc, Seconds(1))
    val qm = new QueueMaker(sc, ssc)

    // create the stream
    val stream = qm.inputStream

    // register for data -- a five second sliding window every two seconds
    stream.window(Seconds(5), Seconds(2)).foreachRDD(r => {
      if (r.count() == 0)
        println("Empty")
      else
        println("Count = " + r.count() + " min = " + r.min() + " max = " + r.max())
    })

    // start streaming
    ssc.start()

    new Thread("Delayed Termination") {
      override def run() {
        qm.populateQueue()
        Thread.sleep(20000)
        println("*** stopping streaming")
        ssc.stop()
      }
    }.start()

    try {
      ssc.awaitTermination()
      println("*** streaming terminated")
    } catch {
      case e: Exception => {
        println("*** streaming exception caught in monitor thread")
      }
    }
  }
}
chocolateBlack/LearningSpark
src/main/scala/streaming/Windowing.scala
Scala
mit
1,239
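The QueueMaker referenced above is defined elsewhere in that repository; the following is only a sketch of what such a class might look like, assuming it feeds a queue of RDDs into StreamingContext.queueStream. The batch sizes and timings are made up for illustration.

import scala.collection.mutable
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.InputDStream

// Hypothetical QueueMaker: backs the input stream with a queue of RDDs filled over time
class QueueMaker(sc: SparkContext, ssc: StreamingContext) {
  private val rddQueue = new mutable.Queue[RDD[Int]]()

  // queueStream consumes one queued RDD per batch interval
  val inputStream: InputDStream[Int] = ssc.queueStream(rddQueue)

  // push ten small RDDs into the queue, roughly one per second
  def populateQueue(): Unit = {
    for (n <- 1 to 10) {
      rddQueue.synchronized {
        rddQueue += sc.makeRDD(1 to n * 10)
      }
      Thread.sleep(1000)
    }
  }
}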
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.rdd

import java.io.{IOException, ObjectOutputStream}

import scala.collection.mutable.ArrayBuffer
import scala.reflect.ClassTag

import org.apache.spark.{Dependency, Partition, RangeDependency, SparkContext, TaskContext}
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.util.Utils

/**
 * Partition for UnionRDD.
 *
 * @param idx index of the partition
 * @param rdd the parent RDD this partition refers to
 * @param parentRddIndex index of the parent RDD this partition refers to
 * @param parentRddPartitionIndex index of the partition within the parent RDD
 *                                this partition refers to
 */
private[spark] class UnionPartition[T: ClassTag](
    idx: Int,
    @transient rdd: RDD[T],
    val parentRddIndex: Int,
    @transient parentRddPartitionIndex: Int)
  extends Partition {

  var parentPartition: Partition = rdd.partitions(parentRddPartitionIndex)

  def preferredLocations() = rdd.preferredLocations(parentPartition)

  override val index: Int = idx

  @throws(classOf[IOException])
  private def writeObject(oos: ObjectOutputStream): Unit = Utils.tryOrIOException {
    // Update the reference to parent split at the time of task serialization
    parentPartition = rdd.partitions(parentRddPartitionIndex)
    oos.defaultWriteObject()
  }
}

@DeveloperApi
class UnionRDD[T: ClassTag](
    sc: SparkContext,
    var rdds: Seq[RDD[T]])
  extends RDD[T](sc, Nil) {  // Nil since we implement getDependencies

  override def getPartitions: Array[Partition] = {
    val array = new Array[Partition](rdds.map(_.partitions.size).sum)
    var pos = 0
    for ((rdd, rddIndex) <- rdds.zipWithIndex; split <- rdd.partitions) {
      array(pos) = new UnionPartition(pos, rdd, rddIndex, split.index)
      pos += 1
    }
    array
  }

  override def getDependencies: Seq[Dependency[_]] = {
    val deps = new ArrayBuffer[Dependency[_]]
    var pos = 0
    for (rdd <- rdds) {
      deps += new RangeDependency(rdd, 0, pos, rdd.partitions.size)
      pos += rdd.partitions.size
    }
    deps
  }

  override def compute(s: Partition, context: TaskContext): Iterator[T] = {
    val part = s.asInstanceOf[UnionPartition[T]]
    parent[T](part.parentRddIndex).iterator(part.parentPartition, context)
  }

  override def getPreferredLocations(s: Partition): Seq[String] =
    s.asInstanceOf[UnionPartition[T]].preferredLocations()

  override def clearDependencies() {
    super.clearDependencies()
    rdds = null
  }
}
Dax1n/spark-core
core/src/main/scala/org/apache/spark/rdd/UnionRDD.scala
Scala
apache-2.0
3,298
package com.azakordonets.entities

import com.azakordonets.enums.DateFormat
import org.joda.time.{DateTime, DateTimeZone}

class RelativeDate(private val initialDate: DateTime = DateTime.now()) {
  private var date = initialDate

  def this(timeZone: DateTimeZone) {
    this(DateTime.now(timeZone))
  }

  def tomorrow(): this.type = {
    date = date.plusDays(1)
    this
  }

  def yesterday(): this.type = {
    date = date.minusDays(1)
    this
  }

  def years(years: Int): this.type = {
    years match {
      case `years` if years > 0 => date = date.plusYears(years)
      case `years` if years < 0 => date = date.minusYears(Math.abs(years))
      case _ => // skip
    }
    this
  }

  def months(months: Int): this.type = {
    months match {
      case `months` if months > 0 => date = date.plusMonths(months)
      case `months` if months < 0 => date = date.minusMonths(Math.abs(months))
      case _ => // skip
    }
    this
  }

  def weeks(weeks: Int): this.type = {
    weeks match {
      case `weeks` if weeks > 0 => date = date.plusWeeks(weeks)
      case `weeks` if weeks < 0 => date = date.minusWeeks(Math.abs(weeks))
      case _ => // skip
    }
    this
  }

  def days(days: Int): this.type = {
    days match {
      case `days` if days > 0 => date = date.plusDays(days)
      case `days` if days < 0 => date = date.minusDays(Math.abs(days))
      case _ => // skip
    }
    this
  }

  def hours(hours: Int): this.type = {
    hours match {
      case `hours` if hours > 0 => date = date.plusHours(hours)
      case `hours` if hours < 0 => date = date.minusHours(Math.abs(hours))
      case _ => // skip
    }
    this
  }

  def minutes(minutes: Int): this.type = {
    minutes match {
      case `minutes` if minutes > 0 => date = date.plusMinutes(minutes)
      case `minutes` if minutes < 0 => date = date.minusMinutes(Math.abs(minutes))
      case _ => // skip
    }
    this
  }

  def seconds(seconds: Int): this.type = {
    seconds match {
      case `seconds` if seconds > 0 => date = date.plusSeconds(seconds)
      case `seconds` if seconds < 0 => date = date.minusSeconds(Math.abs(seconds))
      case _ => // skip
    }
    this
  }

  def asString: String = asString(DateFormat.dd_MM_yyyy)

  def asString(format: DateFormat): String = date.toString(format.getFormat)

  def asDate: DateTime = date
}
azakordonets/Utils
src/main/scala/com/azakordonets/entities/RelativeDate.scala
Scala
apache-2.0
2,350
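A brief usage sketch of the fluent API above; the printed values naturally depend on the current clock, and DateFormat.dd_MM_yyyy is the enum value the class itself uses as its default.

import com.azakordonets.enums.DateFormat

object RelativeDateExample extends App {
  // two weeks and three days from now, rendered with the default dd_MM_yyyy format
  println(new RelativeDate().weeks(2).days(3).asString)

  // yesterday, with an explicit format and as a raw DateTime
  println(new RelativeDate().yesterday().asString(DateFormat.dd_MM_yyyy))
  println(new RelativeDate().yesterday().asDate)
}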
package at.logic.gapt.testing

import at.logic.gapt.expr._
import at.logic.gapt.proofs.expansion.InstanceTermEncoding
import at.logic.gapt.proofs.loadExpansionProof

import scala.App
import ammonite.ops._

object dumpTermset extends App {
  val Array( inputFileName, outputFileName ) = args
  val inputPath = Path( inputFileName, pwd )
  val outputPath = Path( outputFileName, pwd )

  def simplifyNames( termset: Set[FOLTerm] ): Set[FOLTerm] = {
    val renaming: Map[Expr, Expr] = ( constants( termset ).toSeq ++ freeVariables( termset ).toSeq ).sortBy( _.toString ).
      zipWithIndex.map { case ( c, i ) => c -> Const( s"f$i", c.ty ) }.
      toMap
    termset.map( TermReplacement( _, renaming ).asInstanceOf[FOLTerm] )
  }

  def termToString( t: FOLTerm ): String = t match {
    case FOLConst( f )          => s"$f"
    case FOLFunction( f, args ) => s"$f(${args map termToString mkString ","})"
  }

  def writeTermset( outFile: Path, termset: Set[FOLTerm] ) =
    write.over( outFile, termset.map( termToString ).toSeq.sorted.map( _ + "\n" ).mkString )

  val expansionProof = loadExpansionProof( inputPath )
  val encoding = InstanceTermEncoding( expansionProof.shallow, Ti )
  val termSet = encoding.encode( expansionProof ).map( _.asInstanceOf[FOLTerm] )

  writeTermset( outputPath, simplifyNames( termSet ) )
}
gebner/gapt
testing/src/main/scala/termsets.scala
Scala
gpl-3.0
1,335
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.internal import java.net.URL import java.util.Locale import scala.reflect.ClassTag import scala.util.control.NonFatal import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.FsUrlStreamHandlerFactory import org.apache.spark.{SparkConf, SparkContext, SparkException} import org.apache.spark.internal.Logging import org.apache.spark.sql.SQLContext import org.apache.spark.sql.catalyst.catalog._ import org.apache.spark.sql.execution.CacheManager import org.apache.spark.sql.execution.ui.{SQLAppStatusListener, SQLAppStatusStore, SQLTab} import org.apache.spark.sql.internal.StaticSQLConf._ import org.apache.spark.status.ElementTrackingStore import org.apache.spark.util.{MutableURLClassLoader, Utils} /** * A class that holds all state shared across sessions in a given [[SQLContext]]. */ private[sql] class SharedState(val sparkContext: SparkContext) extends Logging { // Load hive-site.xml into hadoopConf and determine the warehouse path we want to use, based on // the config from both hive and Spark SQL. Finally set the warehouse config value to sparkConf. val warehousePath: String = { val configFile = Utils.getContextOrSparkClassLoader.getResource("hive-site.xml") if (configFile != null) { logInfo(s"loading hive config file: $configFile") sparkContext.hadoopConfiguration.addResource(configFile) } // hive.metastore.warehouse.dir only stay in hadoopConf sparkContext.conf.remove("hive.metastore.warehouse.dir") // Set the Hive metastore warehouse path to the one we use val hiveWarehouseDir = sparkContext.hadoopConfiguration.get("hive.metastore.warehouse.dir") if (hiveWarehouseDir != null && !sparkContext.conf.contains(WAREHOUSE_PATH.key)) { // If hive.metastore.warehouse.dir is set and spark.sql.warehouse.dir is not set, // we will respect the value of hive.metastore.warehouse.dir. sparkContext.conf.set(WAREHOUSE_PATH.key, hiveWarehouseDir) logInfo(s"${WAREHOUSE_PATH.key} is not set, but hive.metastore.warehouse.dir " + s"is set. Setting ${WAREHOUSE_PATH.key} to the value of " + s"hive.metastore.warehouse.dir ('$hiveWarehouseDir').") hiveWarehouseDir } else { // If spark.sql.warehouse.dir is set, we will override hive.metastore.warehouse.dir using // the value of spark.sql.warehouse.dir. // When neither spark.sql.warehouse.dir nor hive.metastore.warehouse.dir is set, // we will set hive.metastore.warehouse.dir to the default value of spark.sql.warehouse.dir. 
val sparkWarehouseDir = sparkContext.conf.get(WAREHOUSE_PATH) logInfo(s"Setting hive.metastore.warehouse.dir ('$hiveWarehouseDir') to the value of " + s"${WAREHOUSE_PATH.key} ('$sparkWarehouseDir').") sparkContext.hadoopConfiguration.set("hive.metastore.warehouse.dir", sparkWarehouseDir) sparkWarehouseDir } } logInfo(s"Warehouse path is '$warehousePath'.") /** * Class for caching query results reused in future executions. */ val cacheManager: CacheManager = new CacheManager /** * A status store to query SQL status/metrics of this Spark application, based on SQL-specific * [[org.apache.spark.scheduler.SparkListenerEvent]]s. */ val statusStore: SQLAppStatusStore = { val kvStore = sparkContext.statusStore.store.asInstanceOf[ElementTrackingStore] val listener = new SQLAppStatusListener(sparkContext.conf, kvStore, live = true) sparkContext.listenerBus.addToStatusQueue(listener) val statusStore = new SQLAppStatusStore(kvStore, Some(listener)) sparkContext.ui.foreach(new SQLTab(statusStore, _)) statusStore } /** * A catalog that interacts with external systems. */ lazy val externalCatalog: ExternalCatalog = { val externalCatalog = SharedState.reflect[ExternalCatalog, SparkConf, Configuration]( SharedState.externalCatalogClassName(sparkContext.conf), sparkContext.conf, sparkContext.hadoopConfiguration) val defaultDbDefinition = CatalogDatabase( SessionCatalog.DEFAULT_DATABASE, "default database", CatalogUtils.stringToURI(warehousePath), Map()) // Create default database if it doesn't exist if (!externalCatalog.databaseExists(SessionCatalog.DEFAULT_DATABASE)) { // There may be another Spark application creating default database at the same time, here we // set `ignoreIfExists = true` to avoid `DatabaseAlreadyExists` exception. externalCatalog.createDatabase(defaultDbDefinition, ignoreIfExists = true) } // Make sure we propagate external catalog events to the spark listener bus externalCatalog.addListener(new ExternalCatalogEventListener { override def onEvent(event: ExternalCatalogEvent): Unit = { sparkContext.listenerBus.post(event) } }) externalCatalog } /** * A manager for global temporary views. */ lazy val globalTempViewManager: GlobalTempViewManager = { // System preserved database should not exists in metastore. However it's hard to guarantee it // for every session, because case-sensitivity differs. Here we always lowercase it to make our // life easier. val globalTempDB = sparkContext.conf.get(GLOBAL_TEMP_DATABASE).toLowerCase(Locale.ROOT) if (externalCatalog.databaseExists(globalTempDB)) { throw new SparkException( s"$globalTempDB is a system preserved database, please rename your existing database " + "to resolve the name conflict, or set a different value for " + s"${GLOBAL_TEMP_DATABASE.key}, and launch your Spark application again.") } new GlobalTempViewManager(globalTempDB) } /** * A classloader used to load all user-added jar. 
*/ val jarClassLoader = new NonClosableMutableURLClassLoader( org.apache.spark.util.Utils.getContextOrSparkClassLoader) } object SharedState extends Logging { try { URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory()) } catch { case e: Error => logWarning("URL.setURLStreamHandlerFactory failed to set FsUrlStreamHandlerFactory") } private val HIVE_EXTERNAL_CATALOG_CLASS_NAME = "org.apache.spark.sql.hive.HiveExternalCatalog" private def externalCatalogClassName(conf: SparkConf): String = { conf.get(CATALOG_IMPLEMENTATION) match { case "hive" => HIVE_EXTERNAL_CATALOG_CLASS_NAME case "in-memory" => classOf[InMemoryCatalog].getCanonicalName } } /** * Helper method to create an instance of [[T]] using a single-arg constructor that * accepts an [[Arg1]] and an [[Arg2]]. */ private def reflect[T, Arg1 <: AnyRef, Arg2 <: AnyRef]( className: String, ctorArg1: Arg1, ctorArg2: Arg2)( implicit ctorArgTag1: ClassTag[Arg1], ctorArgTag2: ClassTag[Arg2]): T = { try { val clazz = Utils.classForName(className) val ctor = clazz.getDeclaredConstructor(ctorArgTag1.runtimeClass, ctorArgTag2.runtimeClass) val args = Array[AnyRef](ctorArg1, ctorArg2) ctor.newInstance(args: _*).asInstanceOf[T] } catch { case NonFatal(e) => throw new IllegalArgumentException(s"Error while instantiating '$className':", e) } } } /** * URL class loader that exposes the `addURL` and `getURLs` methods in URLClassLoader. * This class loader cannot be closed (its `close` method is a no-op). */ private[sql] class NonClosableMutableURLClassLoader(parent: ClassLoader) extends MutableURLClassLoader(Array.empty, parent) { override def close(): Unit = {} }
brad-kaiser/spark
sql/core/src/main/scala/org/apache/spark/sql/internal/SharedState.scala
Scala
apache-2.0
8,363
package ru.maizy.ambient7.core.config.options

/**
 * Copyright (c) Nikita Kovaliov, maizy.ru, 2016-2017
 * See LICENSE.txt for details.
 */

import ru.maizy.ambient7.core.config.Defaults
import ru.maizy.influxdbclient.InfluxDbConnectionSettings

case class InfluxDbOptions(
    database: Option[String] = None,
    baseUrl: String = Defaults.INFLUXDB_BASEURL,
    user: Option[String] = None,
    password: Option[String] = None,
    readonlyBaseUrl: Option[String] = None,
    readonlyUser: Option[String] = None,
    readonlyPassword: Option[String] = None
) {

  lazy val clientConnectionSettings: Option[InfluxDbConnectionSettings] =
    database.map(
      InfluxDbConnectionSettings(
        baseUrl,
        _,
        user,
        password
      )
    )

  lazy val readonlyClientConnectionSetting: Option[InfluxDbConnectionSettings] =
    database.map(
      InfluxDbConnectionSettings(
        readonlyBaseUrl.getOrElse(baseUrl),
        _,
        readonlyUser,
        readonlyPassword
      )
    )
}
maizy/ambient7
core/src/main/scala/ru/maizy/ambient7/core/config/options/InfluxDbOptions.scala
Scala
apache-2.0
1,018
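A small sketch of how the derived connection settings behave; the database name and credentials below are placeholders, not values from the project.

val options = InfluxDbOptions(
  database = Some("ambient7"),       // hypothetical database name
  baseUrl = "http://localhost:8086",
  user = Some("writer"),
  password = Some("secret")
)

// Both lazy vals are None unless a database name is supplied
options.clientConnectionSettings.foreach(println)
// With no readonly overrides set, the read-only settings fall back to baseUrl
// and the (empty) readonly credentials
options.readonlyClientConnectionSetting.foreach(println)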
/**
 * Copyright (c) 2014-2016 Snowplow Analytics Ltd.
 * All rights reserved.
 *
 * This program is licensed to you under the Apache License Version 2.0,
 * and you may not use this file except in compliance with the Apache
 * License Version 2.0.
 * You may obtain a copy of the Apache License Version 2.0 at
 * http://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the Apache License Version 2.0 is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
 * either express or implied.
 *
 * See the Apache License Version 2.0 for the specific language
 * governing permissions and limitations there under.
 */
package com.snowplowanalytics.snowplow
package storage
package utils

import org.json4s._
import org.json4s.jackson.JsonMethods._
import org.json4s.JsonDSL._

import com.typesafe.config.Config

import com.snowplowanalytics.snowplow.scalatracker.Tracker
import com.snowplowanalytics.snowplow.scalatracker.SelfDescribingJson
import com.snowplowanalytics.snowplow.scalatracker.emitters.AsyncEmitter

/**
 * Functionality for sending Snowplow events for monitoring purposes
 */
object Tracking {

  private val HeartbeatInterval = 300000L

  private val StorageType = "ELASTICSEARCH"

  /**
   * Configure a Tracker based on the configuration HOCON
   *
   * @param config The "monitoring.snowplow" section of the HOCON
   * @return a new tracker instance
   */
  def initializeTracker(config: Config): Tracker = {
    val endpoint = config.getString("collector-uri")
    val port = config.getInt("collector-port")
    val appName = config.getString("app-id")
    // Not yet used
    val method = config.getString("method")
    val emitter = AsyncEmitter.createAndStart(endpoint, port)
    new Tracker(List(emitter), generated.Settings.name, appName)
  }

  /**
   * If a tracker has been configured, send a sink_write_failed event
   *
   * @param tracker a Tracker instance
   * @param lastRetryPeriod the backoff period between attempts
   * @param failureCount the number of consecutive failed writes
   * @param initialFailureTime Time of the first consecutive failed write
   * @param message What went wrong
   */
  def sendFailureEvent(
    tracker: Tracker,
    lastRetryPeriod: Long,
    failureCount: Long,
    initialFailureTime: Long,
    message: String): Unit =

    tracker.trackUnstructEvent(
      SelfDescribingJson(
        "iglu:com.snowplowanalytics.monitoring.kafka/storage_write_failed/jsonschema/1-0-0",
        ("storage" -> StorageType) ~
          ("failureCount" -> failureCount) ~
          ("initialFailureTime" -> initialFailureTime) ~
          ("lastRetryPeriod" -> lastRetryPeriod) ~
          ("message" -> message)
      ))

  /**
   * Send an initialization event and schedule heartbeat and shutdown events
   *
   * @param tracker a Tracker instance
   */
  def initializeSnowplowTracking(tracker: Tracker): Unit = {
    trackApplicationInitialization(tracker)

    Runtime.getRuntime.addShutdownHook(new Thread() {
      override def run(): Unit =
        trackApplicationShutdown(tracker)
    })

    val heartbeatThread = new Thread {
      override def run(): Unit =
        while (true) {
          trackApplicationHeartbeat(tracker, HeartbeatInterval)
          Thread.sleep(HeartbeatInterval)
        }
    }

    heartbeatThread.start()
  }

  /**
   * Send an application_initialized unstructured event
   *
   * @param tracker a Tracker instance
   */
  private def trackApplicationInitialization(tracker: Tracker): Unit =
    tracker.trackUnstructEvent(
      SelfDescribingJson(
        "iglu:com.snowplowanalytics.monitoring.kafka/app_initialized/jsonschema/1-0-0",
        JObject(Nil)
      ))

  /**
   * Send an application_shutdown unstructured event
   *
   * @param tracker a Tracker instance
   */
  def trackApplicationShutdown(tracker: Tracker): Unit =
    tracker.trackUnstructEvent(
      SelfDescribingJson(
        "iglu:com.snowplowanalytics.monitoring.kafka/app_shutdown/jsonschema/1-0-0",
        JObject(Nil)
      ))

  /**
   * Send a heartbeat unstructured event
   *
   * @param tracker a Tracker instance
   * @param heartbeatInterval Time between heartbeats in milliseconds
   */
  private def trackApplicationHeartbeat(tracker: Tracker, heartbeatInterval: Long): Unit =
    tracker.trackUnstructEvent(
      SelfDescribingJson(
        "iglu:com.snowplowanalytics.monitoring.kafka/app_heartbeat/jsonschema/1-0-0",
        "interval" -> heartbeatInterval
      ))
}
TimothyKlim/snowplow
4-storage/kafka-elasticsearch-sink/src/main/scala/com/snowplowanalytics/snowplow/storage/utils/Tracking.scala
Scala
apache-2.0
4,723
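A hedged sketch of wiring the tracker up from a HOCON snippet; the collector host, port, and app id are placeholders, but the key names match what initializeTracker reads above.

import com.typesafe.config.ConfigFactory

val monitoringConfig = ConfigFactory.parseString(
  """
    |collector-uri = "collector.example.com"
    |collector-port = 80
    |app-id = "kafka-elasticsearch-sink"
    |method = "get"
  """.stripMargin)

val tracker = Tracking.initializeTracker(monitoringConfig)
// registers the shutdown hook and starts the heartbeat thread
Tracking.initializeSnowplowTracking(tracker)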
/*
 * Copyright (C) 2013 Alcatel-Lucent.
 *
 * See the NOTICE file distributed with this work for additional
 * information regarding copyright ownership.
 * Licensed to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package molecule.examples.io.stopwatch

import molecule._
import io._
import javax.swing.SwingUtilities

/**
 * @author Sebastien Bocq
 */
object SwingExecutor extends SysIO {
  def execute(r: Runnable) {
    SwingUtilities.invokeLater(r)
  }
}
molecule-labs/molecule
molecule-io-examples/src/main/scala/molecule/examples/io/stopwatch/SwingExecutor.scala
Scala
apache-2.0
967
package org.scaladebugger.api.profiles.java.info import com.sun.jdi._ import org.scaladebugger.api.lowlevel.{InvokeNonVirtualArgument, InvokeSingleThreadedArgument, JDIArgument} import org.scaladebugger.api.profiles.traits.info._ import org.scaladebugger.api.virtualmachines.ScalaVirtualMachine import org.scaladebugger.test.helpers.ParallelMockFunSpec import org.scalamock.scalatest.MockFactory import org.scalatest.{FunSpec, Matchers, ParallelTestExecution} class JavaObjectInfoSpec extends ParallelMockFunSpec { private val mockNewFieldProfile = mockFunction[Field, Int, FieldVariableInfo] private val mockNewMethodProfile = mockFunction[Method, MethodInfo] private val mockNewValueProfile = mockFunction[Value, ValueInfo] private val mockNewTypeProfile = mockFunction[Type, TypeInfo] private val mockNewTypeCheckerProfile = mockFunction[TypeChecker] private val mockScalaVirtualMachine = mock[ScalaVirtualMachine] private val mockInfoProducerProfile = mock[InfoProducer] private val mockVirtualMachine = mock[VirtualMachine] private val mockReferenceType = mock[ReferenceType] private val mockObjectReference = mock[ObjectReference] private val javaObjectInfoProfile = new JavaObjectInfo( mockScalaVirtualMachine, mockInfoProducerProfile, mockObjectReference )( _virtualMachine = mockVirtualMachine, _referenceType = mockReferenceType ) { override protected def newFieldProfile(field: Field, offsetIndex: Int): FieldVariableInfo = mockNewFieldProfile(field, offsetIndex) override protected def newMethodProfile(method: Method): MethodInfo = mockNewMethodProfile(method) override protected def newValueProfile(value: Value): ValueInfo = mockNewValueProfile(value) override protected def newTypeCheckerProfile(): TypeChecker = mockNewTypeCheckerProfile() override protected def newTypeProfile(_type: Type): TypeInfo = mockNewTypeProfile(_type) } describe("JavaObjectInfo") { describe("#toJavaInfo") { it("should return a new instance of the Java profile representation") { val expected = mock[ObjectInfo] val mockF = mockFunction[VirtualMachine, ReferenceType, ObjectInfo] // Get Java version of info producer (mockInfoProducerProfile.toJavaInfo _).expects() .returning(mockInfoProducerProfile).once() // Create new info profile using Java version of info producer // NOTE: Cannot validate second set of args because they are // call-by-name, which ScalaMock does not support presently (mockInfoProducerProfile.newObjectInfo( _: ScalaVirtualMachine, _: ObjectReference )( _: VirtualMachine, _: ReferenceType )).expects( mockScalaVirtualMachine, mockObjectReference, *, * ).returning(expected).once() val actual = javaObjectInfoProfile.toJavaInfo actual should be (expected) } } describe("#isJavaInfo") { it("should return true") { val expected = true val actual = javaObjectInfoProfile.isJavaInfo actual should be (expected) } } describe("#toJdiInstance") { it("should return the JDI instance this profile instance represents") { val expected = mockObjectReference val actual = javaObjectInfoProfile.toJdiInstance actual should be (expected) } } describe("#typeInfo") { it("should should return a new reference type profile wrapping the type") { val expected = mock[ReferenceTypeInfo] val mockType = mock[Type] (mockObjectReference.`type` _).expects() .returning(mockType).once() val mockTypeInfoProfile = mock[TypeInfo] mockNewTypeProfile.expects(mockType) .returning(mockTypeInfoProfile).once() (mockTypeInfoProfile.toReferenceType _).expects() .returning(expected).once() val actual = javaObjectInfoProfile.`type` actual should be (expected) } } 
describe("#uniqueId") { it("should return the unique id of the object") { val expected = 12345L (mockObjectReference.uniqueID _).expects().returning(expected).once() val actual = javaObjectInfoProfile.uniqueId actual should be (expected) } } describe("#referenceType") { it("should return a profile wrapping the object's reference type") { val expected = mock[ReferenceTypeInfo] val mockReferenceType = mock[ReferenceType] (mockObjectReference.referenceType _).expects() .returning(mockReferenceType).once() val mockTypeInfoProfile = mock[TypeInfo] mockNewTypeProfile.expects(mockReferenceType) .returning(mockTypeInfoProfile).once() (mockTypeInfoProfile.toReferenceType _).expects() .returning(expected).once() val actual = javaObjectInfoProfile.referenceType actual should be (expected) } } describe("#invoke(thread profile, method profile, arguments, JDI arguments)") { it("should invoke using the provided thread and method, returning wrapper profile of value") { val expected = mock[ValueInfo] val mockThreadReference = mock[ThreadReference] val mockThreadInfoProfile = mock[ThreadInfo] (mockThreadInfoProfile.toJdiInstance _).expects() .returning(mockThreadReference).once() val mockMethod = mock[Method] val javaMethodInfoProfile = new JavaMethodInfo( mockScalaVirtualMachine, mockInfoProducerProfile, mockMethod ) // Object method is invoked val mockValue = mock[Value] import scala.collection.JavaConverters._ (mockObjectReference.invokeMethod _).expects( mockThreadReference, mockMethod, Seq[Value]().asJava, 0 ).returning(mockValue).once() // Profile is created for return value mockNewValueProfile.expects(mockValue).returning(expected).once() val actual = javaObjectInfoProfile.invoke( mockThreadInfoProfile, javaMethodInfoProfile, Nil ) actual should be (expected) } it("should invoke using the provided arguments") { val arguments = Seq(1) val mockThreadReference = mock[ThreadReference] val mockThreadInfoProfile = mock[ThreadInfo] (mockThreadInfoProfile.toJdiInstance _).expects() .returning(mockThreadReference).once() val mockMethod = mock[Method] val javaMethodInfoProfile = new JavaMethodInfo( mockScalaVirtualMachine, mockInfoProducerProfile, mockMethod ) // Arguments are mirrored remotely val mockValues = Seq(mock[IntegerValue]) arguments.zip(mockValues).foreach { case (ar, ma) => (mockVirtualMachine.mirrorOf(_: Int)).expects(ar) .returning(ma).once() } // Object method is invoked val mockValue = mock[Value] import scala.collection.JavaConverters._ (mockObjectReference.invokeMethod _).expects( *, *, mockValues.asJava, * ).returning(mockValue).once() // Profile is created for return value mockNewValueProfile.expects(*).once() javaObjectInfoProfile.invoke( mockThreadInfoProfile, javaMethodInfoProfile, arguments ) } it("should provide relevant JDI options as an OR'd value") { val jdiArguments = Seq( InvokeNonVirtualArgument, InvokeSingleThreadedArgument ) val mockThreadReference = mock[ThreadReference] val mockThreadInfoProfile = mock[ThreadInfo] (mockThreadInfoProfile.toJdiInstance _).expects() .returning(mockThreadReference).once() val mockMethod = mock[Method] val javaMethodInfoProfile = new JavaMethodInfo( mockScalaVirtualMachine, mockInfoProducerProfile, mockMethod ) // Object method is invoked // NOTE: Both arguments OR'd together is 3 (1 | 2) (mockObjectReference.invokeMethod _).expects(*, *, *, 3) .returning(mock[Value]).once() // Profile is created for return value mockNewValueProfile.expects(*).returning(null).once() javaObjectInfoProfile.invoke( mockThreadInfoProfile, javaMethodInfoProfile, Nil, 
jdiArguments: _* ) } } describe("#methods") { it("should return a collection of profiles wrapping the object's visible methods") { val expected = Seq(mock[MethodInfo]) // Lookup the visible methods import scala.collection.JavaConverters._ val mockMethods = Seq(mock[Method]) (mockReferenceType.visibleMethods _).expects() .returning(mockMethods.asJava).once() // Create the new profiles for the methods mockMethods.zip(expected).foreach { case (m, e) => mockNewMethodProfile.expects(m).returning(e).once() } val actual = javaObjectInfoProfile.methods actual should be (expected) } } describe("#methodOption") { it("should return None if no method with matching name is found") { val expected = None val name = "someName" val paramTypes = Seq("some.type") // Lookup the method and return empty list indicating no method found import scala.collection.JavaConverters._ (mockReferenceType.methodsByName(_: String)).expects(name) .returning(Seq[Method]().asJava).once() val actual = javaObjectInfoProfile.methodOption(name, paramTypes: _*) actual should be (expected) } it("should return None if no method with matching parameters is found") { val expected = None val name = "someName" val paramTypes = Seq("some.type") // Lookup the method and return method indicating matching name found val mockMethod = mock[Method] import scala.collection.JavaConverters._ (mockReferenceType.methodsByName(_: String)).expects(name) .returning(Seq(mockMethod).asJava).once() (mockMethod.argumentTypeNames _).expects() .returning(paramTypes.map(_ + "other").asJava).once() // Arguments do not match, so return false val mockTypeCheckerProfile = mock[TypeChecker] mockNewTypeCheckerProfile.expects() .returning(mockTypeCheckerProfile).once() (mockTypeCheckerProfile.equalTypeNames _).expects(*, *) .returning(false).once() val actual = javaObjectInfoProfile.methodOption(name, paramTypes: _*) actual should be (expected) } it("should return Some profile wrapping the associated method if found") { val expected = Some(mock[MethodInfo]) val name = "someName" val paramTypes = Seq("some.type") // Lookup the method and return method indicating matching name found val mockMethod = mock[Method] import scala.collection.JavaConverters._ (mockReferenceType.methodsByName(_: String)).expects(name) .returning(Seq(mockMethod).asJava).once() (mockMethod.argumentTypeNames _).expects() .returning(paramTypes.asJava).once() // Arguments do match, so return true val mockTypeCheckerProfile = mock[TypeChecker] mockNewTypeCheckerProfile.expects() .returning(mockTypeCheckerProfile).once() (mockTypeCheckerProfile.equalTypeNames _).expects(*, *) .returning(true).once() // New method profile created mockNewMethodProfile.expects(mockMethod).returning(expected.get).once() val actual = javaObjectInfoProfile.methodOption(name, paramTypes: _*) actual should be (expected) } } describe("#fields") { it("should return a collection of profiles wrapping the object's visible fields") { val expected = Seq(mock[FieldVariableInfo]) // Lookup the visible fields import scala.collection.JavaConverters._ val mockFields = Seq(mock[Field]) (mockReferenceType.visibleFields _).expects() .returning(mockFields.asJava).once() // Create the new profiles for the fields mockFields.zip(expected).foreach { case (f, e) => mockNewFieldProfile.expects(f, -1).returning(e).once() } val actual = javaObjectInfoProfile.fields actual should be (expected) } } describe("#fieldOption") { it("should return None if no field with matching name is found") { val expected = None val name = "someName" // Lookup the 
field and return null indicating no field found (mockReferenceType.fieldByName _).expects(name) .returning(null).once() val actual = javaObjectInfoProfile.fieldOption(name) actual should be (expected) } it("should return Some profile wrapping the associated field if found") { val expected = Some(mock[FieldVariableInfo]) val name = "someName" // Lookup the field val mockField = mock[Field] (mockReferenceType.fieldByName _).expects(name) .returning(mockField).once() // Create the new profile mockNewFieldProfile.expects(mockField, -1).returning(expected.get).once() val actual = javaObjectInfoProfile.fieldOption(name) actual should be(expected) } } describe("#indexedFields") { it("should return a collection of profiles wrapping the object's visible fields") { val expected = Seq(mock[FieldVariableInfo]) // Lookup the visible fields import scala.collection.JavaConverters._ val mockFields = Seq(mock[Field]) (mockReferenceType.visibleFields _).expects() .returning(mockFields.asJava).once() // Create the new profiles for the fields mockFields.zip(expected).zipWithIndex.foreach { case ((f, e), i) => mockNewFieldProfile.expects(f, i).returning(e).once() } val actual = javaObjectInfoProfile.indexedFields actual should be (expected) } } describe("#indexedField") { it("should return None if no field with matching name is found") { val expected = None val name = "someName" // Lookup the visible fields (Nil indicates none) import scala.collection.JavaConverters._ (mockReferenceType.visibleFields _).expects() .returning(Seq[Field]().asJava).once() val actual = javaObjectInfoProfile.indexedFieldOption(name) actual should be (expected) } it("should return a profile wrapping the associated field if found") { val expected = Some(mock[FieldVariableInfo]) val name = "someName" // Lookup the visible fields val mockField = mock[Field] (expected.get.name _).expects().returning(name).once() import scala.collection.JavaConverters._ (mockReferenceType.visibleFields _).expects() .returning(Seq(mockField).asJava).once() // Create the new profile mockNewFieldProfile.expects(mockField, 0).returning(expected.get).once() val actual = javaObjectInfoProfile.indexedFieldOption(name) actual should be (expected) } } } }
ensime/scala-debugger
scala-debugger-api/src/test/scala/org/scaladebugger/api/profiles/java/info/JavaObjectInfoSpec.scala
Scala
apache-2.0
15,640
package org.scalajars.web

import org.scalajars.core._

package object controllers {
  trait RedisStore extends RedisStoreImpl {
    def namespace = "scalajars"
  }

  object Browser extends Browser with RedisStore
  object Publisher extends Publisher with Users with RedisStore
  object Users extends Users with RedisStore
}
teamon/scalajars.org
app/WebApp.scala
Scala
mit
328
package org.broadinstitute.clio.server.webservice

import java.time.OffsetDateTime

object MockRejectionDirectives extends RejectionDirectives(OffsetDateTime.MIN)
broadinstitute/clio
clio-server/src/test/scala/org/broadinstitute/clio/server/webservice/MockRejectionDirectives.scala
Scala
bsd-3-clause
163
import java.text.SimpleDateFormat import java.util.Date import com.typesafe.sbt.SbtScalariform._ import net.virtualvoid.sbt.graph.Plugin._ import sbt.Keys._ import sbt._ import sbtassembly.Plugin.AssemblyKeys._ import sbtassembly.Plugin._ import sbtbuildinfo.Plugin._ object Resolvers { val codebragResolvers = Seq( "Sonatype releases" at "http://oss.sonatype.org/content/repositories/releases/", "Sonatype snapshots" at "http://oss.sonatype.org/content/repositories/snapshots/", "SotwareMill Public Releases" at "https://nexus.softwaremill.com/content/repositories/releases/", "SotwareMill Public Snapshots" at "https://nexus.softwaremill.com/content/repositories/snapshots/", "TorqueBox Releases" at "http://rubygems-proxy.torquebox.org/releases", "RoundEights" at "http://maven.spikemark.net/roundeights" ) } object BuildSettings { import Resolvers._ val buildSettings = Defaults.coreDefaultSettings ++ net.virtualvoid.sbt.graph.Plugin.graphSettings ++ defaultScalariformSettings ++ Seq( organization := "com.softwaremill", version := "2.3.2", scalaVersion := "2.10.4", resolvers := codebragResolvers, scalacOptions += "-unchecked", classpathTypes ~= (_ + "orbit"), libraryDependencies ++= Dependencies.testingDependencies, libraryDependencies ++= Dependencies.logging, libraryDependencies ++= Seq(Dependencies.guava, Dependencies.googleJsr305), concurrentRestrictions in Global += Tags.limit(Tags.Test, 1) // no parallel execution of tests, because we are starting mongo in tests ) } object Dependencies { val slf4jVersion = "1.7.2" val logBackVersion = "1.0.9" val scalatraVersion = "2.2.2" val rogueVersion = "2.1.0" val scalaLoggingVersion = "1.0.1" val akkaVersion = "2.1.4" val jettyVersion = "8.1.7.v20120910" val slf4jApi = "org.slf4j" % "slf4j-api" % slf4jVersion val logBackClassic = "ch.qos.logback" % "logback-classic" % logBackVersion val log4jOverSlf4j = "org.slf4j" % "log4j-over-slf4j" % slf4jVersion val scalaLogging = "com.typesafe" %% "scalalogging-slf4j" % scalaLoggingVersion val logging = Seq(slf4jApi, logBackClassic, scalaLogging, log4jOverSlf4j) val guava = "com.google.guava" % "guava" % "13.0.1" val googleJsr305 = "com.google.code.findbugs" % "jsr305" % "2.0.1" val scalatra = "org.scalatra" %% "scalatra" % scalatraVersion val scalatraScalatest = "org.scalatra" %% "scalatra-scalatest" % scalatraVersion % "test" val scalatraJson = "org.scalatra" %% "scalatra-json" % scalatraVersion val json4s = "org.json4s" %% "json4s-jackson" % "3.2.10" val json4sExt = "org.json4s" %% "json4s-ext" % "3.2.10" val scalatraAuth = "org.scalatra" %% "scalatra-auth" % scalatraVersion exclude("commons-logging", "commons-logging") val jodaTime = "joda-time" % "joda-time" % "2.0" val jodaConvert = "org.joda" % "joda-convert" % "1.2" val commonsValidator = "commons-validator" % "commons-validator" % "1.4.0" exclude("commons-logging", "commons-logging") val commonsLang = "org.apache.commons" % "commons-lang3" % "3.1" val commonsCodec = "commons-codec" % "commons-codec" % "1.8" val jetty = "org.eclipse.jetty" % "jetty-webapp" % jettyVersion val mockito = "org.mockito" % "mockito-all" % "1.9.5" % "test" val scalatest = "org.scalatest" % "scalatest_2.10" % "1.9.1" % "test" val jodaDependencies = Seq(jodaTime, jodaConvert) val scalatraStack = Seq(scalatra, scalatraScalatest, scalatraJson, json4s, scalatraAuth, commonsLang) val akka = "com.typesafe.akka" %% "akka-actor" % akkaVersion val akkaSlf4j = "com.typesafe.akka" %% "akka-slf4j" % akkaVersion val akkaTestkit = "com.typesafe.akka" %% "akka-testkit" % akkaVersion % "test" val 
typesafeConfig = "com.typesafe" % "config" % "1.0.1" val testingDependencies = Seq(mockito, scalatest, akkaTestkit) val javaxMail = "javax.mail" % "mail" % "1.4.5" val scalate = "org.fusesource.scalate" %% "scalate-core" % "1.6.1" val seleniumVer = "2.33.0" val seleniumJava = "org.seleniumhq.selenium" % "selenium-java" % seleniumVer % "test" val seleniumFirefox = "org.seleniumhq.selenium" % "selenium-firefox-driver" % seleniumVer % "test" val fest = "org.easytesting" % "fest-assert" % "1.4" % "test" val awaitility = "com.jayway.awaitility" % "awaitility-scala" % "1.3.5" % "test" val selenium = Seq(seleniumJava, seleniumFirefox, fest) // If the scope is provided;test, as in scalatra examples then gen-idea generates the incorrect scope (test). // As provided implies test, so is enough here. val servletApiProvided = "org.eclipse.jetty.orbit" % "javax.servlet" % "3.0.0.v201112011016" % "provided" artifacts (Artifact("javax.servlet", "jar", "jar")) val bson = "org.mongodb" % "bson" % "2.7.1" val egitGithubApi = "org.eclipse.mylyn.github" % "org.eclipse.egit.github.core" % "2.1.3" val jGit = "org.eclipse.jgit" % "org.eclipse.jgit" % "2.3.1.201302201838-r" val jsch = "com.jcraft" % "jsch" % "0.1.51" val dispatch = "net.databinder.dispatch" %% "dispatch-core" % "0.9.5" val slick = "com.typesafe.slick" %% "slick" % "2.0.3" val h2 = "com.h2database" % "h2" % "1.3.175" val flyway = "com.googlecode.flyway" % "flyway-core" % "2.3" val c3p0 = "com.mchange" % "c3p0" % "0.9.5-pre6" val scalaval = "com.softwaremill.scalaval" %% "scalaval" % "0.1" val httpClient = "net.databinder.dispatch" %% "dispatch-core" % "0.11.2" val crypto = "com.roundeights" %% "hasher" % "1.0.0" } object SmlCodebragBuild extends Build { import BuildSettings._ import Dependencies._ import com.earldouglas.xwp._ val buildWebClient = TaskKey[Unit]( "build-web-client", "Builds browser client using Grunt.js" ) val webClientBuildSettings = Seq[Setting[_]](buildWebClient <<= { (scalaVersion, baseDirectory, projectID) map { (sv, bd, pid) => { val localGruntCommand = "./node_modules/.bin/grunt build" def updateDeps(cwd: File) = Process("npm install", cwd)! def runGrunt(cwd: File) = Process(localGruntCommand, cwd)! 
def haltOnError(result: Int) { if(result != 0) { throw new Exception("Building web client failed") } } println("Updating NPM dependencies") haltOnError(updateDeps(bd)) println("Building with Grunt.js") haltOnError(runGrunt(bd)) } } }) val runH2Console = TaskKey[Unit]("run-h2-console", "Runs the H2 console using the data file from the local config file") val runH2ConsoleSettings = fullRunTask(runH2Console, Compile, "com.softwaremill.codebrag.dao.sql.H2BrowserConsole") lazy val parent: Project = Project( "codebrag-root", file("."), settings = buildSettings ) aggregate(common, domain, dao, service, rest, ui, dist) lazy val common: Project = Project( "codebrag-common", file("codebrag-common"), settings = buildSettings ++ Seq(libraryDependencies ++= Seq(bson) ++ jodaDependencies ++ Seq(commonsCodec, typesafeConfig, crypto)) ++ buildInfoSettings ++ Seq( sourceGenerators in Compile <+= buildInfo, buildInfoPackage := "com.softwaremill.codebrag.version", buildInfoObject := "CodebragBuildInfo", buildInfoKeys := Seq[BuildInfoKey]( version, BuildInfoKey.action("buildDate")(new SimpleDateFormat("yyyy-MM-dd HH:mm").format(new Date())), BuildInfoKey.action("buildSha")((Process("git rev-parse HEAD") !!).stripLineEnd) ) ) ) lazy val domain: Project = Project( "codebrag-domain", file("codebrag-domain"), settings = buildSettings ++ Seq(libraryDependencies ++= Seq(bson, json4s, json4sExt, commonsLang) ++ jodaDependencies) ++ assemblySettings ++ Seq( assemblyOption in assembly ~= { _.copy(includeScala = false) }, excludedJars in assembly <<= (fullClasspath in assembly) map { cp => cp filter {_.data.getName == "config-1.0.1.jar"} } ) ) dependsOn (common) lazy val dao: Project = Project( "codebrag-dao", file("codebrag-dao"), settings = buildSettings ++ Seq(libraryDependencies ++= Seq(bson, typesafeConfig, slick, h2, flyway, c3p0), runH2ConsoleSettings) ) dependsOn(domain % "test->test;compile->compile", common) lazy val service: Project = Project( "codebrag-service", file("codebrag-service"), settings = buildSettings ++ Seq(libraryDependencies ++= Seq(commonsValidator, javaxMail, scalate, egitGithubApi, jGit, jsch, dispatch, json4s, json4sExt, commonsLang, scalaval, akka, akkaSlf4j)) ) dependsOn(domain, common, dao % "test->test;compile->compile") lazy val rest: Project = Project( "codebrag-rest", file("codebrag-rest"), settings = buildSettings ++ graphSettings ++ XwpPlugin.jetty() ++ Seq(libraryDependencies ++= scalatraStack ++ jodaDependencies ++ Seq(servletApiProvided, typesafeConfig)) ++ Seq( artifactName := { (config: ScalaVersion, module: ModuleID, artifact: Artifact) => "codebrag." 
+ artifact.extension // produces nice war name -> http://stackoverflow.com/questions/8288859/how-do-you-remove-the-scala-version-postfix-from-artifacts-builtpublished-wi } ) ++ Seq(javaOptions in XwpPlugin.container := Seq("-Dconfig.file=local.conf")) ) dependsOn(service % "test->test;compile->compile", domain, common) lazy val dist = Project( "codebrag-dist", file("codebrag-dist"), settings = buildSettings ++ assemblySettings ++ Seq( libraryDependencies ++= Seq(jetty), mainClass in assembly := Some("com.softwaremill.codebrag.Codebrag"), // We need to include the whole webapp, hence replacing the resource directory unmanagedResourceDirectories in Compile <<= baseDirectory { bd => List(bd.getParentFile() / rest.base.getName / "src" / "main", bd.getParentFile() / ui.base.getName / "dist") }, mergeStrategy in assembly <<= (mergeStrategy in assembly) { (old) => { // There are two of such files in jgit and javax.servlet - but we don't really care about them (I guess ... ;) ) // Probably some OSGi stuff. case "plugin.properties" => MergeStrategy.discard case PathList("META-INF", "eclipse.inf") => MergeStrategy.discard // Here we don't care for sure. case "about.html" => MergeStrategy.discard case x => old(x) } } ) ) dependsOn (ui, rest) lazy val ui = Project( "codebrag-ui", file("codebrag-ui"), settings = buildSettings ++ webClientBuildSettings ++ Seq( (compile in Compile) <<= (compile in Compile) dependsOn (buildWebClient) ) ) lazy val uiTests = Project( "codebrag-ui-tests", file("codebrag-ui-tests"), settings = buildSettings ++ Seq( libraryDependencies ++= selenium ++ Seq(awaitility) ) ) dependsOn (dist) // To run the embedded container, we need to provide the path to the configuration. To make things easier, we assume // that the local conf is in the current dir in the local.conf file. System.setProperty("config.file", "local.conf") }
frodejohansen/codebrag
project/Build.scala
Scala
agpl-3.0
11,092
/* * Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com> */ package play.it.http import java.io.ByteArrayInputStream import java.util.Arrays import akka.NotUsed import akka.stream.javadsl.Source import com.fasterxml.jackson.databind.JsonNode import play.api.Application import play.api.inject.guice.GuiceApplicationBuilder import play.api.test._ import play.api.libs.ws.WSResponse import play.it._ import play.libs.{ Comet, EventSource, Json } import play.mvc.{ Http, Results } class NettyJavaResultsHandlingSpec extends JavaResultsHandlingSpec with NettyIntegrationSpecification class AkkaHttpJavaResultsHandlingSpec extends JavaResultsHandlingSpec with AkkaHttpIntegrationSpecification trait JavaResultsHandlingSpec extends PlaySpecification with WsTestClient with ServerIntegrationSpecification { sequential "Java results handling" should { def makeRequest[T](controller: MockController)(block: WSResponse => T) = { implicit val port = testServerPort lazy val app: Application = GuiceApplicationBuilder().routes { case _ => JAction(app, controller) }.build() running(TestServer(port, app)) { val response = await(wsUrl("/").get()) block(response) } } "treat headers case insensitively" in makeRequest(new MockController { def action = { response.setHeader("Server", "foo") response.setHeader("server", "bar") Results.ok("Hello world").withHeader("Other", "foo").withHeader("other", "bar") } }) { response => response.header("Server") must beSome("bar") response.header("Other") must beSome("bar") response.body must_== "Hello world" } "add cookies in Result" in makeRequest(new MockController { def action = { Results.ok("Hello world") .withCookies(new Http.Cookie("bar", "KitKat", 1000, "/", "example.com", false, true, null)) .withCookies(new Http.Cookie("framework", "Play", 1000, "/", "example.com", false, true, null)) } }) { response => response.allHeaders("Set-Cookie") must contain((s: String) => s.startsWith("bar=KitKat;")) response.allHeaders("Set-Cookie") must contain((s: String) => s.startsWith("framework=Play;")) response.body must_== "Hello world" } "handle duplicate withCookies in Result" in { val result = Results.ok("Hello world") .withCookies(new Http.Cookie("bar", "KitKat", 1000, "/", "example.com", false, true, null)) .withCookies(new Http.Cookie("bar", "Mars", 1000, "/", "example.com", false, true, null)) import scala.collection.JavaConverters._ val cookies = result.cookies().iterator().asScala.toList val cookieValues = cookies.map(_.value) cookieValues must not contain ("KitKat") cookieValues must contain("Mars") } "handle duplicate cookies" in makeRequest(new MockController { def action = { Results.ok("Hello world") .withCookies(new Http.Cookie("bar", "KitKat", 1000, "/", "example.com", false, true, null)) .withCookies(new Http.Cookie("bar", "Mars", 1000, "/", "example.com", false, true, null)) } }) { response => response.allHeaders("Set-Cookie") must contain((s: String) => s.startsWith("bar=Mars;")) response.body must_== "Hello world" } "add cookies in Response" in makeRequest(new MockController { def action = { response.setCookie(new Http.Cookie("foo", "1", 1000, "/", "example.com", false, true, null)) Results.ok("Hello world") } }) { response => response.header("Set-Cookie").get must contain("foo=1;") response.body must_== "Hello world" } "clear Session" in makeRequest(new MockController { def action = { session.clear() Results.ok("Hello world") } }) { response => response.header("Set-Cookie").get must contain("PLAY_SESSION=; Max-Age=-86400") response.body must_== "Hello world" } 
"add cookies in both Response and Result" in makeRequest(new MockController { def action = { response.setCookie(new Http.Cookie("foo", "1", 1000, "/", "example.com", false, true, null)) Results.ok("Hello world").withCookies( new Http.Cookie("bar", "KitKat", 1000, "/", "example.com", false, true, null) ) } }) { response => response.allHeaders.get("Set-Cookie").get(0) must contain("bar=KitKat") response.allHeaders.get("Set-Cookie").get(1) must contain("foo=1") response.body must_== "Hello world" } "send strict results" in makeRequest(new MockController { def action = Results.ok("Hello world") }) { response => response.header(CONTENT_LENGTH) must beSome("11") response.body must_== "Hello world" } "chunk comet results from string" in makeRequest(new MockController { def action = { import scala.collection.JavaConverters._ val dataSource = akka.stream.javadsl.Source.from(List("a", "b", "c").asJava) val cometSource = dataSource.via(Comet.string("callback")) Results.ok().chunked(cometSource) } }) { response => response.header(TRANSFER_ENCODING) must beSome("chunked") response.header(CONTENT_LENGTH) must beNone response.body must contain("<html><body><script type=\\"text/javascript\\">callback('a');</script><script type=\\"text/javascript\\">callback('b');</script><script type=\\"text/javascript\\">callback('c');</script>") } "chunk comet results from json" in makeRequest(new MockController { def action = { val objectNode = Json.newObject objectNode.put("foo", "bar") val dataSource: Source[JsonNode, NotUsed] = akka.stream.javadsl.Source.from(Arrays.asList(objectNode)) val cometSource = dataSource.via(Comet.json("callback")) Results.ok().chunked(cometSource) } }) { response => response.header(TRANSFER_ENCODING) must beSome("chunked") response.header(CONTENT_LENGTH) must beNone response.body must contain("<html><body><script type=\\"text/javascript\\">callback({\\"foo\\":\\"bar\\"});</script>") } "chunk event source results" in makeRequest(new MockController { def action = { import scala.collection.JavaConverters._ val dataSource = akka.stream.javadsl.Source.from(List("a", "b").asJava).map { new akka.japi.function.Function[String, EventSource.Event] { def apply(t: String) = EventSource.Event.event(t) } } val eventSource = dataSource.via(EventSource.flow()) Results.ok().chunked(eventSource).as("text/event-stream") } }) { response => response.header(CONTENT_TYPE) must beSome.like { case value => value.toLowerCase(java.util.Locale.ENGLISH) must_== "text/event-stream" } response.header(TRANSFER_ENCODING) must beSome("chunked") response.header(CONTENT_LENGTH) must beNone response.body must_== "data: a\\n\\ndata: b\\n\\n" } "stream input stream responses as chunked" in makeRequest(new MockController { def action = { Results.ok(new ByteArrayInputStream("hello".getBytes("utf-8"))) } }) { response => response.header(TRANSFER_ENCODING) must beSome("chunked") response.body must_== "hello" } "not chunk input stream results if a content length is set" in makeRequest(new MockController { def action = { // chunk size 2 to force more than one chunk Results.ok(new ByteArrayInputStream("hello".getBytes("utf-8")), 5) } }) { response => response.header(CONTENT_LENGTH) must beSome("5") response.header(TRANSFER_ENCODING) must beNone response.body must_== "hello" } } }
ktoso/playframework
framework/src/play-integration-test/src/test/scala/play/it/http/JavaResultsHandlingSpec.scala
Scala
apache-2.0
7,789
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.catalyst.expressions

import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.util.{MapData, ArrayData}
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.{CalendarInterval, UTF8String}

/**
 * A mutable wrapper that makes two rows appear as a single concatenated row.  Designed to
 * be instantiated once per thread and reused.
 */
class JoinedRow extends InternalRow {
  private[this] var row1: InternalRow = _
  private[this] var row2: InternalRow = _

  def this(left: InternalRow, right: InternalRow) = {
    this()
    row1 = left
    row2 = right
  }

  /** Updates this JoinedRow to used point at two new base rows.  Returns itself. */
  def apply(r1: InternalRow, r2: InternalRow): JoinedRow = {
    row1 = r1
    row2 = r2
    this
  }

  /** Updates this JoinedRow by updating its left base row.  Returns itself. */
  def withLeft(newLeft: InternalRow): JoinedRow = {
    row1 = newLeft
    this
  }

  /** Updates this JoinedRow by updating its right base row.  Returns itself. */
  def withRight(newRight: InternalRow): JoinedRow = {
    row2 = newRight
    this
  }

  override def toSeq(fieldTypes: Seq[DataType]): Seq[Any] = {
    assert(fieldTypes.length == row1.numFields + row2.numFields)
    val (left, right) = fieldTypes.splitAt(row1.numFields)
    row1.toSeq(left) ++ row2.toSeq(right)
  }

  override def numFields: Int = row1.numFields + row2.numFields

  override def get(i: Int, dt: DataType): AnyRef =
    if (i < row1.numFields) row1.get(i, dt) else row2.get(i - row1.numFields, dt)

  override def isNullAt(i: Int): Boolean =
    if (i < row1.numFields) row1.isNullAt(i) else row2.isNullAt(i - row1.numFields)

  override def getBoolean(i: Int): Boolean =
    if (i < row1.numFields) row1.getBoolean(i) else row2.getBoolean(i - row1.numFields)

  override def getByte(i: Int): Byte =
    if (i < row1.numFields) row1.getByte(i) else row2.getByte(i - row1.numFields)

  override def getShort(i: Int): Short =
    if (i < row1.numFields) row1.getShort(i) else row2.getShort(i - row1.numFields)

  override def getInt(i: Int): Int =
    if (i < row1.numFields) row1.getInt(i) else row2.getInt(i - row1.numFields)

  override def getLong(i: Int): Long =
    if (i < row1.numFields) row1.getLong(i) else row2.getLong(i - row1.numFields)

  override def getFloat(i: Int): Float =
    if (i < row1.numFields) row1.getFloat(i) else row2.getFloat(i - row1.numFields)

  override def getDouble(i: Int): Double =
    if (i < row1.numFields) row1.getDouble(i) else row2.getDouble(i - row1.numFields)

  override def getDecimal(i: Int, precision: Int, scale: Int): Decimal = {
    if (i < row1.numFields) {
      row1.getDecimal(i, precision, scale)
    } else {
      row2.getDecimal(i - row1.numFields, precision, scale)
    }
  }

  override def getUTF8String(i: Int): UTF8String =
    if (i < row1.numFields) row1.getUTF8String(i) else row2.getUTF8String(i - row1.numFields)

  override def getBinary(i: Int): Array[Byte] =
    if (i < row1.numFields) row1.getBinary(i) else row2.getBinary(i - row1.numFields)

  override def getArray(i: Int): ArrayData =
    if (i < row1.numFields) row1.getArray(i) else row2.getArray(i - row1.numFields)

  override def getInterval(i: Int): CalendarInterval =
    if (i < row1.numFields) row1.getInterval(i) else row2.getInterval(i - row1.numFields)

  override def getMap(i: Int): MapData =
    if (i < row1.numFields) row1.getMap(i) else row2.getMap(i - row1.numFields)

  override def getStruct(i: Int, numFields: Int): InternalRow = {
    if (i < row1.numFields) {
      row1.getStruct(i, numFields)
    } else {
      row2.getStruct(i - row1.numFields, numFields)
    }
  }

  override def anyNull: Boolean = row1.anyNull || row2.anyNull

  override def copy(): InternalRow = {
    val copy1 = row1.copy()
    val copy2 = row2.copy()
    new JoinedRow(copy1, copy2)
  }

  override def toString: String = {
    // Make sure toString never throws NullPointerException.
    if ((row1 eq null) && (row2 eq null)) {
      "[ empty row ]"
    } else if (row1 eq null) {
      row2.toString
    } else if (row2 eq null) {
      row1.toString
    } else {
      s"{${row1.toString} + ${row2.toString}}"
    }
  }
}
chenc10/Spark-PAF
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/JoinedRow.scala
Scala
apache-2.0
5,054
package p04various

object Def {
  val eol = System.lineSeparator
}
vkubicki/Sernel
src/main/scala/p04various/00 - Def.scala
Scala
bsd-3-clause
71
package com.codahale.jerkson

import java.io.IOException
import org.codehaus.jackson.map.JsonMappingException
import org.codehaus.jackson.{JsonParseException, JsonProcessingException}

object ParsingException {
  def apply(cause: JsonProcessingException): ParsingException = {
    val message = cause match {
      case e: JsonMappingException => e.getMessage
      case e: JsonParseException => {
        val fake = new JsonParseException("", e.getLocation)
        val msg = e.getMessage.replace(fake.getMessage, "").replaceAll(""" (\(from.*\))""", "")
        "Malformed JSON. %s at character offset %d.".format(msg, e.getLocation.getCharOffset)
      }
    }
    new ParsingException(message, cause)
  }
}

class ParsingException(message: String, cause: Throwable) extends IOException(message, cause)
cphylabs/jerkson-old
src/main/scala/com/codahale/jerkson/ParsingException.scala
Scala
mit
813
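For context, a hedged sketch of how an exception like this is typically raised around a Jackson 1.x parse call; the ObjectMapper usage below is illustrative and not taken from the jerkson sources.

import org.codehaus.jackson.JsonNode
import org.codehaus.jackson.JsonProcessingException
import org.codehaus.jackson.map.ObjectMapper

val mapper = new ObjectMapper()

// Parse a JSON string, converting Jackson's low-level error into the
// friendlier ParsingException defined above.
def parseOrFail(json: String): JsonNode =
  try mapper.readTree(json)
  catch {
    case e: JsonProcessingException => throw ParsingException(e)
  }

// parseOrFail("""{"broken": """) would throw a ParsingException whose message
// reads roughly "Malformed JSON. ... at character offset N."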
/* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package wvlet.airframe.rx import java.util.concurrent.TimeUnit import java.util.concurrent.atomic.AtomicLong /** * Ticker is for measuring the elapsed time. */ trait Ticker { // Return the current nanoseconds time def currentNanos: Long } /** * A Ticker implementation that can be incremented manually for testing purpose * * This implementation is similar to FakeTicker in Guava: * https://github.com/google/guava/blob/master/guava-testlib/src/com/google/common/testing/FakeTicker.java */ case class ManualTicker(nanos: AtomicLong = new AtomicLong(0), autoIncrementStepNanos: Long = 0) extends Ticker { /** * Set the auto-increment step, which will be added after reading a value */ def withIncrements(time: Long, unit: TimeUnit): ManualTicker = { this.copy(autoIncrementStepNanos = unit.toNanos(time)) } /** * Advance the ticker for the given amount * @param time * @param unit * @return */ def advance(time: Long, unit: TimeUnit): ManualTicker = { nanos.addAndGet(unit.toNanos(time)) this } /** * Advance the ticker for the given nanoseconds * @param nanoseconds * @return */ def advance(nanoseconds: Long): ManualTicker = { advance(nanoseconds, TimeUnit.NANOSECONDS) } def currentNanos: Long = { nanos.getAndAdd(autoIncrementStepNanos) } } object Ticker { // A ticker that reads the current time using System.nanoTime() def systemTicker: Ticker = { new Ticker { override def currentNanos: Long = System.nanoTime() } } /** * Create a testing ticker that can be manually advanced */ def manualTicker: ManualTicker = { ManualTicker() } }
wvlet/airframe
airframe-rx/src/main/scala/wvlet/airframe/rx/Ticker.scala
Scala
apache-2.0
2,250
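A small usage sketch of the ManualTicker defined above, e.g. for tests that need deterministic time; it only uses the API shown in the file.

import java.util.concurrent.TimeUnit

// Deterministic time source for tests
val ticker = Ticker.manualTicker
ticker.currentNanos                       // 0

ticker.advance(5, TimeUnit.MILLISECONDS)
ticker.currentNanos                       // 5000000

// Auto-advance 1 ms on every read; note the copy shares the same counter
val polling = ticker.withIncrements(1, TimeUnit.MILLISECONDS)
polling.currentNanos                      // returns 5000000, then adds 1 ms
polling.currentNanos                      // 6000000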
package mm4s.test import java.util.UUID import akka.actor.ActorSystem import akka.stream.ActorMaterializer import akka.stream.scaladsl.Sink import akka.util.Timeout import mm4s.api.UserModels.{LoggedIn, LoginByUsername} import mm4s.api.{Streams, Users} import org.scalatest.Tag import scala.concurrent.Future import scala.concurrent.duration.DurationInt trait IntegrationTest { // overridable access, defaults to admin def user: String = "admin" def pass: String = "password" def team: String = "humans" val defaultDuration = 10.seconds val defaultTimeout = Timeout(defaultDuration) def token()(implicit system: ActorSystem, mat: ActorMaterializer): Future[LoggedIn] = { Users.login(LoginByUsername(user, pass, team)) .via(connection()) .via(Users.extractSession()) .runWith(Sink.head) } def connection()(implicit system: ActorSystem) = { Streams.connection("localhost", 8080) } def random() = UUID.randomUUID.toString.take(5) val integration = Tag("mm4s.test.IntegrationTest") }
jw3/mm4s
api/src/test/scala/mm4s/test/IntegrationTest.scala
Scala
apache-2.0
1,034
package picasso.frontend.compilerPlugin import picasso.utils.{LogDebug, Logger, IO} object PluginSuiteCommon { //add the compiler to the CP val configFile = "frontend/compilerPlugin/build.sbt" //build.scala.versions=scalaVersion lazy val scalaVersion = { val values = IO.readTextFile(configFile) val pre = values.indexOf("scalaVersion") val start = values.indexOf("\\"", pre) + 1 val end = values.indexOf( "\\"", start) values.substring(start, end) } lazy val scalaVersionDot = scalaVersion.replace("-",".") lazy val scalaLib = sys.env("HOME") + "/.ivy2/cache/org.scala-lang/scala-library/jars/scala-library-"+scalaVersionDot+".jar" val compilerCP = List(scalaLib + ":frontend/compilerPlugin/target/scala-"+scalaVersionDot+"/classes/" ) //assumes that the pwd the project root (where sbt is run) val testDir = "frontend/compilerPlugin/src/test/resources/plugin/" def runPluginCover(filesNames: List[String], options: List[String] = Nil) = { //Console.println("cp = " + compilerCP) val previousLog = Logger.getMinPriority Logger.setMinPriority(LogDebug) Logger.disallow("graph") try { val files = filesNames map (testDir + _ + ".scala") assert(PluginRunner.testCoverComputation((options ::: files).toArray, compilerCP)) } finally { Logger.setMinPriority(previousLog) Logger.allow("graph") } } def runPluginError(filesNames: List[String], options: List[String] = Nil): Boolean = { val previousLog = Logger.getMinPriority Logger.setMinPriority(LogDebug) Logger.disallow("graph") Logger.disallow("Analysis") try { val files = filesNames map (testDir + _ + ".scala") PluginRunner.testAssert((options ::: files).toArray, compilerCP).isDefined //otherwise would return a trace } finally { Logger.setMinPriority(previousLog) Logger.allow("graph") Logger.allow("Analysis") } } def runPluginParse(filesNames: List[String], options: List[String] = Nil) = { //Console.println("cp = " + compilerCP) val previousLog = Logger.getMinPriority Logger.setMinPriority(LogDebug) try { val files = filesNames map (testDir + _ + ".scala") PluginRunner.testParseOnly((options ::: files).toArray, compilerCP) } finally { Logger.setMinPriority(previousLog) } } }
dzufferey/picasso
frontend/compilerPlugin/src/test/scala/picasso/frontend/compilerPlugin/PluginSuiteCommon.scala
Scala
bsd-2-clause
2,399
package org.rogach.scallop import org.rogach.scallop.exceptions.GenericScallopException import scala.util.Try import scala.concurrent.duration.{Duration, FiniteDuration} /** This trait contains various predefined converters for common use-cases. * org.rogach.scallop package object inherits from this trait, thus you can * get all the converters simply by importing `org.rogach.scallop._`. */ trait DefaultConverters { implicit val flagConverter: ValueConverter[Boolean] = new ValueConverter[Boolean] { def parse(s: List[(String, List[String])]) = s match { case (_,Nil) :: Nil => Right(Some(true)) case Nil => Right(None) case _ => Left("too many arguments for flag option") } val argType = ArgType.FLAG } /** Creates a converter for an option with a single argument. * @param conv The conversion function to use. May throw an exception on error. * @param handler An error handler function for writing custom error messages. * @return A ValueConverter instance. */ def singleArgConverter[A]( conv: String => A, handler: PartialFunction[Throwable, Either[String, Option[A]]] = PartialFunction.empty ): ValueConverter[A] = new ValueConverter[A] { def parse(s: List[(String, List[String])]) = { s match { case (_, i :: Nil) :: Nil => Try(Right(Some(conv(i)))).recover(handler).recover({ case e: Exception => Left(e.toString) }).get case Nil => Right(None) case _ => Left("you should provide exactly one argument for this option") } } val argType = ArgType.SINGLE } implicit val charConverter: ValueConverter[Char] = singleArgConverter[Char](_.head, PartialFunction.empty) implicit val stringConverter: ValueConverter[String] = singleArgConverter[String](identity, PartialFunction.empty) /** Handler function for numeric types which expects a NumberFormatException and prints a more * helpful error message. * @param name the type name to display */ def numberHandler[T](name: String): PartialFunction[Throwable, Either[String, Option[T]]] = { case _: NumberFormatException => Left(Util.format("bad %s value", name)) } implicit val byteConverter: ValueConverter[Byte] = singleArgConverter[Byte](_.toByte, numberHandler("Byte")) implicit val shortConverter: ValueConverter[Short] = singleArgConverter[Short](_.toShort, numberHandler("Short")) implicit val intConverter: ValueConverter[Int] = singleArgConverter[Int](_.toInt, numberHandler("Int")) implicit val longConverter: ValueConverter[Long] = singleArgConverter[Long](_.toLong, numberHandler("Long")) implicit val floatConverter: ValueConverter[Float] = singleArgConverter[Float](_.toFloat, numberHandler("Float")) implicit val doubleConverter: ValueConverter[Double] = singleArgConverter[Double](_.toDouble, numberHandler("Double")) implicit val bigIntConverter: ValueConverter[BigInt] = singleArgConverter(BigInt(_), numberHandler("integer")) implicit val bigDecimalConverter: ValueConverter[BigDecimal] = singleArgConverter(BigDecimal(_), numberHandler("decimal")) implicit val durationConverter: ValueConverter[Duration] = singleArgConverter(Duration(_)) implicit val finiteDurationConverter: ValueConverter[FiniteDuration] = singleArgConverter[FiniteDuration]({ arg => Duration(arg) match { case d: FiniteDuration => d case d => throw new IllegalArgumentException(s"'$d' is not a FiniteDuration.") } }, PartialFunction.empty) /** Creates a converter for an option which accepts multiple arguments. * @param conv The conversion function to use on each argument. May throw an exception on error. * @return A ValueConverter instance. 
*/ def listArgConverter[A](conv: String => A): ValueConverter[List[A]] = new ValueConverter[List[A]] { def parse(s: List[(String, List[String])]) = { try { val l = s.map(_._2).flatten.map(i => conv(i)) if (l.isEmpty) Right(None) else Right(Some(l)) } catch { case e: Exception => Left(e.toString) } } val argType = ArgType.LIST } implicit val byteListConverter: ValueConverter[List[Byte]] = listArgConverter[Byte](_.toByte) implicit val shortListConverter: ValueConverter[List[Short]] = listArgConverter[Short](_.toShort) implicit val intListConverter: ValueConverter[List[Int]] = listArgConverter[Int](_.toInt) implicit val longListConverter: ValueConverter[List[Long]] = listArgConverter[Long](_.toLong) implicit val floatListConverter: ValueConverter[List[Float]] = listArgConverter[Float](_.toFloat) implicit val doubleListConverter: ValueConverter[List[Double]] = listArgConverter[Double](_.toDouble) implicit val stringListConverter: ValueConverter[List[String]] = listArgConverter[String](identity) /** Creates a converter for a property option. * @param conv The converter function to use on each value. May throw an exception on error. * @return A ValueConverter instance. */ def propsConverter[A]( conv: ValueConverter[A] ): ValueConverter[Map[String,A]] = new ValueConverter[Map[String,A]] { def parse(s: List[(String, List[String])]) = { try { Right { val pairs = s.map(_._2).flatten.map(_.trim).filter("," != _).flatMap(_.split("(?<!\\\\\\\\),")).map(_.replace("\\\\,", ",")) val m = pairs.map { pair => val kv = pair.split("(?<!\\\\\\\\)=").map(_.replace("\\\\=", "=")) val key = kv(0) val value = kv(1) conv.parse(List(("",List(value)))) match { case Right(Some(parseResult)) => (key, parseResult) case Right(None) => throw new GenericScallopException("No result from props converter") case Left(msg) => throw new GenericScallopException(msg) } }.toMap if (m.nonEmpty) Some(m) else None } } catch { case e: Exception => Left(e.toString) } } val argType = ArgType.LIST } implicit val bytePropsConverter: ValueConverter[Map[String, Byte]] = propsConverter[Byte](byteConverter) implicit val shortPropsConverter: ValueConverter[Map[String, Short]] = propsConverter[Short](shortConverter) implicit val intPropsConverter: ValueConverter[Map[String, Int]] = propsConverter[Int](intConverter) implicit val longPropsConverter: ValueConverter[Map[String, Long]] = propsConverter[Long](longConverter) implicit val floatPropsConverter: ValueConverter[Map[String, Float]] = propsConverter[Float](floatConverter) implicit val doublePropsConverter: ValueConverter[Map[String, Double]] = propsConverter[Double](doubleConverter) implicit val charPropsConverter: ValueConverter[Map[String, Char]] = propsConverter[Char](charConverter) implicit val stringPropsConverter: ValueConverter[Map[String, String]] = propsConverter[String](stringConverter) /** Converter for a tally option, used in ScallopConf.tally */ val tallyConverter = new ValueConverter[Int] { def parse(s: List[(String, List[String])]) = { if (s.exists(_._2.nonEmpty)) Left("this option doesn't need arguments") else if (s.nonEmpty) Right(Some(s.size)) else Right(None) } val argType = ArgType.FLAG } /** Creates a converter for an option with single optional argument * (it will parse both `--opt` and `--opt arg` command lines). * @param default The default value to use if argument wasn't provided. * @param conv Converter instance to use if argument was provided. * @return A ValueConverter instance. 
*/ def optDefault[A](default: A)(implicit conv: ValueConverter[A]): ValueConverter[A] = new ValueConverter[A] { def parse(s: List[(String, List[String])]) = { s match { case Nil => Right(None) case (_, Nil) :: Nil => Right(Some(default)) case call @ ((_, v :: Nil) :: Nil) => conv.parse(call) case _ => Left("Too many arguments") } } val argType = ArgType.LIST } }
scallop/scallop
src/main/scala/org.rogach.scallop/DefaultConverters.scala
Scala
mit
8,099
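A hedged sketch of building a custom converter from the singleArgConverter helper above; java.nio.file.Path is just an example target type, and the parse calls feed the internal List[(String, List[String])] shape directly rather than going through a ScallopConf.

import java.nio.file.{Path, Paths}
import org.rogach.scallop._

// A Path converter built from the helper defined above
val pathConverter: ValueConverter[Path] =
  singleArgConverter[Path](Paths.get(_))

pathConverter.parse(List(("out", List("/tmp/report.txt"))))
// => Right(Some(/tmp/report.txt))

pathConverter.parse(List(("out", List("a", "b"))))
// => Left("you should provide exactly one argument for this option")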
// see the comments for macroExpand.onDelayed for an explanation of what's tested here object Test extends App { case class Foo(i: Int, s: String, b: Boolean) def foo[C, L](c: C)(implicit iso: Iso[C, L]): L = iso.to(c) { val equiv = foo(Foo(23, "foo", true)) def typed[T](t: => T) {} typed[(Int, String, Boolean)](equiv) println(equiv) } }
lampepfl/dotty
tests/disabled/macro/run/macro-whitebox-fundep-materialization/Test_2.scala
Scala
apache-2.0
364
package streams import common._ /** * This component implements a parser to define terrains from a * graphical ASCII representation. * * When mixing in that component, a level can be defined by * defining the field `level` in the following form: * * val level = * """------ * |--ST-- * |--oo-- * |--oo-- * |------""".stripMargin * * - The `-` character denotes parts which are outside the terrain * - `o` denotes fields which are part of the terrain * - `S` denotes the start position of the block (which is also considered * inside the terrain) * - `T` denotes the final position of the block (which is also considered * inside the terrain) * * In this example, the first and last lines could be omitted, and * also the columns that consist of `-` characters only. */ trait StringParserTerrain extends GameDef { /** * A ASCII representation of the terrain. This field should remain * abstract here. */ val level: String /** * This method returns terrain function that represents the terrain * in `levelVector`. The vector contains parsed version of the `level` * string. For example, the following level * * val level = * """ST * |oo * |oo""".stripMargin * * is represented as * * Vector(Vector('S', 'T'), Vector('o', 'o'), Vector('o', 'o')) * * The resulting function should return `true` if the position `pos` is * a valid position (not a '-' character) inside the terrain described * by `levelVector`. */ def terrainFunction(levelVector: Vector[Vector[Char]]): Pos => Boolean = pos => pos.row >= 0 && pos.row < levelVector.length && pos.col >= 0 && pos.col < levelVector(pos.row).length && !levelVector(pos.row)(pos.col).equals('-') /** * This function should return the position of character `c` in the * terrain described by `levelVector`. You can assume that the `c` * appears exactly once in the terrain. * * Hint: you can use the functions `indexWhere` and / or `indexOf` of the * `Vector` class */ def findChar(c: Char, levelVector: Vector[Vector[Char]]): Pos = { val row = levelVector.indexWhere(_ contains c) val col = levelVector(row).indexOf(c) Pos(row, col) } private lazy val vector: Vector[Vector[Char]] = Vector(level.split("\\n").map(str => Vector(str: _*)): _*) lazy val terrain: Terrain = terrainFunction(vector) lazy val startPos: Pos = findChar('S', vector) lazy val goal: Pos = findChar('T', vector) }
yurii-khomenko/fpScalaSpec
c2w2streams/src/main/scala/streams/StringParserTerrain.scala
Scala
gpl-3.0
2,555
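A brief sketch of mixing the trait above into a concrete level; it assumes the course's GameDef trait (which StringParserTerrain extends) defines Pos and the Terrain alias and leaves only terrain, startPos and goal abstract.

object DemoLevel extends StringParserTerrain {
  val level =
    """ST
      |oo
      |oo""".stripMargin
}

DemoLevel.startPos                      // Pos(0, 0), the 'S' cell
DemoLevel.goal                          // Pos(0, 1), the 'T' cell
DemoLevel.terrain(DemoLevel.Pos(2, 1))  // true  -- an 'o' cell inside the terrain
DemoLevel.terrain(DemoLevel.Pos(5, 5))  // false -- outside the parsed level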
package controllers import com.google.inject.Inject import java.io.ByteArrayInputStream import models.BusinessDetailsModel import models.CacheKeyPrefix import models.CaptureCertificateDetailsFormModel import models.CaptureCertificateDetailsModel import models.ConfirmFormModel import models.FulfilModel import models.SuccessViewModel import models.VehicleAndKeeperLookupFormModel import pdf.PdfService import play.api.libs.iteratee.Enumerator import play.api.mvc.{Action, Controller} import scala.concurrent.ExecutionContext.Implicits.global import uk.gov.dvla.vehicles.presentation.common.clientsidesession.ClientSideSessionFactory import uk.gov.dvla.vehicles.presentation.common.clientsidesession.CookieImplicits.RichCookies import uk.gov.dvla.vehicles.presentation.common.clientsidesession.CookieImplicits.RichResult import uk.gov.dvla.vehicles.presentation.common.LogFormats.DVLALogger import uk.gov.dvla.vehicles.presentation.common.model.AddressModel import uk.gov.dvla.vehicles.presentation.common.model.VehicleAndKeeperDetailsModel import uk.gov.dvla.vehicles.presentation.common.services.DateService import utils.helpers.Config import views.vrm_assign.RelatedCacheKeys.removeCookiesOnExit import views.vrm_assign.VehicleLookup.{TransactionIdCacheKey, UserType_Business, UserType_Keeper} final class Success @Inject()(pdfService: PdfService) (implicit clientSideSessionFactory: ClientSideSessionFactory, config: Config, dateService: DateService) extends Controller with DVLALogger { def present = Action { implicit request => (request.cookies.getString(TransactionIdCacheKey), request.cookies.getModel[VehicleAndKeeperLookupFormModel], request.cookies.getModel[VehicleAndKeeperDetailsModel], request.cookies.getModel[CaptureCertificateDetailsFormModel], request.cookies.getModel[CaptureCertificateDetailsModel], request.cookies.getModel[FulfilModel]) match { case (Some(transactionId), Some(vehicleAndKeeperLookupForm), Some(vehicleAndKeeperDetails), Some(captureCertificateDetailsFormModel), Some(captureCertificateDetailsModel), Some(fulfilModel)) => val businessDetailsOpt = request.cookies.getModel[BusinessDetailsModel]. 
filter(_ => vehicleAndKeeperLookupForm.userType == UserType_Business) val keeperEmailOpt = request.cookies.getModel[ConfirmFormModel].flatMap(_.keeperEmail) val successViewModel = SuccessViewModel( vehicleAndKeeperDetails, businessDetailsOpt, vehicleAndKeeperLookupForm, keeperEmailOpt, fulfilModel, transactionId, captureCertificateDetailsModel.certificate ) logMessage(request.cookies.trackingId(), Info, "User transaction completed successfully - now displaying the assign success view" ) Ok(views.html.vrm_assign.success(successViewModel, vehicleAndKeeperLookupForm.userType == UserType_Keeper)) case _ => val msg = "User transaction completed successfully but not displaying the success view " + "because the user arrived without all of the required cookies" logMessage(request.cookies.trackingId(), Warn, msg) Redirect(routes.Confirm.present()) } } def createPdf = Action { implicit request => ( request.cookies.getModel[VehicleAndKeeperLookupFormModel], request.cookies.getString(TransactionIdCacheKey), request.cookies.getModel[VehicleAndKeeperDetailsModel]) match { case (Some(vehicleAndKeeperLookupFormModel), Some(transactionId), Some(vehicleAndKeeperDetails)) => val pdf = pdfService.create( transactionId, Seq( vehicleAndKeeperDetails.title, vehicleAndKeeperDetails.firstName, vehicleAndKeeperDetails.lastName ).flatten.mkString(" "), vehicleAndKeeperDetails.address, vehicleAndKeeperLookupFormModel.replacementVRN.replace(" ", ""), request.cookies.trackingId() ) val inputStream = new ByteArrayInputStream(pdf) val dataContent = Enumerator.fromStream(inputStream) // IMPORTANT: be very careful adding/changing any header information. You will need to run ALL tests after // and manually test after making any change. val newVRM = vehicleAndKeeperLookupFormModel.replacementVRN.replace(" ", "") val contentDisposition = "attachment;filename=" + newVRM + "-eV948.pdf" Ok.feed(dataContent).withHeaders( CONTENT_TYPE -> "application/pdf", CONTENT_DISPOSITION -> contentDisposition ) case _ => BadRequest("You are missing the cookies required to create a pdf") } } def finish = Action { implicit request => Redirect(routes.LeaveFeedback.present()). discardingCookies(removeCookiesOnExit) } }
dvla/vrm-assign-online
app/controllers/Success.scala
Scala
mit
4,963
/* * Copyright (c) 2014-2015 by its authors. Some rights reserved. * See the project homepage at: http://www.monifu.org * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package monifu.reactive.observables import minitest.TestSuite import monifu.concurrent.schedulers.TestScheduler import monifu.reactive.Ack.Continue import monifu.reactive.OverflowStrategy.Unbounded import monifu.reactive.channels.PublishChannel import monifu.reactive.exceptions.DummyException import monifu.reactive.{Ack, Observer, Observable} import concurrent.duration._ import scala.concurrent.Future object RefCountObservableSuite extends TestSuite[TestScheduler] { def setup(): TestScheduler = TestScheduler() def tearDown(s: TestScheduler): Unit = { assert(s.state.get.tasks.isEmpty, "TestScheduler should have no pending tasks") } test("should work") { implicit s => var received = 0L var completed = 0 def createObserver = new Observer[Long] { def onNext(elem: Long): Future[Ack] = { received += 1 Continue } def onError(ex: Throwable): Unit = () def onComplete(): Unit = completed += 1 } val ref = Observable.interval(2.seconds).publish().refCount() val s1 = ref.subscribe(createObserver) assertEquals(received, 0) s.tick(); assertEquals(received, 1) s.tick(2.seconds); assertEquals(received, 2) val s2 = ref.subscribe(createObserver) s.tick(); assertEquals(received, 2) s.tick(2.seconds); assertEquals(received, 4) s.tick(2.seconds); assertEquals(received, 6) s1.cancel() s.tick(); assertEquals(received, 6) s.tick(2.seconds); assertEquals(received, 7) assertEquals(completed, 1) s2.cancel() s.tick(2.seconds); assertEquals(received, 7) assertEquals(completed, 2) s.tick(2.seconds) ref.subscribe(createObserver) s.tick(2.seconds); assertEquals(received, 7) assertEquals(completed, 3) ref.subscribe(createObserver) s.tick(2.seconds); assertEquals(received, 7) assertEquals(completed, 4) } test("onError should stop everything") { implicit s => var received = 0L var completed = 0 def createObserver = new Observer[Long] { def onNext(elem: Long): Future[Ack] = { received += 1 Continue } def onError(ex: Throwable): Unit = completed += 1 def onComplete(): Unit = () } val ch = PublishChannel[Long](Unbounded) val ref = ch.publish().refCount() ref.subscribe(createObserver) ref.subscribe(createObserver) assertEquals(received, 0) ch.pushNext(1) s.tick(); assertEquals(received, 2) ch.pushError(DummyException("dummy")) s.tick(); assertEquals(completed, 2) ref.subscribe(createObserver) assertEquals(completed, 3) ref.subscribe(createObserver) assertEquals(completed, 4) assertEquals(received, 2) } test("onComplete") { implicit s => var received = 0L var completed = 0 def createObserver = new Observer[Long] { def onNext(elem: Long): Future[Ack] = { received += 1 Continue } def onError(ex: Throwable): Unit = () def onComplete(): Unit = completed += 1 } val ch = PublishChannel[Long](Unbounded) val ref = ch.publish().refCount() ref.subscribe(createObserver) ref.subscribe(createObserver) ch.pushNext(1) ch.pushComplete() s.tick() assertEquals(received, 2) 
assertEquals(completed, 2) } }
virtualirfan/monifu
monifu/shared/src/test/scala/monifu/reactive/observables/RefCountObservableSuite.scala
Scala
apache-2.0
3,988
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.deploy.master.ui import org.apache.spark.deploy.DeployMessages.{MasterStateResponse, RequestMasterState} import org.apache.spark.deploy.master.Master import org.apache.spark.internal.Logging import org.apache.spark.internal.config.UI.UI_KILL_ENABLED import org.apache.spark.ui.{SparkUI, WebUI} import org.apache.spark.ui.JettyUtils._ /** * Web UI server for the standalone master. */ private[master] class MasterWebUI( val master: Master, requestedPort: Int) extends WebUI(master.securityMgr, master.securityMgr.getSSLOptions("standalone"), requestedPort, master.conf, name = "MasterUI") with Logging { val masterEndpointRef = master.self val killEnabled = master.conf.get(UI_KILL_ENABLED) initialize() /** Initialize all components of the server. */ def initialize() { val masterPage = new MasterPage(this) attachPage(new ApplicationPage(this)) attachPage(masterPage) addStaticHandler(MasterWebUI.STATIC_RESOURCE_DIR) attachHandler(createRedirectHandler( "/app/kill", "/", masterPage.handleAppKillRequest, httpMethods = Set("POST"))) attachHandler(createRedirectHandler( "/driver/kill", "/", masterPage.handleDriverKillRequest, httpMethods = Set("POST"))) } def addProxy(): Unit = { val handler = createProxyHandler(idToUiAddress) attachHandler(handler) } def idToUiAddress(id: String): Option[String] = { val state = masterEndpointRef.askSync[MasterStateResponse](RequestMasterState) val maybeWorkerUiAddress = state.workers.find(_.id == id).map(_.webUiAddress) val maybeAppUiAddress = state.activeApps.find(_.id == id).map(_.desc.appUiUrl) maybeWorkerUiAddress.orElse(maybeAppUiAddress) } } private[master] object MasterWebUI { private val STATIC_RESOURCE_DIR = SparkUI.STATIC_RESOURCE_DIR }
pgandhi999/spark
core/src/main/scala/org/apache/spark/deploy/master/ui/MasterWebUI.scala
Scala
apache-2.0
2,630
/* * Copyright 2016 HM Revenue & Customs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package uk.gov.hmrc.ct.ct600e.v3 import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtOptionalString, Input} case class E10(value: Option[String]) extends CtBoxIdentifier("Charity Commission registration number, or OSCR number (if applicable)") with CtOptionalString with Input
ahudspith-equalexperts/ct-calculations
src/main/scala/uk/gov/hmrc/ct/ct600e/v3/E10.scala
Scala
apache-2.0
878
package rml.args.arg.function case class FunctionOrigin(origin: String) object FunctionOrigin { def apply(clazz: Class[_]): FunctionOrigin = FunctionOrigin(clazz.getName.toString) implicit val origin = FunctionOrigin("No origin specified") }
rml/scala_args
src/main/scala/rml/args/arg/function/FunctionOrigin.scala
Scala
gpl-3.0
251
package scala.lms package epfl package test2 import common._ import test1._ import reflect.SourceContext import java.io.PrintWriter trait Power1 { this: Arith => def power(b: Rep[Double], x: Int): Rep[Double] = if (x == 0) 1.0 else b * power(b, x - 1) } trait Power2 { this: Arith => def power(b: Rep[Double], x: Int)(implicit pos: SourceContext): Rep[Double] = { if (x == 0) 1.0 else if ((x&1) == 0) { val y = power(b, x/2); y * y } else b * power(b, x - 1) } } trait BaseStr extends Base { type Rep[+T] = String //todo added this to provide required unit implicit conversion implicit def unit[T:Manifest](x: T): Rep[T] = x.toString } trait ArithStr extends Arith with BaseStr { //todo removed below //implicit def unit(x: Double) = x.toString def infix_+(x: Rep[Double], y: Rep[Double])(implicit pos: SourceContext) = "(%s+%s)".format(x,y) def infix_-(x: Rep[Double], y: Rep[Double])(implicit pos: SourceContext) = "(%s-%s)".format(x,y) def infix_*(x: Rep[Double], y: Rep[Double])(implicit pos: SourceContext) = "(%s*%s)".format(x,y) def infix_/(x: Rep[Double], y: Rep[Double])(implicit pos: SourceContext) = "(%s/%s)".format(x,y) } class TestPower extends FileDiffSuite { val prefix = home + "test-out/epfl/test2-" def testPower = { withOutFile(prefix+"power") { /* println { val o = new TestPower with ArithRepDirect import o._ power(2,4) } println { val o = new TestPower with ArithRepString import o._ power(2,4) } println { val o = new TestPower with ArithRepString import o._ power("x",4) } println { val o = new TestPower with ArithRepString import o._ power("(x + y)",4) } */ { val o = new Power1 with ArithStr import o._ val r = power(infix_+("x0","x1"),4) println(r) } { val o = new Power2 with ArithStr import o._ val r = power(infix_+("x0","x1"),4) println(r) } { val o = new Power1 with ArithExp import o._ val r = power(fresh[Double] + fresh[Double],4) println(globalDefs.mkString("\\n")) println(r) val p = new ExportGraph { val IR: o.type = o } p.emitDepGraph(r, prefix+"power1-dot") } { val o = new Power1 with ArithExpOpt import o._ val r = power(fresh[Double] + fresh[Double],4) println(globalDefs.mkString("\\n")) println(r) val p = new ExportGraph { val IR: o.type = o } p.emitDepGraph(r, prefix+"power2-dot") } { val o = new Power1 with ArithExpOpt import o._ val f = (x: Rep[Double]) => power(x + x, 4) val p = new ScalaGenFlat with ScalaGenArith { val IR: o.type = o } p.emitSource(f, "Power2", new PrintWriter(System.out)) } { val o = new Power2 with ArithExpOpt import o._ val r = power(fresh[Double] + fresh[Double],4) println(globalDefs.mkString("\\n")) println(r) val p = new ExportGraph { val IR: o.type = o } p.emitDepGraph(r, prefix+"power3-dot") } { val o = new Power2 with ArithExpOpt import o._ val f = (x: Rep[Double]) => power(x + x, 4) val p = new ScalaGenFlat with ScalaGenArith { val IR: o.type = o } p.emitSource(f, "Power3", new PrintWriter(System.out)) } { val o = new Power1 with ArithExpOpt with CompileScala { self => val codegen = new ScalaGenFlat with ScalaGenArith { val IR: self.type = self } } import o._ val power4 = (x:Rep[Double]) => power(x,4) codegen.emitSource(power4, "Power4", new PrintWriter(System.out)) val power4c = compile(power4) println(power4c(2)) } } assertFileEqualsCheck(prefix+"power") assertFileEqualsCheck(prefix+"power1-dot") assertFileEqualsCheck(prefix+"power2-dot") assertFileEqualsCheck(prefix+"power3-dot") } }
scalan/virtualization-lms-core
test-src/epfl/test2-fft/TestPower.scala
Scala
bsd-3-clause
3,949
package org.hibernate.cache.rediscala.regions

import java.util.Properties

import org.hibernate.cache.rediscala.client.RedisCache
import org.hibernate.cache.rediscala.strategy.RedisAccessStrategyFactory
import org.hibernate.cache.spi.access.{AccessType, EntityRegionAccessStrategy}
import org.hibernate.cache.spi.{CacheDataDescription, EntityRegion}
import org.hibernate.cfg.Settings

/**
 * RedisEntityRegion
 *
 * @author 배성혁 [email protected]
 * @since 2014. 2. 21. 9:28 AM
 */
class RedisEntityRegion(private[this] val _accessStrategyFactory: RedisAccessStrategyFactory,
                        private[this] val _cache: RedisCache,
                        private[this] val _regionName: String,
                        private[this] val _settings: Settings,
                        private[this] val _metadata: CacheDataDescription,
                        private[this] val _props: Properties)
  extends RedisTransactionalDataRegion(
    _accessStrategyFactory,
    _cache,
    _regionName,
    _settings,
    _metadata,
    _props
  ) with EntityRegion {

  def buildAccessStrategy(accessType: AccessType): EntityRegionAccessStrategy =
    accessStrategyFactory.createEntityRegionAccessStrategy(this, accessType)
}
debop/debop4s
hibernate-rediscala/src/main/scala/org/hibernate/cache/rediscala/regions/RedisEntityRegion.scala
Scala
apache-2.0
1,492
package pl.newicom.dddd.messaging.event import org.joda.time.DateTime import pl.newicom.dddd.aggregate.DomainEvent import pl.newicom.dddd.utils.UUIDSupport._ case class DomainEventMessage( snapshotId: AggregateSnapshotId, override val event: DomainEvent, override val id: String = uuid, override val timestamp: DateTime = new DateTime) extends EventMessage(event, id, timestamp) { override def entityId = aggregateId def this(em: EventMessage, s: AggregateSnapshotId) = this(s, em.event, em.id, em.timestamp) def aggregateId = snapshotId.aggregateId def sequenceNr = snapshotId.sequenceNr }
ahjohannessen/akka-ddd
akka-ddd-messaging/src/main/scala/pl/newicom/dddd/messaging/event/DomainEventMessage.scala
Scala
mit
623
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.catalyst.plans.logical

import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.internal.SQLConf

/**
 * A general hint for the child that is not yet resolved. This node is generated by the parser
 * and will be eliminated post analysis.
 * @param name the name of the hint
 * @param parameters the parameters of the hint
 * @param child the [[LogicalPlan]] on which this hint applies
 */
case class UnresolvedHint(name: String, parameters: Seq[Any], child: LogicalPlan)
  extends UnaryNode {

  override lazy val resolved: Boolean = false
  override def output: Seq[Attribute] = child.output
}

/**
 * A resolved hint node. The analyzer should convert all [[UnresolvedHint]] into [[ResolvedHint]].
 */
case class ResolvedHint(child: LogicalPlan, hints: HintInfo = HintInfo())
  extends UnaryNode {

  override def output: Seq[Attribute] = child.output

  override lazy val canonicalized: LogicalPlan = child.canonicalized

  override def computeStats(conf: SQLConf): Statistics = {
    val stats = child.stats(conf)
    stats.copy(hints = hints)
  }
}

case class HintInfo(broadcast: Boolean = false) {

  /** Must be called when computing stats for a join operator to reset hints. */
  def resetForJoin(): HintInfo = copy(broadcast = false)

  override def toString: String = {
    val hints = scala.collection.mutable.ArrayBuffer.empty[String]
    if (broadcast) {
      hints += "broadcast"
    }

    if (hints.isEmpty) "none" else hints.mkString("(", ", ", ")")
  }
}
map222/spark
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/hints.scala
Scala
apache-2.0
2,365
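A small illustration of the HintInfo behaviour defined above; it relies only on code shown in this file.

val hint = HintInfo(broadcast = true)
hint.toString                 // "(broadcast)"

// When computing stats for a join, the hint is reset so it does not
// leak past the join operator:
val afterJoin = hint.resetForJoin()
afterJoin.toString            // "none"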
// Copyright: 2010 - 2016 https://github.com/ensime/ensime-server/graphs // Licence: http://www.gnu.org/licenses/gpl-3.0.en.html package org.ensime.core import akka.actor._ import akka.event.LoggingReceive import org.ensime.api._ import org.ensime.indexer.DatabaseService.FqnSymbol import org.ensime.indexer.{ EnsimeVFS, SearchService } import org.ensime.model._ // only used for queries by other components case class TypeCompletionsReq(prefix: String, maxResults: Int) class Indexer( index: SearchService, implicit val config: EnsimeConfig, implicit val vfs: EnsimeVFS ) extends Actor with ActorLogging { private def typeResult(hit: FqnSymbol) = TypeSearchResult( hit.fqn, hit.fqn.split("\\\\.").last, hit.declAs, LineSourcePositionHelper.fromFqnSymbol(hit)(config, vfs) ) def oldSearchTypes(query: String, max: Int) = index.searchClasses(query, max).filterNot { name => name.fqn.endsWith("$") || name.fqn.endsWith("$class") }.map(typeResult) def oldSearchSymbols(terms: List[String], max: Int) = index.searchClassesMethods(terms, max).flatMap { case hit if hit.declAs == DeclaredAs.Class => Some(typeResult(hit)) case hit if hit.declAs == DeclaredAs.Method => Some(MethodSearchResult( hit.fqn, hit.fqn.split("\\\\.").last, hit.declAs, LineSourcePositionHelper.fromFqnSymbol(hit)(config, vfs), hit.fqn.split("\\\\.").init.mkString(".") )) case _ => None // were never supported } override def receive = LoggingReceive { case ImportSuggestionsReq(file, point, names, maxResults) => val suggestions = names.map(oldSearchTypes(_, maxResults)) sender ! ImportSuggestions(suggestions) case PublicSymbolSearchReq(keywords, maxResults) => val suggestions = oldSearchSymbols(keywords, maxResults) sender ! SymbolSearchResults(suggestions) case TypeCompletionsReq(query: String, maxResults: Int) => sender ! SymbolSearchResults(oldSearchTypes(query, maxResults)) } } object Indexer { def apply(index: SearchService)(implicit config: EnsimeConfig, vfs: EnsimeVFS): Props = Props(classOf[Indexer], index, config, vfs) }
j-mckitrick/ensime-sbt
src/sbt-test/ensime-sbt/ensime-server/core/src/main/scala/org/ensime/core/Indexer.scala
Scala
apache-2.0
2,163
package utils import org.apache.http.client.methods._ import org.apache.http.entity.{ContentType, StringEntity} import org.apache.http.impl.client.{CloseableHttpClient, HttpClients} object HttpClientService extends Serializable { def createNewClient(): CloseableHttpClient = { val builder = HttpClients.custom() builder.useSystemProperties() builder.disableRedirectHandling() builder.build() } def addContentAndPayload(request: HttpEntityEnclosingRequestBase, payload: String, contentTypes: String): HttpEntityEnclosingRequestBase = { if (payload != null) { val contentType = ContentType.create(contentTypes) val entity = new StringEntity(payload, contentType) request.setEntity(entity) } request } def createNewDeleteRequest(url: String): HttpDelete = { val request = new HttpDelete(url) request } def createNewPutRequest(url: String, payload: String, contentTypes: String): HttpPut = { val request = new HttpPut(url) addContentAndPayload(request, payload, contentTypes).asInstanceOf[HttpPut] } def createNewPostRequest(url: String, payload: String, contentTypes: String): HttpPost = { val request = new HttpPost(url) addContentAndPayload(request, payload, contentTypes).asInstanceOf[HttpPost] } def createNewGetRequest(url: String): HttpGet = { val request = new HttpGet(url) request } }
amollenkopf/dcos-iot-demo
map-webapp/app/Utils/HttpClientService.scala
Scala
apache-2.0
1,402
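A hedged usage sketch of the helpers above with Apache HttpClient 4.x; the endpoint URL and JSON payload are made up for illustration.

import org.apache.http.util.EntityUtils

val client = HttpClientService.createNewClient()
val post = HttpClientService.createNewPostRequest(
  "http://localhost:9000/features",        // hypothetical endpoint
  """{"name": "vessel-1", "speed": 12}""",  // hypothetical payload
  "application/json")

val response = client.execute(post)
try {
  println(response.getStatusLine.getStatusCode)
  println(EntityUtils.toString(response.getEntity))
} finally {
  response.close()
  client.close()
}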
package examples.demo

import java.awt.Dimension

import examples.demo.ui.{Circle, Rectangle, ShapesPanel}
import rescala.default._
import examples.demo.ui.Shape

import scala.swing.{MainFrame, SimpleSwingApplication, UIElement}

/** This is a static display of two circles and a rectangle.
  * It demonstrates how to display Shapes using our custom
  * ShapesPanel. The only REScala feature used here is Vars,
  * which we explain in the next step.
  */
object ASwingFrame extends SimpleSwingApplication {
  override lazy val top = {
    val panel = new ShapesPanel(Var(List[Shape](
      new Circle(center = Var(Pos(75, 30)), diameter = Var(25)),
      new Circle(Var(Pos(100, 100)), Var(50)),
      new Rectangle(centerX = Var(-50), centerY = Var(-100), hitboxWidth = Var(10), hitboxHeight = Var(100))
    )))
    panel.preferredSize = new Dimension(400, 300)

    new MainFrame {
      title = "REScala Demo"
      contents = panel
      setLocationRelativeTo(new UIElement { override def peer = null })
    }
  }

  override def main(args: Array[String]): Unit = {
    super.main(args)

    while (!top.visible) Thread.sleep(5)
    while (top.visible) {
      Thread.sleep(1)
      /* TODO main loop */
    }
  }
}
guidosalva/REScala
Code/Examples/examples/src/main/scala/examples/demo/ASwingFrame.scala
Scala
apache-2.0
1,222
package build import java.nio.charset.StandardCharsets import java.nio.file._ import com.google.common.jimfs.Jimfs import org.scalajs.jsenv._ import org.scalajs.jsenv.nodejs._ final class NodeJSEnvForcePolyfills(config: NodeJSEnv.Config) extends JSEnv { def this() = this(NodeJSEnv.Config()) val name: String = "Node.js forcing polyfills" private val nodeJSEnv = new NodeJSEnv(config) def start(input: Input, runConfig: RunConfig): JSRun = nodeJSEnv.start(patchInput(input), runConfig) def startWithCom(input: Input, runConfig: RunConfig, onMessage: String => Unit): JSComRun = { nodeJSEnv.startWithCom(patchInput(input), runConfig, onMessage) } private def patchInput(input: Input): Input = input match { case Input.ScriptsToLoad(scripts) => Input.ScriptsToLoad(forcePolyfills +: scripts) case _ => throw new UnsupportedInputException(input) } /** File to force all our ES 2015 polyfills to be used, by deleting the * native functions. */ private def forcePolyfills(): Path = { Files.write( Jimfs.newFileSystem().getPath("scalaJSEnvInfo.js"), """ |delete Math.fround; |delete Math.imul; |delete Math.clz32; |delete Math.log10; |delete Math.log1p; |delete Math.cbrt; |delete Math.hypot; |delete Math.expm1; |delete Math.sinh; |delete Math.cosh; |delete Math.tanh; | |delete global.Promise; | |delete global.Int8Array; |delete global.Int16Array; |delete global.Int32Array; |delete global.Uint8Array; |delete global.Uint16Array; |delete global.Uint32Array; |delete global.Float32Array; |delete global.Float64Array; """.stripMargin.getBytes(StandardCharsets.UTF_8)) } }
SebsLittleHelpers/scala-js
project/NodeJSEnvForcePolyfills.scala
Scala
apache-2.0
1,903
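For context, a hedged sketch of how a custom JSEnv like the one above is usually wired into an sbt build with sbt-scalajs; the project and setting names here are hypothetical and the real build may differ.

// In build.sbt (hypothetical wiring):
lazy val testSuite = project
  .enablePlugins(ScalaJSPlugin)
  .settings(
    // Run tests with the native ES 2015 builtins deleted so the
    // Scala.js polyfills are exercised instead of the host functions.
    Test / jsEnv := new build.NodeJSEnvForcePolyfills()
  )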
/** * Copyright 2013, 2016 Gianluca Amato <[email protected]> * * This file is part of JANDOM: JVM-based Analyzer for Numerical DOMains * JANDOM is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * JANDOM is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of a * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with JANDOM. If not, see <http://www.gnu.org/licenses/>. */ package it.unich.jandom.targets import it.unich.jandom.targets.parameters._ import it.unich.jandom.targets.parameters.NarrowingSpecs._ import it.unich.jandom.targets.parameters.WideningSpecs._ import it.unich.scalafix.BoxAssignment /** * This class is used to provide parameters for analyzers. Each instance of `Parameters` is * connected to a specific target and domain. Other parameters may be changed freely. * @tparam Tgt the type of the target * @author Gianluca Amato <[email protected]> * */ abstract class Parameters[Tgt <: Target[Tgt]] { /** * This is the domain to use for the analysis. It needs to be compatible with the target type. */ val domain: Tgt#DomainBase /** * The property to analyze. */ type Property = domain.Property // we cannot initialize here to `domain.defaultWidening` since domain is initialized later private var _widening: BoxAssignment[Tgt#ProgramPoint, domain.Property] = null /** * The current widening returned as a box assignment. */ def widening: BoxAssignment[Tgt#ProgramPoint, domain.Property] = { if (_widening == null) _widening = domain.defaultWidening _widening } /** * Set the widening starting from a box assignment. */ def widening_=(box: BoxAssignment[Tgt#ProgramPoint, domain.Property]): Unit = { _widening = box } /** * Set the widening starting from a widening specification. */ def widening_=(wspec: WideningSpec): Unit = { _widening = wspec.get(domain) } // we cannot initialize here to `_narrowing.get(domain)` since domain is initialized later private var _narrowing: BoxAssignment[Tgt#ProgramPoint, domain.Property] = null /** * The current narrowing, returned as a box assignment. */ def narrowing: BoxAssignment[Tgt#ProgramPoint, domain.Property] = { if (_narrowing == null) _narrowing = DefaultNarrowing.get(domain) _narrowing } /** * Set the narrowing starting from a box assignment. */ def narrowing_=(box: BoxAssignment[Tgt#ProgramPoint, domain.Property]): Unit = { _narrowing = box } /** * Set the widening starting from a narrowing specification. */ def narrowing_=(nspec: NarrowingSpec): Unit = { _narrowing = nspec.get(domain) } /** * This parameter determines whether results are saved for each program point or only for widening points. */ var allPPResult = true /** * This parameter should be defined for inter-procedural analsysis. */ var interpretation: Option[Interpretation[Tgt, this.type]] = None /** * This parameter determines whether standard or local widening is used. At the moment, this is only supported * by the SLSL target. */ var wideningScope = WideningScope.Output /** * This parameter determines where to put widenings. */ var wideningLocation = WideningNarrowingLocation.Loop /** * This parameter determines where to put narrowings. 
   */
  var narrowingLocation = WideningNarrowingLocation.Loop

  /**
   * This parameter determines the interlacing strategy between narrowing and widening.
   */
  var narrowingStrategy = NarrowingStrategy.Restart

  /**
   * This parameter specifies the strategy used to compute data-flow equations.
   */
  var iterationStrategy = IterationStrategy.Worklist

  /**
   * If it is true, an input/output (io) semantics is computed.
   */
  var io = false

  /**
   * This is used for putting results in tags.
   */
  var tag = scala.collection.mutable.Map[Any, Property]()

  /**
   * This is a variable used globally by the analyzer to keep track of the nesting level.
   */
  var nestingLevel = 0

  /**
   * This is a Java writer to which the analyzer writes debug information.
   */
  var debugWriter = new java.io.Writer {
    override def write(cbuf: Array[Char], off: Int, len: Int) {}
    override def flush() {}
    override def close() {}
    override def toString = ""
  }

  def log(msg: String) {
    debugWriter.write(" " * nestingLevel * 3)
    debugWriter.write(msg)
  }
}
amato-gianluca/Jandom
core/src/main/scala/it/unich/jandom/targets/Parameters.scala
Scala
lgpl-3.0
4,774
package io.fsq.twofishes.country.test import io.fsq.specs2.FSSpecificationWithJUnit import io.fsq.twofishes.country.{CountryInfo, CountryUtils} import org.specs2.matcher.MatchersImplicits // TODO: See if there's a way to clean up the extra noise this sends to stderr. class CountryUtilsSpec extends FSSpecificationWithJUnit with MatchersImplicits { "US should exist" in { CountryUtils.getNameByCode("US") mustEqual Some("United States") CountryUtils.getNameByCode("USA") mustEqual Some("United States") CountryUtils.getNameByCode("ABC") mustEqual None CountryInfo.getCountryInfoByIsoNumeric(840).map(_.iso2) mustEqual Some("US") } "SA should exist" in { CountryInfo.getCountryInfoByIsoNumeric(682).map(_.iso2) mustEqual Some("SA") } "timezones should work" in { CountryInfo.getCountryInfo("US").get.tzIDs must containTheSameElementsAs( List( "America/Adak", "America/Anchorage", "America/Boise", "America/Chicago", "America/Denver", "America/Detroit", "America/Indiana/Indianapolis", "America/Indiana/Knox", "America/Indiana/Marengo", "America/Indiana/Petersburg", "America/Indiana/Tell_City", "America/Indiana/Vevay", "America/Indiana/Vincennes", "America/Indiana/Winamac", "America/Juneau", "America/Kentucky/Louisville", "America/Kentucky/Monticello", "America/Los_Angeles", "America/Menominee", "America/Metlakatla", "America/New_York", "America/Nome", "America/North_Dakota/Beulah", "America/North_Dakota/Center", "America/North_Dakota/New_Salem", "America/Phoenix", "America/Sitka", "America/Yakutat", "Pacific/Honolulu" ) ) CountryInfo.getCountryInfo("DE").get.tzIDs must containTheSameElementsAs( List("Europe/Berlin", "Europe/Busingen") ) } }
foursquare/fsqio
test/jvm/io/fsq/twofishes/country/test/CountryInfoTest.scala
Scala
apache-2.0
1,955
package com.markfeeney.circlet /** * Signals a request is fully processed and response fully sent. * In a perfect world this would be returned only by server adapters * when they're done sending the response. But it's useful any time * you want to see the result of a handler executing (e.g. in tests). */ object Sent
overthink/circlet
src/main/scala/com/markfeeney/circlet/Sent.scala
Scala
mit
325
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.security.auth import kafka.admin.ZkSecurityMigrator import kafka.utils.{CoreUtils, Logging, TestUtils, ZkUtils} import kafka.zk.ZooKeeperTestHarness import org.apache.kafka.common.KafkaException import org.apache.kafka.common.security.JaasUtils import org.apache.zookeeper.data.ACL import org.junit.Assert._ import org.junit.{After, Before, Test} import scala.collection.JavaConverters._ import scala.util.{Failure, Success, Try} import javax.security.auth.login.Configuration class ZkAuthorizationTest extends ZooKeeperTestHarness with Logging { val jaasFile = kafka.utils.JaasTestUtils.writeJaasContextsToFile(kafka.utils.JaasTestUtils.zkSections) val authProvider = "zookeeper.authProvider.1" var zkUtils: ZkUtils = null @Before override def setUp() { System.setProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM, jaasFile.getAbsolutePath) Configuration.setConfiguration(null) System.setProperty(authProvider, "org.apache.zookeeper.server.auth.SASLAuthenticationProvider") super.setUp() zkUtils = ZkUtils(zkConnect, zkSessionTimeout, zkConnectionTimeout, zkAclsEnabled.getOrElse(JaasUtils.isZkSecurityEnabled)) } @After override def tearDown() { if (zkUtils != null) CoreUtils.swallow(zkUtils.close(), this) super.tearDown() System.clearProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM) System.clearProperty(authProvider) Configuration.setConfiguration(null) } /** * Tests the method in JaasUtils that checks whether to use * secure ACLs and authentication with ZooKeeper. */ @Test def testIsZkSecurityEnabled() { assertTrue(JaasUtils.isZkSecurityEnabled()) Configuration.setConfiguration(null) System.clearProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM) assertFalse(JaasUtils.isZkSecurityEnabled()) try { Configuration.setConfiguration(null) System.setProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM, "no-such-file-exists.conf") JaasUtils.isZkSecurityEnabled() fail("Should have thrown an exception") } catch { case _: KafkaException => // Expected } } /** * Exercises the code in ZkUtils. The goal is mainly * to verify that the behavior of ZkUtils is correct * when isSecure is set to true. 
*/ @Test def testZkUtils() { assertTrue(zkUtils.isSecure) for (path <- zkUtils.persistentZkPaths) { zkUtils.makeSurePersistentPathExists(path) if (ZkUtils.sensitivePath(path)) { val aclList = zkUtils.zkConnection.getAcl(path).getKey assertEquals(s"Unexpected acl list size for $path", 1, aclList.size) for (acl <- aclList.asScala) assertTrue(TestUtils.isAclSecure(acl, sensitive = true)) } else if (!path.equals(ZkUtils.ConsumersPath)) { val aclList = zkUtils.zkConnection.getAcl(path).getKey assertEquals(s"Unexpected acl list size for $path", 2, aclList.size) for (acl <- aclList.asScala) assertTrue(TestUtils.isAclSecure(acl, sensitive = false)) } } // Test that can create: createEphemeralPathExpectConflict zkUtils.createEphemeralPathExpectConflict("/a", "") verify("/a") // Test that can create: createPersistentPath zkUtils.createPersistentPath("/b") verify("/b") // Test that can create: createSequentialPersistentPath val seqPath = zkUtils.createSequentialPersistentPath("/c", "") verify(seqPath) // Test that can update: updateEphemeralPath zkUtils.updateEphemeralPath("/a", "updated") val valueA: String = zkUtils.zkClient.readData("/a") assertTrue(valueA.equals("updated")) // Test that can update: updatePersistentPath zkUtils.updatePersistentPath("/b", "updated") val valueB: String = zkUtils.zkClient.readData("/b") assertTrue(valueB.equals("updated")) info("Leaving testZkUtils") } /** * Tests the migration tool when making an unsecure * cluster secure. */ @Test def testZkMigration() { val unsecureZkUtils = ZkUtils(zkConnect, 6000, 6000, false) try { testMigration(zkConnect, unsecureZkUtils, zkUtils) } finally { unsecureZkUtils.close() } } /** * Tests the migration tool when making a secure * cluster unsecure. */ @Test def testZkAntiMigration() { val unsecureZkUtils = ZkUtils(zkConnect, 6000, 6000, false) try { testMigration(zkConnect, zkUtils, unsecureZkUtils) } finally { unsecureZkUtils.close() } } /** * Tests that the persistent paths cannot be deleted. */ @Test def testDelete() { info(s"zkConnect string: $zkConnect") ZkSecurityMigrator.run(Array("--zookeeper.acl=secure", s"--zookeeper.connect=$zkConnect")) deleteAllUnsecure() } /** * Tests that znodes cannot be deleted when the * persistent paths have children. */ @Test def testDeleteRecursive() { info(s"zkConnect string: $zkConnect") for (path <- ZkUtils.SecureZkRootPaths) { info(s"Creating $path") zkUtils.makeSurePersistentPathExists(path) zkUtils.createPersistentPath(s"$path/fpjwashere", "") } zkUtils.zkConnection.setAcl("/", zkUtils.defaultAcls("/"), -1) deleteAllUnsecure() } /** * Tests the migration tool when chroot is being used. */ @Test def testChroot(): Unit = { val zkUrl = zkConnect + "/kafka" zkUtils.createPersistentPath("/kafka") val unsecureZkUtils = ZkUtils(zkUrl, 6000, 6000, false) val secureZkUtils = ZkUtils(zkUrl, 6000, 6000, true) try { testMigration(zkUrl, unsecureZkUtils, secureZkUtils) } finally { unsecureZkUtils.close() secureZkUtils.close() } } /** * Exercises the migration tool. It is used in these test cases: * testZkMigration, testZkAntiMigration, testChroot. */ private def testMigration(zkUrl: String, firstZk: ZkUtils, secondZk: ZkUtils) { info(s"zkConnect string: $zkUrl") for (path <- ZkUtils.SecureZkRootPaths ++ ZkUtils.SensitiveZkRootPaths) { info(s"Creating $path") firstZk.makeSurePersistentPathExists(path) // Create a child for each znode to exercise the recurrent // traversal of the data tree firstZk.createPersistentPath(s"$path/fpjwashere", "") } // Getting security option to determine how to verify ACLs. 
// Additionally, we create the consumers znode (not in // securePersistentZkPaths) to make sure that we don't // add ACLs to it. val secureOpt: String = if (secondZk.isSecure) { firstZk.createPersistentPath(ZkUtils.ConsumersPath) "secure" } else { secondZk.createPersistentPath(ZkUtils.ConsumersPath) "unsecure" } ZkSecurityMigrator.run(Array(s"--zookeeper.acl=$secureOpt", s"--zookeeper.connect=$zkUrl")) info("Done with migration") for (path <- ZkUtils.SecureZkRootPaths ++ ZkUtils.SensitiveZkRootPaths) { val sensitive = ZkUtils.sensitivePath(path) val listParent = secondZk.zkConnection.getAcl(path).getKey assertTrue(path, isAclCorrect(listParent, secondZk.isSecure, sensitive)) val childPath = path + "/fpjwashere" val listChild = secondZk.zkConnection.getAcl(childPath).getKey assertTrue(childPath, isAclCorrect(listChild, secondZk.isSecure, sensitive)) } // Check consumers path. val consumersAcl = firstZk.zkConnection.getAcl(ZkUtils.ConsumersPath).getKey assertTrue(ZkUtils.ConsumersPath, isAclCorrect(consumersAcl, false, false)) } /** * Verifies that the path has the appropriate secure ACL. */ private def verify(path: String): Boolean = { val sensitive = ZkUtils.sensitivePath(path) val list = zkUtils.zkConnection.getAcl(path).getKey list.asScala.forall(TestUtils.isAclSecure(_, sensitive)) } /** * Verifies ACL. */ private def isAclCorrect(list: java.util.List[ACL], secure: Boolean, sensitive: Boolean): Boolean = { val isListSizeCorrect = if (secure && !sensitive) list.size == 2 else list.size == 1 isListSizeCorrect && list.asScala.forall( if (secure) TestUtils.isAclSecure(_, sensitive) else TestUtils.isAclUnsecure ) } /** * Sets up and starts the recursive execution of deletes. * This is used in the testDelete and testDeleteRecursive * test cases. */ private def deleteAllUnsecure() { System.setProperty(JaasUtils.ZK_SASL_CLIENT, "false") val unsecureZkUtils = ZkUtils(zkConnect, 6000, 6000, false) val result: Try[Boolean] = { deleteRecursive(unsecureZkUtils, "/") } // Clean up before leaving the test case unsecureZkUtils.close() System.clearProperty(JaasUtils.ZK_SASL_CLIENT) // Fail the test if able to delete result match { case Success(_) => // All done case Failure(e) => fail(e.getMessage) } } /** * Tries to delete znodes recursively */ private def deleteRecursive(zkUtils: ZkUtils, path: String): Try[Boolean] = { info(s"Deleting $path") var result: Try[Boolean] = Success(true) for (child <- zkUtils.getChildren(path)) result = (path match { case "/" => deleteRecursive(zkUtils, s"/$child") case path => deleteRecursive(zkUtils, s"$path/$child") }) match { case Success(_) => result case Failure(e) => Failure(e) } path match { // Do not try to delete the root case "/" => result // For all other paths, try to delete it case path => try { zkUtils.deletePath(path) Failure(new Exception(s"Have been able to delete $path")) } catch { case _: Exception => result } } } }
ollie314/kafka
core/src/test/scala/unit/kafka/security/auth/ZkAuthorizationTest.scala
Scala
apache-2.0
10,477
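For context, the ZkSecurityMigrator invocation exercised by testDelete and testMigration above can also be run directly. A minimal sketch, assuming the migrator lives in kafka.admin (as in the Kafka code base) and that the ZooKeeper connect string points at a reachable ensemble; both of those are assumptions here:

import kafka.admin.ZkSecurityMigrator

object SecureClusterZNodes {
  def main(args: Array[String]): Unit = {
    // Same flags as used in the tests above: set secure ACLs on Kafka's persistent ZooKeeper paths.
    ZkSecurityMigrator.run(Array(
      "--zookeeper.acl=secure",
      "--zookeeper.connect=localhost:2181"))   // hypothetical connect string
  }
}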
/* * Copyright 2014–2020 SlamData Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package quasar.impl.datasources import slamdata.Predef._ import quasar.{Condition, IdStatus, RenderTreeT, ScalarStages} import quasar.api.SchemaConfig import quasar.api.datasource._ import quasar.api.datasource.DatasourceError._ import quasar.api.resource._ import quasar.connector.ResourceSchema import quasar.connector.datasource.Loader import quasar.contrib.iota._ import quasar.contrib.scalaz.MonadError_ import quasar.impl.{CachedGetter, IndexedSemaphore, QuasarDatasource, ResourceManager}, CachedGetter.Signal._ import quasar.impl.storage.IndexedStore import quasar.qscript.{construction, educatedToTotal, InterpretedRead, QScriptEducated} import cats.effect.{Concurrent, ContextShift, Sync, Resource} import cats.~> import matryoshka.{BirecursiveT, EqualT, ShowT} import fs2.Stream import scalaz.{\\/, -\\/, \\/-, ISet, EitherT, Equal} import scalaz.syntax.either._ import scalaz.syntax.equal._ import scalaz.syntax.monad._ import scalaz.syntax.std.boolean._ import scalaz.syntax.std.option._ import shims.{monadToScalaz, equalToCats} import shims.effect.scalazEitherTSync private[quasar] final class DefaultDatasources[ T[_[_]]: BirecursiveT: EqualT: ShowT: RenderTreeT, F[_]: Sync: MonadError_[?[_], CreateError[C]], I: Equal, C: Equal, S <: SchemaConfig, R] private( semaphore: IndexedSemaphore[F, I], freshId: F[I], refs: IndexedStore[F, I, DatasourceRef[C]], modules: DatasourceModules[T, F, Stream[F, ?], I, C, R, ResourcePathType], getter: CachedGetter[F, I, DatasourceRef[C]], cache: ResourceManager[F, I, QuasarDatasource[T, F, Stream[F, ?], R, ResourcePathType]], errors: DatasourceErrors[F, I], schema: ResourceSchema[F, S, (ResourcePath, R)], byteStores: ByteStores[F, I]) extends Datasources[F, Stream[F, ?], I, C, S] { type PathType = ResourcePathType def addDatasource(ref: DatasourceRef[C]): F[CreateError[C] \\/ I] = for { i <- freshId c <- addRef[CreateError[C]](i, ref) } yield Condition.disjunctionIso.get(c).as(i) def allDatasourceMetadata: F[Stream[F, (I, DatasourceMeta)]] = Sync[F].pure(refs.entries.evalMap { case (i, DatasourceRef(k, n, _)) => errors.datasourceError(i) map { e => (i, DatasourceMeta.fromOption(k, n, e)) } }) def datasourceRef(i: I): F[ExistentialError[I] \\/ DatasourceRef[C]] = EitherT(lookupRef[ExistentialError[I]](i)) .map(modules.sanitizeRef(_)) .run def datasourceStatus(i: I): F[ExistentialError[I] \\/ Condition[Exception]] = EitherT(lookupRef[ExistentialError[I]](i)) .flatMap(_ => EitherT.rightT(errors.datasourceError(i))) .map(Condition.optionIso.reverseGet(_)) .run def pathIsResource(i: I, path: ResourcePath): F[ExistentialError[I] \\/ Boolean] = getQDS[ExistentialError[I]](i) .flatMap(ds => EitherT.rightT(ds.pathIsResource(path))) .run def prefixedChildPaths(i: I, prefixPath: ResourcePath): F[DiscoveryError[I] \\/ Stream[F, (ResourceName, ResourcePathType)]] = getQDS[DiscoveryError[I]](i) .flatMapF(_.prefixedChildPaths(prefixPath) map { 
_.toRightDisjunction(pathNotFound[DiscoveryError[I]](prefixPath)) }) .run def removeDatasource(i: I): F[Condition[ExistentialError[I]]] = refs.delete(i).ifM( dispose(i).as(Condition.normal[ExistentialError[I]]()), Condition.abnormal(datasourceNotFound[I, ExistentialError[I]](i)).point[F]) def replaceDatasource(i: I, ref: DatasourceRef[C]): F[Condition[DatasourceError[I, C]]] = throughSemaphore[F](i, λ[F ~> F](x => x)) apply { lazy val notFound = Condition.abnormal(datasourceNotFound[I, DatasourceError[I, C]](i)) getter(i) flatMap { // We're replacing, emit abnormal condition if there was no ref case Empty => notFound.point[F] case Removed(_) => // it's removed, but resource hasn't been finalized dispose(i).as(notFound) case existed => for { // We have a ref, start replacement _ <- refs.insert(i, ref) signal <- getter(i) res <- signal match { case Inserted(_) => addRef[DatasourceError[I, C]](i, ref) case Preserved(_) => Condition.normal[DatasourceError[I, C]]().point[F] case Updated(incoming, old) if DatasourceRef.atMostRenamed(incoming, old) => setRef(i, incoming) case Updated(_, _) => dispose(i) >> addRef[DatasourceError[I, C]](i, ref) // These two cases can't happen. case Empty => notFound.point[F] case Removed(_) => dispose(i).as(notFound) } } yield res } } def resourceSchema(i: I, path: ResourcePath, schemaConfig: S): F[DiscoveryError[I] \\/ schemaConfig.Schema] = { val action = for { mds <- getQDS[DiscoveryError[I]](i) fr = mds match { case QuasarDatasource.Lightweight(lw) => lw.loaders.head match { case Loader.Batch(b) => b.loadFull(InterpretedRead(path, ScalarStages.Id)) } case QuasarDatasource.Heavyweight(hw) => hw.loaders.head match { case Loader.Batch(b) => b.loadFull(dsl.Read(path, IdStatus.ExcludeId)) } } r <- EitherT.rightT(fr) res <- EitherT.rightT(schema(schemaConfig, (path, r))) } yield res action.run } def supportedDatasourceTypes: F[ISet[DatasourceType]] = modules.supportedTypes type QDS = QuasarDatasource[T, F, Stream[F, ?], R, PathType] def quasarDatasourceOf(i: I): F[Option[QDS]] = getQDS[ExistentialError[I]](i).toOption.run private def getQDS[E >: ExistentialError[I] <: DatasourceError[I, C]](i: I): EitherT[F, E, QDS] = { type Res[A] = EitherT[F, E, A] type L[M[_], A] = EitherT[M, E, A] lazy val error: Res[QDS] = EitherT.pureLeft(datasourceNotFound[I, E](i)) lazy val fromCache: Res[QDS] = cache.get(i).liftM[L] flatMap { case None => error case Some(a) => EitherT.pure(a) } throughSemaphore[Res](i, λ[F ~> Res](x => x.liftM[L])).apply { getter(i).liftM[L] flatMap { case Empty => error case Removed(_) => dispose(i).liftM[L] >> error case Inserted(ref) => for { allocated <- createErrorHandling(modules.create(i, ref)).allocated.liftM[L] _ <- cache.manage(i, allocated).liftM[L] } yield allocated._1 case Updated(incoming, old) if DatasourceRef.atMostRenamed(incoming, old) => fromCache case Preserved(_) => fromCache case Updated(ref, _) => for { _ <- dispose(i).liftM[L] allocated <- createErrorHandling(modules.create(i, ref)).allocated.liftM[L] _ <- cache.manage(i, allocated).liftM[L] } yield allocated._1 } } } private val dsl = construction.mkGeneric[T, QScriptEducated[T, ?]] private def addRef[E >: CreateError[C] <: DatasourceError[I, C]](i: I, ref: DatasourceRef[C]): F[Condition[E]] = { val action = for { _ <- verifyNameUnique[E](ref.name, i) // Grab managed ds and if it's presented shut it down mbCurrent <- EitherT.rightT(cache.get(i)) _ <- EitherT.rightT(mbCurrent.fold(().point[F])(_ => dispose(i))) allocated <- EitherT(modules.create(i, ref).run.allocated map { case 
(-\\/(e), _) => -\\/(e: E) case (\\/-(a), finalize) => \\/-((a, finalize)) }) _ <- EitherT.rightT(refs.insert(i, ref)) _ <- EitherT.rightT(cache.manage(i, allocated)) } yield () action.run.map(Condition.disjunctionIso.reverseGet(_)) } private def setRef(i: I, ref: DatasourceRef[C]): F[Condition[DatasourceError[I, C]]] = { val action = for { _ <- verifyNameUnique[DatasourceError[I, C]](ref.name, i) _ <- EitherT.rightT(refs.insert(i, ref)) } yield () action.run.map(Condition.disjunctionIso.reverseGet(_)) } private def lookupRef[E >: ExistentialError[I] <: DatasourceError[I, C]](i: I): F[E \\/ DatasourceRef[C]] = refs.lookup(i).map { case None => datasourceNotFound[I, E](i).left case Some(a) => a.right } private def verifyNameUnique[E >: CreateError[C] <: DatasourceError[I, C]](name: DatasourceName, i: I): EitherT[F, E, Unit] = EitherT { refs.entries .exists(t => t._2.name === name && t._1 =/= i) .compile .fold(false)(_ || _) .map(_ ? datasourceNameExists[E](name).left[Unit] | ().right) } private def dispose(i: I): F[Unit] = cache.shutdown(i) >> byteStores.clear(i) private val createErrorHandling: EitherT[Resource[F, ?], CreateError[C], ?] ~> Resource[F, ?] = λ[EitherT[Resource[F, ?], CreateError[C], ?] ~> Resource[F, ?]]( inp => inp.run.flatMap(_.fold( (x: CreateError[C]) => Resource.liftF(MonadError_[F, CreateError[C]].raiseError(x)), _.point[Resource[F, ?]]))) private def throughSemaphore[G[_]: Sync](i: I, fg: F ~> G): G ~> G = λ[G ~> G]{ ga => semaphore.get(i).mapK(fg).use(_ => ga) } } object DefaultDatasources { def apply[ T[_[_]]: BirecursiveT: EqualT: ShowT: RenderTreeT, F[_]: Concurrent: ContextShift: MonadError_[?[_], CreateError[C]], I: Equal, C: Equal, S <: SchemaConfig, R]( freshId: F[I], refs: IndexedStore[F, I, DatasourceRef[C]], modules: DatasourceModules[T, F, Stream[F, ?], I, C, R, ResourcePathType], cache: ResourceManager[F, I, QuasarDatasource[T, F, Stream[F, ?], R, ResourcePathType]], errors: DatasourceErrors[F, I], schema: ResourceSchema[F, S, (ResourcePath, R)], byteStores: ByteStores[F, I]) : F[DefaultDatasources[T, F, I, C, S, R]] = for { semaphore <- IndexedSemaphore[F, I] getter <- CachedGetter(refs.lookup(_)) } yield new DefaultDatasources(semaphore, freshId, refs, modules, getter, cache, errors, schema, byteStores) }
slamdata/quasar
impl/src/main/scala/quasar/impl/datasources/DefaultDatasources.scala
Scala
apache-2.0
10,568
package novo import al.strategies._ import ml.Pattern import ml.classifiers.Learner import scala.io.Source trait Args extends App { private lazy val argsb = args filter (x => x.endsWith("=y") || x.endsWith("=n")) private lazy val argsn = args filter (x => x.split('=').last.filter(x => x != '.' && x != '-').forall(x => x.isDigit)) private lazy val argsl = args filter (x => x.contains(",") || x.startsWith("file=") || x.startsWith("datasets=") || x.startsWith("neigs=")) private lazy val argst = args diff argsb diff argsn diff argsl lazy val argb = { // val tmp = (argsb map parse map (x => x._1 -> x._2.equals("y"))).toMap // val tmp2 = if (tmp.contains("dry")) tmp else Map("dry" -> false) ++ tmp // if (tmp2.contains("clear")) tmp2 else Map("clear" -> false) ++ tmp2 } lazy val argi = argsn map parse map (x => x._1 -> x._2.toInt) toMap lazy val argl = argsl map parse map { case ("file", file) => "file" -> Source.fromFile(file).getLines().toList case ("neigs", n) if !n.contains(",") => "neigs" -> List(n) case (k, v) => k -> v.split(',').toList } toMap lazy val argt = argst map parse toMap def parse(s: String) = { val Seq(a, b) = s.split('=').toSeq a -> b } def strat(pool: Seq[Pattern], l: Learner) = argt("strat") match { case "?" => println("Strats: rnd, mar, tu, eer, hs, sg"); sys.exit case "tu" => TU(pool, l, pool) case "mar" => Mar(l, pool) case "hs" => HS(pool) case "eer" => EER(l, pool, "entropy") case "sg" => SG(l, pool, "consensus") case "rnd" => Rnd(pool) case x => sys.error(s"Strategy $x not reconized.") } }
active-learning/active-learning-scala
src/main/scala/novo/Args.scala
Scala
gpl-2.0
1,637
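A quick sketch of how the Args parsing above behaves, written as an object in the same project. The flag names and values are illustrative only, and the strategy helpers (which also need a pool and a learner) are not exercised:

package novo

// Hypothetical entry point; run e.g. as: Demo strat=rnd budget=100 datasets=iris,wine dry=y
object Demo extends Args {
  println(argt.get("strat"))    // Some("rnd")               -> plain key=value pairs end up in argt
  println(argi.get("budget"))   // Some(100)                 -> purely numeric values end up in argi
  println(argl.get("datasets")) // Some(List("iris", "wine")) -> comma-separated lists end up in argl
}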
package ch.fhnw.ima.saav.model import ch.fhnw.ima.saav._ import ch.fhnw.ima.saav.model.domain.{IndicatorId, SubCriteriaId} object weight { sealed trait Weight final case class Quality(weight: Double) extends Weight case object Profile extends Weight final case class Weights(subCriteriaWeights: Map[SubCriteriaId, Weight], enabledIndicators: Set[IndicatorId]) // Works around SI-7046, a scalac issue which will be fixed in 2.11.9 / 2.12.1 // https://gitter.im/travisbrown/circe?at=582b833a37fbab5354b90cba object Weight { implicit val decodeWeight: io.circe.Decoder[Weight] = io.circe.generic.semiauto.deriveDecoder } private[model] def weightedMean(valuesWithWeight: Seq[(Double, Double)]): Option[Double] = { if (valuesWithWeight.isEmpty) None else { val (sumOfProducts, sumOfWeights) = valuesWithWeight.foldLeft((0d, 0d)) { case ((products, weights), (v, w)) => (products + (v * w), weights + w) } if (sumOfWeights == 0) { None } else { Some(sumOfProducts / sumOfWeights) } } } private[model] def mean(values: Seq[Double]): Option[Double] = { if (values.isEmpty) None else Some(values.sum / values.length) } }
fhnw-saav/saav
src/main/scala/ch/fhnw/ima/saav/model/weight.scala
Scala
mit
1,225
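Since weightedMean and mean above are private[model], the following worked example is assumed to live somewhere inside ch.fhnw.ima.saav.model; the numbers are arbitrary:

package ch.fhnw.ima.saav.model

object WeightExample extends App {
  import weight._

  // (value, weight) pairs: (3*1 + 5*3) / (1 + 3) = 18 / 4 = 4.5
  println(weightedMean(Seq((3.0, 1.0), (5.0, 3.0))))  // Some(4.5)
  println(weightedMean(Seq((3.0, 0.0))))              // None, because the sum of weights is 0
  println(mean(Seq(2.0, 4.0)))                        // Some(3.0)
  println(mean(Nil))                                  // None
}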
/** * Copyright 2015, 2016 Gianluca Amato <[email protected]> * * This file is part of ScalaFix. * ScalaFix is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * ScalaFix is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of a * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with ScalaFix. If not, see <http://www.gnu.org/licenses/>. */ package it.unich.scalafix.finite import it.unich.scalafix.utils.Relation import scala.annotation.tailrec import scala.collection.mutable /** * This class represents a depth-first ordering of a graph, as it appears in the Aho, Sehti, Ullman book * on compilers. It extends the concept of graph ordering distinguishing between Advancing, Retreating * and Cross edges. * * @tparam N the type of the nodes of the graph */ abstract class DFOrdering[N] extends GraphOrdering[N] { import DFOrdering.EdgeType._ /** * It returns the type of an edge u -> v. * * @param u source node * @param v target node */ def edgeType(u: N, v: N): EdgeType } /** * The companion class for a DFOrdering defines the required enumerations and factory * methods. */ object DFOrdering { /** * Every edge may be of three different kinds: Advancing, Retreating and Cross. */ object EdgeType extends Enumeration { type EdgeType = Value val Advancing = Value val Retreating = Value val Cross = Value } /** * Returns the DFOrdering for a finite equation system. */ def apply[N](eqs: FiniteEquationSystem[N, _]): DFOrdering[N] = new DFOrderingFromR[N](eqs.infl, eqs.unknowns, eqs.inputUnknowns) /** * Returns a DFOrdering for the graph encoded by the adjacency relation `r`, set of nodes in `nodes` and * set of initial nodes in `entries`. */ def apply[N](r: Relation[N], nodes: Iterable[N], entries: Iterable[N]): DFOrdering[N] = new DFOrderingFromR[N](r, nodes, entries) /** * This class is a depth-first ordering for the influence relation `relation`. * * @param r the adjacency relation from which we compute the DFOrdering. * @param nodes the set of all initial nodes * @param entries nodes from which to start the visit. */ private final class DFOrderingFromR[N](r: Relation[N], nodes: Iterable[N], entries: Iterable[N]) extends DFOrdering[N] { import DFOrdering.EdgeType._ val stringPrefix = "GraphOrdering" private val dfn = mutable.HashMap.empty[N, Int] // Internal computation private val dfst = mutable.Set.empty[(N, N)] // Depth-First spanning tree private val heads = mutable.Set.empty[N] // Set of heads initDFO() def initDFO() { val visited = mutable.LinkedHashSet.empty[N] var c = 0 for (x <- entries) if (!(visited contains x)) dfsVisit(x) for (x <- nodes) if (!(visited contains x)) dfsVisit(x) def dfsVisit(u: N) { visited += u for (v <- r(u)) if (!(visited contains v)) { dfst += (u -> v) dfsVisit(v) } else if (!dfn.isDefinedAt(v)) heads += v dfn += u -> c c -= 1 } } lazy val toSeq: Seq[N] = nodes.toSeq.sorted(this) def compare(x: N, y: N): Int = scala.math.signum(dfn(x) - dfn(y)) /** * Returns whether y is a child of x in the depth-first spanning tree. 
*/ @tailrec private def connected(x: N, y: N): Boolean = { val z = dfst.find(_._2 == y) if (z.isEmpty) false else if (z.get._1 == x) true else connected(x, z.get._1) } def edgeType(x: N, y: N): EdgeType = if (y <= x) Retreating else if (connected(x, y)) Advancing else Cross def isHead(u: N): Boolean = heads contains u } }
jandom-devel/ScalaFix
core/src/main/scala/it/unich/scalafix/finite/DFOrdering.scala
Scala
gpl-3.0
4,167
/* * Copyright (c) 2014 Paul Bernard * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Spectrum Finance is based in part on: * QuantLib. http://quantlib.org/ * */ package org.quantintel.ql.currencies import org.quantintel.ql.math.Rounding object Data{ def apply (name: String, code: String, numericCode: Int, symbol: String, fractionSymbol: String, fractionPerUnit: Int, rounding: Rounding, formatString: String, triangulationCurrency: Currency) : Data = { new Data(name, code, numericCode, symbol, fractionSymbol, fractionPerUnit, rounding, formatString, triangulationCurrency) } def apply (name: String, code: String, numericCode: Int, symbol: String, fractionSymbol: String, fractionPerUnit: Int, rounding: Rounding, formatString: String) : Data = { new Data(name, code, numericCode, symbol, fractionSymbol, fractionPerUnit, rounding, formatString) } def apply : Data = new Data() } class Data { var name: String = null var code: String = null var numericCode: Int = 0 var symbol: String = null var fractionSymbol: String = null var fractionPerUnit: Int = 0 var rounding: Rounding = null var formatString: String = null var triangulationCurrency: Currency = null def this (name: String, code: String, numericCode: Int, symbol: String, fractionSymbol: String, fractionPerUnit: Int, rounding: Rounding, formatString: String, triangulationCurrency: Currency) { this this.name = name this.code = code this.numericCode = numericCode this.symbol = symbol this.fractionSymbol = fractionSymbol this.fractionPerUnit = fractionPerUnit this.rounding = rounding this.formatString = formatString this.triangulationCurrency = triangulationCurrency } def this (name: String, code: String, numericCode: Int, symbol: String, fractionSymbol: String, fractionPerUnit: Int, rounding: Rounding, formatString: String){ this(name, code, numericCode, symbol, fractionSymbol, fractionPerUnit, rounding, formatString, new Currency()) } override def clone: Data = { Data(name, code, numericCode, symbol, fractionSymbol, fractionPerUnit, rounding, formatString, triangulationCurrency.clone) } } object Currency { def apply(name: String, code: String, numericCode: Int, symbol: String, fractionSymbol: String, fractionPerUnit: Int, rounding: Rounding, formatString: String, triangulationCurrency: Currency): Currency = new Currency(name, code, numericCode, symbol, fractionSymbol, fractionPerUnit, rounding, formatString, triangulationCurrency) def == (c1: Currency, c2: Currency) : Boolean = { c1.equals(c2) } def != (c1: Currency, c2: Currency) : Boolean = { Currency != (c1, c2) } } /** * @author Paul Bernard */ class Currency { var data : Data = new Data() def this (name: String, code: String, numericCode: Int, symbol: String, fractionSymbol: String, fractionPerUnit: Int, rounding: Rounding, formatString: String, tc: Currency){ this data = Data(name, code, numericCode, symbol, fractionSymbol, fractionPerUnit, rounding, formatString) } def name : String = data.name def code : String = data.code def numericCode: Int = data.numericCode def symbol: String = data.symbol def 
fractionSymbol : String = data.fractionSymbol

  def fractionPerUnit: Int = data.fractionPerUnit

  def rounding: Rounding = data.rounding

  def formatString: String = data.formatString

  def triangulationCurrency : Currency = data.triangulationCurrency

  def == (currency: Currency) : Boolean = equals(currency)

  // use value equality, not reference equality (eq), so != stays consistent with ==
  def != (currency: Currency) : Boolean = !equals(currency)

  def empty : Boolean = data == null

  override def toString : String = {
    if (!empty) code else "(null currency)"
  }

  override def equals(obj: Any): Boolean = {
    // compare references with eq: using == here would call equals again and recurse forever
    if (obj.asInstanceOf[AnyRef] eq this) true
    else if (obj == null) false
    else obj.isInstanceOf[Currency] && obj.asInstanceOf[Currency].fEquals(this)
  }

  override def hashCode: Int = {
    val prime : Int = 31
    var result: Int = 1
    result = prime * result + (if (data == null) 0 else data.hashCode)
    result = prime * result + (if (data == null || name == null) 0 else name.hashCode)
    result
  }

  def fEquals(other: Currency): Boolean = {
    if (this.empty && other.empty) true
    else if (this.name.equals(other.name)) true
    else false
  }

  override def clone : Currency = {
    val currency = new Currency
    if (data != null) currency.data = data.clone()
    currency
  }

}

quantintel/spectrum
financial/src/main/scala/org/quantintel/ql/currencies/Currency.scala
Scala
apache-2.0
5,489
package com.cloudray.scalapress.search import com.cloudray.scalapress.framework.{ScalapressContext, MenuProvider, MenuItem} /** @author Stephen Samuel */ class SearchMenuProvider extends MenuProvider { def menu(context: ScalapressContext): (String, Seq[MenuItem]) = { ("Search", Seq( MenuItem("Search Settings", Some("glyphicon glyphicon-search"), "/backoffice/searchsettings"), MenuItem("Search Forms", Some("glyphicon glyphicon-align-center"), "/backoffice/searchform"), MenuItem("Saved Searches", Some("glyphicon glyphicon-save"), "/backoffice/savedsearch") )) } }
vidyacraghav/scalapress
src/main/scala/com/cloudray/scalapress/search/SearchMenuProvider.scala
Scala
apache-2.0
615
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.common import org.junit.Assert._ import collection.mutable.ArrayBuffer import org.junit.Test import kafka.producer.ProducerConfig import kafka.consumer.ConsumerConfig class ConfigTest { @Test @deprecated("This test is deprecated and it will be removed in a future release.", "0.10.0.0") def testInvalidClientIds() { val invalidClientIds = new ArrayBuffer[String]() val badChars = Array('/', '\\\\', ',', '\\u0000', ':', "\\"", '\\'', ';', '*', '?', ' ', '\\t', '\\r', '\\n', '=') for (weirdChar <- badChars) { invalidClientIds += "Is" + weirdChar + "illegal" } for (i <- 0 until invalidClientIds.size) { try { ProducerConfig.validateClientId(invalidClientIds(i)) fail("Should throw InvalidClientIdException.") } catch { case _: InvalidConfigException => // This is good } } val validClientIds = new ArrayBuffer[String]() validClientIds += ("valid", "CLIENT", "iDs", "ar6", "VaL1d", "_0-9_.", "") for (i <- 0 until validClientIds.size) { try { ProducerConfig.validateClientId(validClientIds(i)) } catch { case _: Exception => fail("Should not throw exception.") } } } @Test def testInvalidGroupIds() { val invalidGroupIds = new ArrayBuffer[String]() val badChars = Array('/', '\\\\', ',', '\\u0000', ':', "\\"", '\\'', ';', '*', '?', ' ', '\\t', '\\r', '\\n', '=') for (weirdChar <- badChars) { invalidGroupIds += "Is" + weirdChar + "illegal" } for (i <- 0 until invalidGroupIds.size) { try { ConsumerConfig.validateGroupId(invalidGroupIds(i)) fail("Should throw InvalidGroupIdException.") } catch { case _: InvalidConfigException => // This is good } } val validGroupIds = new ArrayBuffer[String]() validGroupIds += ("valid", "GROUP", "iDs", "ar6", "VaL1d", "_0-9_.", "") for (i <- 0 until validGroupIds.size) { try { ConsumerConfig.validateGroupId(validGroupIds(i)) } catch { case _: Exception => fail("Should not throw exception.") } } } }
wangcy6/storm_app
frame/kafka-0.11.0/kafka-0.11.0.1-src/core/src/test/scala/unit/kafka/common/ConfigTest.scala
Scala
apache-2.0
2,934
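A small sketch of the same (deprecated) validators used outside of JUnit; the client and group ids are arbitrary:

import kafka.producer.ProducerConfig
import kafka.consumer.ConsumerConfig
import kafka.common.InvalidConfigException

object IdValidationDemo extends App {
  ProducerConfig.validateClientId("valid_client.0")   // passes silently
  ConsumerConfig.validateGroupId("my-group")          // passes silently
  try ConsumerConfig.validateGroupId("has space")     // ' ' is one of the illegal characters above
  catch { case _: InvalidConfigException => println("rejected, as expected") }
}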
package forimpatient.chapter02 /** * Created by Iryna Kharaborkina on 7/25/16. * * Solution to the Chapter 02 Exercise 07 'Scala for the Impatient' by Horstmann C.S. * * Without loop compute the product of the Unicode codes of all letters in a string. For * example, the product of the characters in "Hello" is 9415087488L. (Hint: Look at the StringOps Scaladoc.) */ object Exercise07 extends App { println("Chapter 02 Exercise 07") val prod = "Hello".map(_.toLong).product assert(prod == 9415087488L) println(prod) }
Kiryna/Scala-for-the-Impatient
src/forimpatient/chapter02/Exercise07.scala
Scala
apache-2.0
542
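To make the expected value less magical: the Unicode codes are H = 72, e = 101, l = 108, l = 108, o = 111, and 72 * 101 * 108 * 108 * 111 = 9415087488. A fold gives the same result without building an intermediate collection; this is an alternative sketch, not part of the book's solution:

package forimpatient.chapter02

object Exercise07Fold extends App {
  // 72 * 101 * 108 * 108 * 111 = 9415087488
  val prod = "Hello".foldLeft(1L)(_ * _.toLong)
  assert(prod == 9415087488L)
  println(prod)
}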
package assets import java.text.SimpleDateFormat import java.util.TimeZone import com.github.nscala_time.time.Imports._ import play.utils.UriEncoding /** * Created by oscar on 12/17/14. */ sealed trait Segment { def contains (s: Segment): Boolean def contains (p: Long): Boolean def intersects (s: Segment): Boolean def starts (p: Long): Boolean def ends (p: Long): Boolean } object Segment { case object NoSegment extends Segment { override def contains(p: Long): Boolean = false override def contains (s: Segment): Boolean = false override def intersects(s: Segment): Boolean = false override def starts(p: Long): Boolean = false override def ends(p: Long): Boolean = false } case class Empty(At: Long) extends Segment { override def contains(p: Long): Boolean = At == p override def contains(s: Segment): Boolean = s match { case NoSegment => true case Empty(x) => contains(x) case Cons(x,y) => false } override def intersects(s: Segment): Boolean = contains(s) override def starts(p: Long): Boolean = contains(p) override def ends(p: Long): Boolean = contains(p) } case class Cons(From: Long, To: Long) extends Segment { override def contains(p: Long): Boolean = From <= p && p <= To override def contains(s: Segment): Boolean = s match { case NoSegment => true case Empty(x) => contains(x) case Cons(x,y) => contains(x) && contains(y) } override def intersects(s: Segment): Boolean = s match { case NoSegment => true case Empty(x) => contains(x) case Cons(x,y) => contains(x) || contains(y) } override def starts(p: Long): Boolean = p == From override def ends(p: Long): Boolean = p == To } def length(s: Segment): Long = s match { case NoSegment => 0L case Empty(_) => 0L case Cons(x,y) => y - x } def encodePoint(p: Long): String = { val dFormat = new SimpleDateFormat("MM/dd/yyyy HH:mm:ss") dFormat.setTimeZone(TimeZone.getTimeZone("GMT")) UriEncoding.encodePathSegment(dFormat.format(p), "UTF-8") } def encodePoints(ps: List[Long]): String = ps.map(encodePoint).mkString("~") def uriEncodeSegment (s: Segment): String = s match { case NoSegment => "" case Empty(x) => encodePoints(List(x, x)) case Cons(x,y) => encodePoints(List(x, y)) } implicit class SegmentUtils(s: Segment) { def uriEncode: String = uriEncodeSegment (s) def length: Long = Segment.length(s) } def apply(start:DateTime, end:DateTime): Segment = apply(start.getMillis, end.getMillis) def apply(start:Long, end:Long) = { val diff = end - start diff match { case 0 => Empty(start) case d => if (d < 0) NoSegment else Cons(start, end) } } }
wigahluk/funes
app/assets/Segment.scala
Scala
apache-2.0
3,019
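A short usage sketch of the Segment API above, assumed to run inside the same project (it relies on Play's UriEncoding transitively); the epoch-millisecond values are arbitrary:

import assets.Segment

object SegmentDemo extends App {
  val s = Segment(1000L, 5000L)                 // Cons(1000, 5000)
  println(s.contains(2500L))                    // true
  println(s.intersects(Segment(4000L, 9000L)))  // true
  println(Segment(5000L, 1000L))                // NoSegment: end precedes start
  println(s.length)                             // 4000, via the SegmentUtils implicit class
  println(s.uriEncode)                          // URL-encoded "MM/dd/yyyy HH:mm:ss" GMT timestamps joined by '~'
}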
package io.flow.lint import io.apibuilder.spec.v0.models._ import org.scalatest.funspec.AnyFunSpec import org.scalatest.matchers.should.Matchers class UpsertedDeletedEventModelsSpec extends AnyFunSpec with Matchers { private[this] val linter = linters.UpsertedDeletedEventModels def buildService(modelName: String, fieldName: String, fieldType: String): Service = { Services.Base.copy( models = Seq( Services.buildModel( name = modelName, Seq(Services.buildField(name = fieldName, `type` = fieldType)) ) ) ) } it("with valid names") { linter.validate(buildService("example_upserted", "example", "example")) should be (Nil) linter.validate(buildService("example_upserted", "foo", "example")) should be ( Seq("Model example_upserted: Event must contain a field whose name and type contain example") ) } it("with partial names") { linter.validate(buildService("card_authorization_upserted", "card_authorization", "card_authorization")) should be(Nil) linter.validate(buildService("card_authorization_upserted", "card", "card_authorization")) should be(Nil) linter.validate(buildService("card_authorization_upserted", "authorization", "card_authorization")) should be(Nil) linter.validate(buildService("card_authorization_upserted", "foo", "card_authorization")) should be( Seq("Model card_authorization_upserted: Event must contain a field whose name and type contain card or authorization") ) linter.validate(buildService("card_authorization_upserted", "card_authorization", "foo")) should be( Seq("Model card_authorization_upserted: Event must contain a field whose name and type contain card or authorization") ) linter.validate(buildService("card_authorization_deleted", "card_authorization", "foo")) should be( Seq("Model card_authorization_deleted: Event must contain a field whose name and type contain card or authorization") ) } it("deleted events can just use 'id' w/ type string") { linter.validate(buildService("card_authorization_deleted", "id", "string")) should be(Nil) linter.validate(buildService("card_authorization_deleted", "id", "object")) should be( Seq("Model card_authorization_deleted: Type of field 'id' must be 'string' and not 'object'") ) } it("ignores legacy models") { linter.validate(buildService("item_origin_deleted", "foo", "item_origin")) should be(Nil) } }
flowcommerce/api-lint
src/test/scala/io/flow/lint/UpsertedDeletedEventModelsSpec.scala
Scala
mit
2,473
/* * Copyright 2013 Maurício Linhares * * Maurício Linhares licenses this file to you under the Apache License, * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ package com.github.mauricio.async.db.general import scala.collection.IndexedSeq import collection.mutable.ArrayBuffer import com.github.mauricio.async.db.{RowData, ResultSet} import com.github.mauricio.async.db.util.Log object MutableResultSet { val log = Log.get[MutableResultSet[Nothing]] } class MutableResultSet[T <: ColumnData](val columnTypes: ArrayBuffer[T]) extends ResultSet { private val rows = new ArrayBuffer[RowData]() private val columnMapping: Map[String, Int] = this.columnTypes.indices .map(index => (this.columnTypes(index).name, index)) .toMap val columnNames: IndexedSeq[String] = this.columnTypes.map(c => c.name) val types: IndexedSeq[Int] = this.columnTypes.map(c => c.dataType) override def length: Int = this.rows.length override def apply(idx: Int): RowData = this.rows(idx) def addRow(row: Array[Any]): Unit = { this.rows += new ArrayRowData(this.rows.size, this.columnMapping, row) } }
dripower/postgresql-async
db-async-common/src/main/scala/com/github/mauricio/async/db/general/MutableResultSet.scala
Scala
apache-2.0
1,592
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.internal

import java.io.File
import java.net.URI

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path

import org.apache.spark.annotation.Unstable
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.analysis.{Analyzer, FunctionRegistry}
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.optimizer.Optimizer
import org.apache.spark.sql.catalyst.parser.ParserInterface
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.connector.catalog.CatalogManager
import org.apache.spark.sql.execution._
import org.apache.spark.sql.streaming.StreamingQueryManager
import org.apache.spark.sql.util.ExecutionListenerManager
import org.apache.spark.util.DependencyUtils

/**
 * A class that holds all session-specific state in a given [[SparkSession]].
 *
 * @param sharedState The state shared across sessions, e.g. global view manager, external catalog.
 * @param conf SQL-specific key-value configurations.
 * @param experimentalMethods Interface to add custom planning strategies and optimizers.
 * @param functionRegistry Internal catalog for managing functions registered by the user.
 * @param udfRegistration Interface exposed to the user for registering user-defined functions.
 * @param catalogBuilder a function to create an internal catalog for managing table and database
 *                       states.
 * @param sqlParser Parser that extracts expressions, plans, table identifiers etc. from SQL texts.
 * @param analyzerBuilder A function to create the logical query plan analyzer for resolving
 *                        unresolved attributes and relations.
 * @param optimizerBuilder a function to create the logical query plan optimizer.
 * @param planner Planner that converts optimized logical plans to physical plans.
 * @param streamingQueryManagerBuilder A function to create a streaming query manager to
 *                                     start and stop streaming queries.
 * @param listenerManager Interface to register custom
 *                        [[org.apache.spark.sql.util.QueryExecutionListener]]s.
 * @param resourceLoaderBuilder a function to create a session shared resource loader to load JARs,
 *                              files, etc.
 * @param createQueryExecution Function used to create QueryExecution objects.
 * @param createClone Function used to create clones of the session state.
*/ private[sql] class SessionState( sharedState: SharedState, val conf: SQLConf, val experimentalMethods: ExperimentalMethods, val functionRegistry: FunctionRegistry, val udfRegistration: UDFRegistration, catalogBuilder: () => SessionCatalog, val sqlParser: ParserInterface, analyzerBuilder: () => Analyzer, optimizerBuilder: () => Optimizer, val planner: SparkPlanner, val streamingQueryManagerBuilder: () => StreamingQueryManager, val listenerManager: ExecutionListenerManager, resourceLoaderBuilder: () => SessionResourceLoader, createQueryExecution: LogicalPlan => QueryExecution, createClone: (SparkSession, SessionState) => SessionState, val columnarRules: Seq[ColumnarRule], val queryStagePrepRules: Seq[Rule[SparkPlan]]) { // The following fields are lazy to avoid creating the Hive client when creating SessionState. lazy val catalog: SessionCatalog = catalogBuilder() lazy val analyzer: Analyzer = analyzerBuilder() lazy val optimizer: Optimizer = optimizerBuilder() lazy val resourceLoader: SessionResourceLoader = resourceLoaderBuilder() // The streamingQueryManager is lazy to avoid creating a StreamingQueryManager for each session // when connecting to ThriftServer. lazy val streamingQueryManager: StreamingQueryManager = streamingQueryManagerBuilder() def catalogManager: CatalogManager = analyzer.catalogManager def newHadoopConf(): Configuration = SessionState.newHadoopConf( sharedState.sparkContext.hadoopConfiguration, conf) def newHadoopConfWithOptions(options: Map[String, String]): Configuration = { val hadoopConf = newHadoopConf() options.foreach { case (k, v) => if ((v ne null) && k != "path" && k != "paths") { hadoopConf.set(k, v) } } hadoopConf } /** * Get an identical copy of the `SessionState` and associate it with the given `SparkSession` */ def clone(newSparkSession: SparkSession): SessionState = createClone(newSparkSession, this) // ------------------------------------------------------ // Helper methods, partially leftover from pre-2.0 days // ------------------------------------------------------ def executePlan(plan: LogicalPlan): QueryExecution = createQueryExecution(plan) } private[sql] object SessionState { def newHadoopConf(hadoopConf: Configuration, sqlConf: SQLConf): Configuration = { val newHadoopConf = new Configuration(hadoopConf) sqlConf.getAllConfs.foreach { case (k, v) => if (v ne null) newHadoopConf.set(k, v) } newHadoopConf } } /** * Concrete implementation of a [[BaseSessionStateBuilder]]. */ @Unstable class SessionStateBuilder( session: SparkSession, parentState: Option[SessionState], options: Map[String, String]) extends BaseSessionStateBuilder(session, parentState, options) { override protected def newBuilder: NewBuilder = new SessionStateBuilder(_, _, Map.empty) } /** * Session shared [[FunctionResourceLoader]]. */ @Unstable class SessionResourceLoader(session: SparkSession) extends FunctionResourceLoader { override def loadResource(resource: FunctionResource): Unit = { resource.resourceType match { case JarResource => addJar(resource.uri) case FileResource => session.sparkContext.addFile(resource.uri) case ArchiveResource => throw new AnalysisException( "Archive is not allowed to be loaded. If YARN mode is used, " + "please use --archives options while calling spark-submit.") } } def resolveJars(path: URI): Seq[String] = { path.getScheme match { case "ivy" => DependencyUtils.resolveMavenDependencies(path) case _ => path.toString :: Nil } } /** * Add a jar path to [[SparkContext]] and the classloader. 
* * Note: this method seems not access any session state, but a Hive based `SessionState` needs * to add the jar to its hive client for the current session. Hence, it still needs to be in * [[SessionState]]. */ def addJar(path: String): Unit = { val uri = URI.create(path) resolveJars(uri).foreach { p => session.sparkContext.addJar(p) val uri = new Path(p).toUri val jarURL = if (uri.getScheme == null) { // `path` is a local file path without a URL scheme new File(p).toURI.toURL } else { // `path` is a URL with a scheme uri.toURL } session.sharedState.jarClassLoader.addURL(jarURL) } Thread.currentThread().setContextClassLoader(session.sharedState.jarClassLoader) } }
witgo/spark
sql/core/src/main/scala/org/apache/spark/sql/internal/SessionState.scala
Scala
apache-2.0
7,909
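The one piece of the file above that is easy to exercise in isolation is the newHadoopConf helper, which copies all SQL confs onto a fresh Hadoop Configuration. A minimal sketch, assuming it is compiled inside the org.apache.spark.sql package (the SessionState object is private[sql]) and that SQLConf.setConfString is available as in Spark; the conf key and value are illustrative:

package org.apache.spark.sql.demo

import org.apache.hadoop.conf.Configuration
import org.apache.spark.sql.internal.{SQLConf, SessionState}

object HadoopConfDemo {
  def main(args: Array[String]): Unit = {
    val sqlConf = new SQLConf
    sqlConf.setConfString("spark.sql.shuffle.partitions", "8")  // hypothetical setting

    // Every SQL conf ends up on the Hadoop Configuration as well.
    val hadoopConf = SessionState.newHadoopConf(new Configuration(), sqlConf)
    println(hadoopConf.get("spark.sql.shuffle.partitions"))     // "8"
  }
}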
package BIDMat object JPlotting { import org.jfree.chart._ import org.jfree.chart.plot._ import org.jfree.data.xy._ import org.jfree.data.statistics._ import org.jfree.chart.renderer.xy._ import org.jfree.util._ import java.awt.image.BufferedImage var ifigure:Int = 0; val marksmat = Array("points","dots","various"); var plotXscale = 500; var plotYscale = 400; var plotPscale = 0.2f; def _addToDataset(dataset:XYSeriesCollection, mats:Array[Mat]) = { if (mats.length == 1) { val m = mats(0) if (m.nrows == 1 || m.ncols == 1) { val p = new XYSeries("plot"); m match { case mf:FMat => for (i <- 0 until m.length) p.add(i, mf(i)); case md:DMat => for (i <- 0 until m.length) p.add(i, md(i)); case mi:IMat => for (i <- 0 until m.length) p.add(i, mi(i)); } dataset.addSeries(p); } else { for (i <- 0 until m.ncols) { val p = new XYSeries("plot %d" format i); m match { case mf:FMat => for (j <- 0 until m.nrows) p.add(j, mf(j,i)); case md:DMat => for (j <- 0 until m.nrows) p.add(j, md(j,i)); case mi:IMat => for (j <- 0 until m.nrows) p.add(j, mi(j,i)); } dataset.addSeries(p); } } } else { var i = 0 while (i*2 < mats.length) { val p = new XYSeries("plot %d" format i); (mats(2*i), mats(2*i+1)) match { case (a:FMat, b:FMat) => for (j <- 0 until a.length) p.add(a(j), b(j)) case (a:FMat, b:DMat) => for (j <- 0 until a.length) p.add(a(j), b(j)) case (a:DMat, b:FMat) => for (j <- 0 until a.length) p.add(a(j), b(j)) case (a:DMat, b:DMat) => for (j <- 0 until a.length) p.add(a(j), b(j)) case (a:FMat, b:IMat) => for (j <- 0 until a.length) p.add(a(j), b(j)) case (a:DMat, b:IMat) => for (j <- 0 until a.length) p.add(a(j), b(j)) case (a:IMat, b:FMat) => for (j <- 0 until a.length) p.add(a(j), b(j)) case (a:IMat, b:DMat) => for (j <- 0 until a.length) p.add(a(j), b(j)) case (a:IMat, b:IMat) => for (j <- 0 until a.length) p.add(a(j), b(j)) } dataset.addSeries(p); i += 1; } } } def _plot(mats:Mat*)(xlog:Boolean=false, ylog:Boolean=false, isconnected:Boolean=true, bars:Boolean=false, marks:Int = 0):BufferedImage = { import java.awt.Color; if (Mat.inline) { System.setProperty("java.awt.headless", "true"); } val fmats = mats.toArray.map(MatFunctions.cpu) val dataset = new XYSeriesCollection(); _addToDataset(dataset, fmats); ifigure += 1; val chart = if (isconnected) { ChartFactory.createXYLineChart("Figure %d" format ifigure, "X", "Y", dataset); } else if (bars) { ChartFactory.createXYBarChart("Figure %d" format ifigure, "X", false, "Y", dataset); } else { ChartFactory.createScatterPlot("Figure %d" format ifigure, "X", "Y", dataset); } val plot = chart.getXYPlot; if (!isconnected && !bars) { val dot = ShapeUtilities.createDiamond(plotPscale); val renderer = plot.getRenderer; for (i <- 0 until dataset.getSeriesCount) renderer.setSeriesShape(i, dot); } if (xlog) { val xaxis = new org.jfree.chart.axis.LogarithmicAxis("X"); plot.setDomainAxis(xaxis); } if (ylog) { val yaxis = new org.jfree.chart.axis.LogarithmicAxis("Y"); plot.setRangeAxis(yaxis); } plot.setBackgroundPaint(Color.white); plot.setDomainGridlinePaint(Color.gray); plot.setRangeGridlinePaint(Color.gray); if (!Mat.inline) { val frame = new ChartFrame("Figure %d" format ifigure, chart); frame.pack(); frame.setVisible(true); } chart.createBufferedImage(plotXscale, plotYscale); } def plot(mats:Mat*) = _plot(mats: _*)(); def scatter(mats:Mat*) = _plot(mats: _*)(marks=1, isconnected=false); def loglog(mats:Mat*) = _plot(mats: _*)(xlog=true, ylog=true) def semilogx(mats:Mat*) = _plot(mats: _*)(xlog=true) def semilogy(mats:Mat*) = _plot(mats: _*)(ylog=true) def 
barplot(mats:Mat*) = _plot(mats: _*)(isconnected=false, bars=true) def barloglog(mats:Mat*) = _plot(mats: _*)(xlog=true, ylog=true, isconnected=false, bars=true) def barsemilogx(mats:Mat*) = _plot(mats: _*)(xlog=true, isconnected=false, bars=true) def barsemilogy(mats:Mat*) = _plot(mats: _*)(ylog=true, isconnected=false, bars=true) def p_plot(mats:Mat*) = _plot(mats: _*)(isconnected=false) def ploglog(mats:Mat*) = _plot(mats: _*)(xlog=true, ylog=true, isconnected=false) def psemilogx(mats:Mat*) = _plot(mats: _*)(xlog=true, isconnected=false) def psemilogy(mats:Mat*) = _plot(mats: _*)(ylog=true, isconnected=false) def hist(m:Mat, nbars:Int=10):BufferedImage = { import java.awt.Color; if (Mat.inline) { System.setProperty("java.awt.headless", "true"); } var dd = new HistogramDataset(); if (m.nrows == 1 || m.ncols == 1) { m match { case mf:FMat => { dd.addSeries("H1", DMat(mf).data, nbars); } case md:DMat => { dd.addSeries("H1", md.data, nbars); } case mi:IMat => { dd.addSeries("H1", DMat(mi).data, nbars); } } } val show = false; val toolTips = false; val urls = true; ifigure += 1; val chart = ChartFactory.createHistogram("Figure %d" format ifigure, "X", "Count", dd, PlotOrientation.VERTICAL, show, toolTips, urls); val plot = chart.getXYPlot(); plot.getRenderer.asInstanceOf[XYBarRenderer].setBarPainter(new StandardXYBarPainter); plot.setBackgroundPaint(Color.white); if (!Mat.inline) { val frame = new ChartFrame("Figure %d" format ifigure, chart); frame.pack(); frame.setVisible(true); } chart.createBufferedImage(plotXscale, plotYscale); } }
phlip9/BIDMat
src/main/scala/BIDMat/JPlotting.scala
Scala
bsd-3-clause
5,973
package xitrum.util import java.io.File import scala.collection.mutable.{Map => MMap} import sclasner.Discoverer /** * This utility is useful for hot reloading .class files in defined directories * during development. */ class ClassFileLoader extends ClassLoader { // Directories to search for .class files, example: Seq("target/scala-2.11/classes") private val searchDirs = Discoverer.containers.filter(_.isDirectory).map(_.toPath) // Need to cache because calling defineClass twice will cause exception protected val cache = MMap[String, Class[_]]() override def loadClass(className: String): Class[_] = { findClass(className) } override def findClass(className: String): Class[_] = synchronized { cache.get(className) match { case Some(klass) => klass case None => classNameToFilePath(className) match { case None => Thread.currentThread.getContextClassLoader.loadClass(className) case Some(path) => val bytes = Loader.bytesFromFile(path) val klass = defineClass(className, bytes, 0, bytes.length) cache(className) = klass klass } } } //---------------------------------------------------------------------------- /** @return None to use the fallback ClassLoader */ protected def classNameToFilePath(className: String): Option[String] = { val relPath = className.replaceAllLiterally(".", File.separator) + ".class" val paths = searchDirs.map(_ + File.separator + relPath) paths.find(new File(_).exists) } }
caiiiycuk/xitrum
src/main/scala/xitrum/util/ClassFileLoader.scala
Scala
mit
1,602
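A sketch of using the loader above to pick up a freshly recompiled class; com.example.Foo is a hypothetical class name:

import xitrum.util.ClassFileLoader

object ReloadDemo {
  def main(args: Array[String]): Unit = {
    val loader = new ClassFileLoader

    // Looks for com/example/Foo.class under the discovered class directories;
    // falls back to the context class loader if no such file exists.
    val klass = loader.loadClass("com.example.Foo")
    println(klass.getName)
  }
}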
object HelloWorld{
  var myVar : Int = 10 // variable (mutable)
  var myVar1 = 10
  val myvar : String = "Foo" // constant (immutable value)
  val myvar1 = "Foo" // the type annotation can be omitted, but an initial value is required
  def main(args:Array[String]){
    println("Hello, World!");
  }
}
PengLiangWang/Scala
base_test/HelloWorld.scala
Scala
gpl-3.0
274
/* * Copyright (c) 2002-2018 "Neo Technology," * Network Engine for Objects in Lund AB [http://neotechnology.com] * * This file is part of Neo4j. * * Neo4j is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package org.neo4j.cypher.internal.compiler.v2_3.pipes import org.neo4j.cypher.internal.compiler.v2_3._ import org.neo4j.cypher.internal.compiler.v2_3.executionplan.{Effects, ReadsNodesWithLabels} import org.neo4j.cypher.internal.compiler.v2_3.planDescription.InternalPlanDescription.Arguments.LabelName import org.neo4j.cypher.internal.compiler.v2_3.planDescription.{NoChildren, PlanDescriptionImpl} import org.neo4j.cypher.internal.compiler.v2_3.symbols.SymbolTable import org.neo4j.cypher.internal.frontend.v2_3.symbols._ case class NodeByLabelScanPipe(ident: String, label: LazyLabel) (val estimatedCardinality: Option[Double] = None)(implicit pipeMonitor: PipeMonitor) extends Pipe with RonjaPipe { protected def internalCreateResults(state: QueryState): Iterator[ExecutionContext] = { label.id(state.query) match { case Some(labelId) => val nodes = state.query.getNodesByLabel(labelId.id) val baseContext = state.initialContext.getOrElse(ExecutionContext.empty) nodes.map(n => baseContext.newWith1(ident, n)) case None => Iterator.empty } } def exists(predicate: Pipe => Boolean): Boolean = predicate(this) def planDescriptionWithoutCardinality = new PlanDescriptionImpl(this.id, "NodeByLabelScan", NoChildren, Seq(LabelName(label.name)), identifiers) def symbols = new SymbolTable(Map(ident -> CTNode)) override def monitor = pipeMonitor def dup(sources: List[Pipe]): Pipe = { require(sources.isEmpty) this } def sources: Seq[Pipe] = Seq.empty override def localEffects = Effects(ReadsNodesWithLabels(label.name)) def withEstimatedCardinality(estimated: Double) = copy()(Some(estimated)) }
HuangLS/neo4j
community/cypher/cypher-compiler-2.3/src/main/scala/org/neo4j/cypher/internal/compiler/v2_3/pipes/NodeByLabelScanPipe.scala
Scala
apache-2.0
2,520
package com.rasterfoundry.datamodel import io.circe.generic.JsonCodec /** * Case class for paginated results * * @param count number of total results available * @param hasPrevious whether or not previous results are available * @param hasNext whether or not additional results are available * @param page current page of results * @param pageSize number of results per page * @param results sequence of results for a page */ @JsonCodec final case class PaginatedResponse[A]( count: Long, hasPrevious: Boolean, hasNext: Boolean, page: Long, pageSize: Long, results: Seq[A] )
raster-foundry/raster-foundry
app-backend/datamodel/src/main/scala/PaginatedResponse.scala
Scala
apache-2.0
619
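A small sketch constructing a page by hand and serialising it with the circe codec derived by the @JsonCodec annotation above; an Encoder for the element type (here String) must be in scope, and the values are arbitrary:

import com.rasterfoundry.datamodel.PaginatedResponse
import io.circe.syntax._

object PageDemo extends App {
  val page = PaginatedResponse[String](
    count = 12,
    hasPrevious = false,
    hasNext = true,
    page = 0,
    pageSize = 5,
    results = Seq("a", "b", "c", "d", "e")
  )
  println(page.asJson.noSpaces)
}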
package org.workcraft.swing

import javax.swing.{Timer => JTimer}

import Swing._

class Timer(timer : JTimer) extends Component {
  def stop : Swing[Unit] = unsafeToSwing(timer.stop)
  def setDelay(delay : Int) = unsafeToSwing(timer.setDelay(delay))
  def setInitialDelay(delay : Int) = unsafeToSwing(timer.setInitialDelay(delay))
  def start = unsafeToSwing(timer.start)
}
tuura/workcraft-2.2
ScalaGraphEditorUtil/src/main/scala/org/workcraft/swing/Timer.scala
Scala
gpl-3.0
367
package object simplez { type Id[A] = A type Reader[A, B] = Kleisli[Id, A, B] type ~>[F[_], G[_]] = NaturalTransformation[F, G] type ~~>[F[_[_]], G[_[_]]] = NaturalTransformation2[F, G] object id { implicit val idInstance = new Monad[Id] { def pure[A](a: => A): Id[A] = a def flatMap[A, B](F: Id[A])(f: A => Id[B]): Id[B] = f(F) } implicit val identityTransformation = new NaturalTransformation[Id, Id] { def apply[A](a: Id[A]) = a } } object const { trait ConstApplicativeT[M] extends Applicative[Lambda[a => Const[M, a]]] { implicit def M: Monoid[M] override def map[A, B](fa: Const[M, A])(f: A => B): Const[M, B] = Const[M, B](fa.m) def pure[A](a: => A): Const[M, A] = Const[M, A](M.zero) def ap[A, B](fa: => Const[M, A])(f: => Const[M, A => B]): Const[M, B] = Const[M, B](M.append(f.m, fa.m)) } implicit def ConstApplicative[M](implicit ev: Monoid[M]) = new ConstApplicativeT[M] { implicit def M: Monoid[M] = ev } } object coproduct { def coproductFunctor[F[_], G[_]](implicit F: Functor[F], G: Functor[G]) = new Functor[Coproduct[F, G, ?]] { def map[A, B](fa: Coproduct[F, G, A])(f: A => B): Coproduct[F, G, B] = { fa.value match { case Left(a) => new Coproduct[F, G, B](Left[F[B], G[B]](F.map(a)(f))) case Right(a) => new Coproduct[F, G, B](Right[F[B], G[B]](G.map(a)(f))) } } } implicit def leftInjectInstance[F[_], G[_]]: Inject[F, Coproduct[F, G, ?]] = new Inject[F, Coproduct[F, G, ?]] { def inj[A](fa: F[A]): Coproduct[F, G, A] = Coproduct.injl(fa) } // implicit def rightInjectInstance[F[_], G[_], H[_]](implicit I: Inject[F, G]): Inject[F, Coproduct[H, G, ?]] = // new Inject[F, Coproduct[H, G, ?]] { // def inj[A](fa: F[A]): Coproduct[H, G, A] = Coproduct.injr(I.inj(fa)) // } implicit def rightInjectInstance[F[_], G[_]]: Inject[G, Coproduct[F, G, ?]] = new Inject[G, Coproduct[F, G, ?]] { def inj[A](fa: G[A]): Coproduct[F, G, A] = Coproduct.injr(fa) } } object category { implicit def kleisliCategory[M[_]](implicit M: Monad[M]): Category[Lambda[(A, B) => Kleisli[M, A, B]]] = new Category[Lambda[(A, B) => Kleisli[M, A, B]]] { def id[A]: Kleisli[M, A, A] = Kleisli.kleisli { a => M.pure(a) } def compose[A, B, C](g: Kleisli[M, B, C], f: Kleisli[M, A, B]): Kleisli[M, A, C] = g <=< f } implicit def kleisliSemigroup[A, B, S[_]](implicit S: Semigroup[S[B]]) = new Semigroup[Kleisli[S, A, B]] { def append(a: Kleisli[S, A, B], b: => Kleisli[S, A, B]): Kleisli[S, A, B] = Kleisli.kleisli { x => S.append(a.run(x), b.run(x)) } } } }
inoio/simplez
main/src/main/scala/simplez/package.scala
Scala
bsd-2-clause
2,756
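A tiny sketch exercising the Id instances defined above; Monad is simplez's own type class, whose pure/flatMap signatures are the ones implemented by idInstance:

import simplez._
import simplez.id._

object IdDemo extends App {
  val M = implicitly[Monad[Id]]

  val x: Id[Int] = M.pure(21)
  println(M.flatMap(x)(n => M.pure(n * 2)))  // 42: Id is just the identity wrapper
}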
package skabele.screenshare.actors import akka.actor._ import akka.testkit._ import org.scalatest._ import WsData._ import WsId._ import InternalMessage._ import scala.concurrent.duration._ class ChatActorSpec extends TestKit(ActorSystem("ChatActorSpec")) with ImplicitSender with WordSpecLike with Matchers with BeforeAndAfterAll { override def afterAll { TestKit.shutdownActorSystem(system) } class Helper { val duration = 100.millis val socket = TestProbe() class BareChatActor(override val socket: ActorRef) extends ChatActor { var name = "Test" override def receive = receiveChat orElse receiveError } val chatActor = system.actorOf(Props(new BareChatActor(socket.ref))) } "ChatActor" should { "on SEND_CHAT publish ChatPublished to eventStream" in new Helper { val eventStreamProbe = TestProbe() system.eventStream.subscribe(eventStreamProbe.ref, classOf[ChatPublished]) val msg = "foo bar baz" chatActor ! WsMessage(SEND_CHAT, SendChat(msg)) eventStreamProbe.expectMsg(duration, ChatPublished(chatActor, "Test", msg)) } "on ChatPublished send CHAT_MSG to socket" in new Helper { val otherActor = TestProbe() val msg = "foo bar baz" chatActor ! ChatPublished(otherActor.ref, "Test", msg) socket.expectMsg(duration, WsMessage(CHAT_MSG, ChatMsg("Test", msg))) } } }
skabele/simple-screen-share
test/skabele/screenshare/actors/ChatActorSpec.scala
Scala
mit
1,401
import com.github.jeanadrien.gatling.mqtt.Predef._ import io.gatling.core.Predef._ import scala.concurrent.duration._ /** * */ class MqttScenarioExample extends Simulation { val mqttConf = mqtt.host("tcp://localhost:1883") val scn = scenario("MQTT Test") .exec(connect) .exec(subscribe("myTopic")) .during(30 seconds) { pace(1 second).exec(publish("myTopic", "myPayload")) } setUp( scn.inject(rampUsers(10) over (10 seconds))) .protocols(mqttConf) }
jeanadrien/gatling-mqtt-protocol
src/test/scala/MqttScenarioExample.scala
Scala
apache-2.0
531
package net.opentsdb.client.netty import org.jboss.netty.channel._ import org.slf4j.{LoggerFactory, Logger} class TsdbClientHandler extends SimpleChannelUpstreamHandler { private final val logger: Logger = LoggerFactory.getLogger(classOf[TsdbClient]) override def handleUpstream(ctx: ChannelHandlerContext, e: ChannelEvent) = { e match { case e: ChannelStateEvent => logger.info(e.toString) case _ => } super.handleUpstream(ctx, e) } override def messageReceived(ctx: ChannelHandlerContext, e: MessageEvent) = { logger.error(e.getMessage.toString) } override def exceptionCaught(ctx: ChannelHandlerContext, e: ExceptionEvent) = { logger.warn("Unexpected exception from downstream: {}", e.getCause) e.getChannel.close() } }
sebobr/dontpaytheferryman
opentsdb-kafka/opentsdb-client/src/main/scala/net/opentsdb/client/netty/TsdbClientHandler.scala
Scala
apache-2.0
781
trait T { def f = List(1) map { case i if i > 0 => implicit j: Int => i + implicitly[Int] case _ => implicit j: Int => 42 } def g = List(1) map { case i if i > 0 => import concurrent._ case _ => implicit j: Int => 42 } def h = List(1) map { case i if i > 0 => val x = 42 case _ => implicit j: Int => () } // separator is optional def k = List(1) map { case i if i > 0 => implicit j: Int => i + implicitly[Int] ; case _ => implicit j: Int => 42 } }
scala/scala
test/files/pos/t10684.scala
Scala
apache-2.0
467
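What the test above exercises: a case body may be an implicit function literal, so f yields a list of Int => Int functions whose implicit parameter supplies the implicitly[Int]. A small sketch of calling it (not part of the compiler test itself):

object T10684Demo extends App {
  val t = new T {}
  // each element of f is a function j => i + j for the guarded case (here i = 1)
  println(t.f.map(fn => fn(10)))  // List(11)
}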
/* * Copyright 2014-2020 Rik van der Kleij * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package intellij.haskell import com.intellij.extapi.psi.PsiFileBase import com.intellij.openapi.fileTypes._ import com.intellij.psi.FileViewProvider import javax.swing._ import org.jetbrains.annotations.NotNull class HaskellFile(viewProvider: FileViewProvider) extends PsiFileBase(viewProvider, HaskellLanguage.Instance) { @NotNull def getFileType: FileType = { HaskellFileType.INSTANCE } override def toString: String = { "Haskell file" } override def getIcon(flags: Int): Icon = { super.getIcon(flags) } }
rikvdkleij/intellij-haskell
src/main/scala/intellij/haskell/HaskellFile.scala
Scala
apache-2.0
1,142
package japgolly.scalajs.react.extra import japgolly.scalajs.react._ /** * External entities can register with this to listen (receive) data of type A. * * Install in `ScalaComponent.build` via `.configure(Listenable.listen)`. */ trait Listenable[A] { /** * Register a listener. * * @param listener The listener/consumer. A procedure that receives data of type A. * @return A procedure to unregister the given listener. */ def register(listener: A => Callback): CallbackTo[Callback] } object Listenable { def listen[P, C <: Children, S, B <: OnUnmount, A]( listenable: P => Listenable[A], makeListener: ScalaComponent.Lifecycle.ComponentDidMount[P, S, B] => A => Callback): ScalaComponent.Config[P, C, S, B] = OnUnmount.install[P, C, S, B] andThen (_.componentDidMount($ => listenable($.props).register(makeListener($)) >>= $.backend.onUnmount)) def listenToUnit[P, C <: Children, S, B <: OnUnmount]( listenable: P => Listenable[Unit], makeListener: ScalaComponent.Lifecycle.ComponentDidMount[P, S, B] => Callback): ScalaComponent.Config[P, C, S, B] = listen[P, C, S, B, Unit](listenable, $ => _ => makeListener($)) }
matthughes/scalajs-react
extra/src/main/scala/japgolly/scalajs/react/extra/Listenable.scala
Scala
apache-2.0
1,189
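A minimal sketch of an in-memory Listenable implementation, to show what `register` is expected to do: store the listener and hand back a Callback that unregisters it. It assumes only the trait above plus the core Callback/CallbackTo API; in practice the library's own Broadcaster plays this role.

import japgolly.scalajs.react._
import japgolly.scalajs.react.extra.Listenable

/** Keeps registered listeners in memory and pushes each broadcast value to all of them. */
final class SimpleBroadcaster[A] extends Listenable[A] {
  private var listeners = List.empty[A => Callback]

  override def register(listener: A => Callback): CallbackTo[Callback] =
    CallbackTo {
      listeners ::= listener
      // The returned Callback unregisters this particular listener again.
      Callback { listeners = listeners.filterNot(_ eq listener) }
    }

  /** Push a value to every currently-registered listener. */
  def broadcast(a: A): Callback =
    Callback(listeners.foreach(l => l(a).runNow()))
}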
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.streaming import java.util.UUID import javax.annotation.concurrent.GuardedBy import scala.collection.mutable import org.apache.hadoop.fs.Path import org.apache.spark.annotation.{Experimental, InterfaceStability} import org.apache.spark.internal.Logging import org.apache.spark.sql.{AnalysisException, DataFrame, SparkSession} import org.apache.spark.sql.catalyst.analysis.UnsupportedOperationChecker import org.apache.spark.sql.execution.streaming._ import org.apache.spark.sql.execution.streaming.state.StateStoreCoordinatorRef import org.apache.spark.sql.internal.SQLConf import org.apache.spark.util.{Clock, SystemClock, Utils} /** * :: Experimental :: * A class to manage all the [[StreamingQuery]] active on a `SparkSession`. * * @since 2.0.0 */ @Experimental @InterfaceStability.Evolving class StreamingQueryManager private[sql] (sparkSession: SparkSession) extends Logging { private[sql] val stateStoreCoordinator = StateStoreCoordinatorRef.forDriver(sparkSession.sparkContext.env) private val listenerBus = new StreamingQueryListenerBus(sparkSession.sparkContext.listenerBus) @GuardedBy("activeQueriesLock") private val activeQueries = new mutable.HashMap[UUID, StreamingQuery] private val activeQueriesLock = new Object private val awaitTerminationLock = new Object @GuardedBy("awaitTerminationLock") private var lastTerminatedQuery: StreamingQuery = null /** * Returns a list of active queries associated with this SQLContext * * @since 2.0.0 */ def active: Array[StreamingQuery] = activeQueriesLock.synchronized { activeQueries.values.toArray } /** * Returns the query if there is an active query with the given id, or null. * * @since 2.1.0 */ def get(id: UUID): StreamingQuery = activeQueriesLock.synchronized { activeQueries.get(id).orNull } /** * Returns the query if there is an active query with the given id, or null. * * @since 2.1.0 */ def get(id: String): StreamingQuery = get(UUID.fromString(id)) /** * Wait until any of the queries on the associated SQLContext has terminated since the * creation of the context, or since `resetTerminated()` was called. If any query was terminated * with an exception, then the exception will be thrown. * * If a query has terminated, then subsequent calls to `awaitAnyTermination()` will either * return immediately (if the query was terminated by `query.stop()`), * or throw the exception immediately (if the query was terminated with exception). Use * `resetTerminated()` to clear past terminations and wait for new terminations. * * In the case where multiple queries have terminated since `resetTermination()` was called, * if any query has terminated with exception, then `awaitAnyTermination()` will * throw any of the exception. 
For correctly documenting exceptions across multiple queries, * users need to stop all of them after any of them terminates with exception, and then check the * `query.exception()` for each query. * * @throws StreamingQueryException if any query has terminated with an exception * * @since 2.0.0 */ @throws[StreamingQueryException] def awaitAnyTermination(): Unit = { awaitTerminationLock.synchronized { while (lastTerminatedQuery == null) { awaitTerminationLock.wait(10) } if (lastTerminatedQuery != null && lastTerminatedQuery.exception.nonEmpty) { throw lastTerminatedQuery.exception.get } } } /** * Wait until any of the queries on the associated SQLContext has terminated since the * creation of the context, or since `resetTerminated()` was called. Returns whether any query * has terminated or not (multiple may have terminated). If any query has terminated with an * exception, then the exception will be thrown. * * If a query has terminated, then subsequent calls to `awaitAnyTermination()` will either * return `true` immediately (if the query was terminated by `query.stop()`), * or throw the exception immediately (if the query was terminated with exception). Use * `resetTerminated()` to clear past terminations and wait for new terminations. * * In the case where multiple queries have terminated since `resetTermination()` was called, * if any query has terminated with exception, then `awaitAnyTermination()` will * throw any of the exception. For correctly documenting exceptions across multiple queries, * users need to stop all of them after any of them terminates with exception, and then check the * `query.exception()` for each query. * * @throws StreamingQueryException if any query has terminated with an exception * * @since 2.0.0 */ @throws[StreamingQueryException] def awaitAnyTermination(timeoutMs: Long): Boolean = { val startTime = System.currentTimeMillis def isTimedout = System.currentTimeMillis - startTime >= timeoutMs awaitTerminationLock.synchronized { while (!isTimedout && lastTerminatedQuery == null) { awaitTerminationLock.wait(10) } if (lastTerminatedQuery != null && lastTerminatedQuery.exception.nonEmpty) { throw lastTerminatedQuery.exception.get } lastTerminatedQuery != null } } /** * Forget about past terminated queries so that `awaitAnyTermination()` can be used again to * wait for new terminations. * * @since 2.0.0 */ def resetTerminated(): Unit = { awaitTerminationLock.synchronized { lastTerminatedQuery = null } } /** * Register a [[StreamingQueryListener]] to receive up-calls for life cycle events of * [[StreamingQuery]]. * * @since 2.0.0 */ def addListener(listener: StreamingQueryListener): Unit = { listenerBus.addListener(listener) } /** * Deregister a [[StreamingQueryListener]]. 
* * @since 2.0.0 */ def removeListener(listener: StreamingQueryListener): Unit = { listenerBus.removeListener(listener) } /** Post a listener event */ private[sql] def postListenerEvent(event: StreamingQueryListener.Event): Unit = { listenerBus.post(event) } private def createQuery( userSpecifiedName: Option[String], userSpecifiedCheckpointLocation: Option[String], df: DataFrame, sink: Sink, outputMode: OutputMode, useTempCheckpointLocation: Boolean, recoverFromCheckpointLocation: Boolean, trigger: Trigger, triggerClock: Clock): StreamingQueryWrapper = { var deleteCheckpointOnStop = false val checkpointLocation = userSpecifiedCheckpointLocation.map { userSpecified => new Path(userSpecified).toUri.toString }.orElse { df.sparkSession.sessionState.conf.checkpointLocation.map { location => new Path(location, userSpecifiedName.getOrElse(UUID.randomUUID().toString)).toUri.toString } }.getOrElse { if (useTempCheckpointLocation) { // Delete the temp checkpoint when a query is being stopped without errors. deleteCheckpointOnStop = true Utils.createTempDir(namePrefix = s"temporary").getCanonicalPath } else { throw new AnalysisException( "checkpointLocation must be specified either " + """through option("checkpointLocation", ...) or """ + s"""SparkSession.conf.set("${SQLConf.CHECKPOINT_LOCATION.key}", ...)""") } } // If offsets have already been created, we trying to resume a query. if (!recoverFromCheckpointLocation) { val checkpointPath = new Path(checkpointLocation, "offsets") val fs = checkpointPath.getFileSystem(df.sparkSession.sessionState.newHadoopConf()) if (fs.exists(checkpointPath)) { throw new AnalysisException( s"This query does not support recovering from checkpoint location. " + s"Delete $checkpointPath to start over.") } } val analyzedPlan = df.queryExecution.analyzed df.queryExecution.assertAnalyzed() if (sparkSession.sessionState.conf.isUnsupportedOperationCheckEnabled) { UnsupportedOperationChecker.checkForStreaming(analyzedPlan, outputMode) } if (sparkSession.sessionState.conf.adaptiveExecutionEnabled) { logWarning(s"${SQLConf.ADAPTIVE_EXECUTION_ENABLED.key} " + "is not supported in streaming DataFrames/Datasets and will be disabled.") } new StreamingQueryWrapper(new StreamExecution( sparkSession, userSpecifiedName.orNull, checkpointLocation, analyzedPlan, sink, trigger, triggerClock, outputMode, deleteCheckpointOnStop)) } /** * Start a [[StreamingQuery]]. * * @param userSpecifiedName Query name optionally specified by the user. * @param userSpecifiedCheckpointLocation Checkpoint location optionally specified by the user. * @param df Streaming DataFrame. * @param sink Sink to write the streaming outputs. * @param outputMode Output mode for the sink. * @param useTempCheckpointLocation Whether to use a temporary checkpoint location when the user * has not specified one. If false, then error will be thrown. * @param recoverFromCheckpointLocation Whether to recover query from the checkpoint location. * If false and the checkpoint location exists, then error * will be thrown. * @param trigger [[Trigger]] for the query. * @param triggerClock [[Clock]] to use for the triggering. 
*/ private[sql] def startQuery( userSpecifiedName: Option[String], userSpecifiedCheckpointLocation: Option[String], df: DataFrame, sink: Sink, outputMode: OutputMode, useTempCheckpointLocation: Boolean = false, recoverFromCheckpointLocation: Boolean = true, trigger: Trigger = ProcessingTime(0), triggerClock: Clock = new SystemClock()): StreamingQuery = { val query = createQuery( userSpecifiedName, userSpecifiedCheckpointLocation, df, sink, outputMode, useTempCheckpointLocation, recoverFromCheckpointLocation, trigger, triggerClock) activeQueriesLock.synchronized { // Make sure no other query with same name is active userSpecifiedName.foreach { name => if (activeQueries.values.exists(_.name == name)) { throw new IllegalArgumentException( s"Cannot start query with name $name as a query with that name is already active") } } // Make sure no other query with same id is active if (activeQueries.values.exists(_.id == query.id)) { throw new IllegalStateException( s"Cannot start query with id ${query.id} as another query with same id is " + s"already active. Perhaps you are attempting to restart a query from checkpoint " + s"that is already active.") } activeQueries.put(query.id, query) } try { // When starting a query, it will call `StreamingQueryListener.onQueryStarted` synchronously. // As it's provided by the user and can run arbitrary codes, we must not hold any lock here. // Otherwise, it's easy to cause dead-lock, or block too long if the user codes take a long // time to finish. query.streamingQuery.start() } catch { case e: Throwable => activeQueriesLock.synchronized { activeQueries -= query.id } throw e } query } /** Notify (by the StreamingQuery) that the query has been terminated */ private[sql] def notifyQueryTermination(terminatedQuery: StreamingQuery): Unit = { activeQueriesLock.synchronized { activeQueries -= terminatedQuery.id } awaitTerminationLock.synchronized { if (lastTerminatedQuery == null || terminatedQuery.exception.nonEmpty) { lastTerminatedQuery = terminatedQuery } awaitTerminationLock.notifyAll() } } }
MLnick/spark
sql/core/src/main/scala/org/apache/spark/sql/streaming/StreamingQueryManager.scala
Scala
apache-2.0
12,820
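A hypothetical end-to-end use of the manager above, written against the public SparkSession API: register a listener, start a query, then block in awaitAnyTermination. The app name, checkpoint path and the `rate` toy source are illustrative assumptions, not part of the file.

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.streaming.StreamingQueryListener
import org.apache.spark.sql.streaming.StreamingQueryListener._

object StreamingQueryManagerUsage {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").appName("sqm-example").getOrCreate()

    // Life-cycle callbacks are delivered through the manager's listener bus.
    spark.streams.addListener(new StreamingQueryListener {
      override def onQueryStarted(e: QueryStartedEvent): Unit = println(s"started ${e.id}")
      override def onQueryProgress(e: QueryProgressEvent): Unit = ()
      override def onQueryTerminated(e: QueryTerminatedEvent): Unit =
        println(s"terminated ${e.id}, exception = ${e.exception}")
    })

    val events = spark.readStream.format("rate").load() // toy streaming source

    events.writeStream
      .format("console")
      .option("checkpointLocation", "/tmp/sqm-example-checkpoint") // illustrative path
      .start()

    // Blocks until some active query stops or fails; rethrows the query's exception if any.
    spark.streams.awaitAnyTermination()
  }
}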
// Project: scalajs-rxjs // Module: // Description: // Copyright (c) 2016. Distributed under the MIT License (see included LICENSE file). package rxjs import scala.scalajs.js @js.native trait Scheduler extends js.Object { }
jokade/scalajs-rxjs
src/main/scala/rxjs/Scheduler.scala
Scala
mit
237
package clients import com.softwaremill.sttp.circe._ import com.softwaremill.sttp.{sttp, _} import contexts.AkkaContext import io.circe.generic.auto._ import shared.requests.policies._ import shared.responses.policies._ trait FoulkonPolicyClient extends FoulkonConfig { self: AkkaContext => val listAllPoliciesRequest = (request: ReadPoliciesRequest) => sttp .get(uri"http://$foulkonHost:$foulkonPort/api/v1/policies?Offset=${request.offset}&Limit=${request.limit}") .contentType("application/json") .auth .basic(foulkonUser, foulkonPassword) .response(asJson[PoliciesListAllResponse]) val policyDetailRequest = (request: GetPolicyRequest) => sttp .get( uri"http://$foulkonHost:$foulkonPort/api/v1/organizations/${request.pathParams.organizationId}/policies/${request.pathParams.policyName}") .contentType("application/json") .auth .basic(foulkonUser, foulkonPassword) .response(asJson[GetPolicyResponse]) val createPolicyRequest = (request: CreatePolicyRequest) => sttp .body(request.body) .post(uri"http://$foulkonHost:$foulkonPort/api/v1/organizations/${request.pathParams.organizationId}/policies") .contentType("application/json") .auth .basic(foulkonUser, foulkonPassword) .response(asJson[CreatePolicyResponse]) val deletePolicyRequest = (request: DeletePolicyRequest) => sttp .delete( uri"http://$foulkonHost:$foulkonPort/api/v1/organizations/${request.pathParams.organizationId}/policies/${request.pathParams.policyName}") .contentType("application/json") .auth .basic(foulkonUser, foulkonPassword) .mapResponse(_ => DeletePolicyResponse(request.pathParams.organizationId, request.pathParams.policyName)) val updatePolicyRequest = (request: UpdatePolicyRequest) => sttp .body(request.body) .put( uri"http://$foulkonHost:$foulkonPort/api/v1/organizations/${request.pathParams.organizationId}/policies/${request.pathParams.policyName}") .contentType("application/json") .auth .basic(foulkonUser, foulkonPassword) .response(asJson[UpdatePolicyResponse]) }
beikern/foulkon-ui
server/src/main/scala/clients/FoulkonPolicyClient.scala
Scala
apache-2.0
2,270
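A hedged sketch of how one of the request builders above might be exercised, assuming sttp's akka-http backend artifact is on the classpath. The client instance and the ReadPoliciesRequest are taken as parameters because their construction (AkkaContext/FoulkonConfig wiring, request field names) is project-specific and not shown in this file.

import scala.concurrent.ExecutionContext.Implicits.global

import clients.FoulkonPolicyClient
import com.softwaremill.sttp.akkahttp.AkkaHttpBackend
import shared.requests.policies.ReadPoliciesRequest

object FoulkonPolicyClientUsage {
  def listPolicies(client: FoulkonPolicyClient, request: ReadPoliciesRequest): Unit = {
    implicit val backend = AkkaHttpBackend() // any Future-based sttp backend would do

    client.listAllPoliciesRequest(request).send().foreach { response =>
      // asJson wraps the decoded body in an Either, and the HTTP layer adds one more.
      response.body match {
        case Right(Right(policies)) => println(s"policies: $policies")
        case Right(Left(decodeErr)) => println(s"could not decode response: $decodeErr")
        case Left(httpErrorBody)    => println(s"HTTP error: $httpErrorBody")
      }
    }
  }
}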