content (string, lengths 228 to 999k) | pred_label (string, 1 class) | pred_score (float64, 0.5 to 1)
---|---|---|
Source code for galaxy.jobs.runners.util.cli.shell.local
import os
from logging import getLogger
from subprocess import (
PIPE,
Popen,
)
from tempfile import TemporaryFile
from time import sleep
from galaxy.util.bunch import Bunch
from . import BaseShellExec
from ....util.process_groups import (
check_pg,
kill_pg,
)
log = getLogger(__name__)
TIMEOUT_ERROR_MESSAGE = "Execution timed out"
TIMEOUT_RETURN_CODE = -1
DEFAULT_TIMEOUT = 60
DEFAULT_TIMEOUT_CHECK_INTERVAL = 3
class LocalShell(BaseShellExec):
    """
    >>> shell = LocalShell()
    >>> def exec_python(script, **kwds): return shell.execute(['python', '-c', script], **kwds)
    >>> exec_result = exec_python("from __future__ import print_function; print('Hello World')")
    >>> exec_result.stderr == u''
    True
    >>> exec_result.stdout.strip() == u'Hello World'
    True
    >>> exec_result.returncode
    0
    >>> exec_result = exec_python("import time; time.sleep(10)", timeout=1, timeout_check_interval=.1)
    >>> exec_result.stdout == u''
    True
    >>> exec_result.stderr == 'Execution timed out'
    True
    >>> exec_result.returncode == TIMEOUT_RETURN_CODE
    True
    >>> shell.execute('echo hi').stdout == "hi\\n"
    True
    """

    def __init__(self, **kwds):
        pass

    def execute(self, cmd, persist=False, timeout=DEFAULT_TIMEOUT, timeout_check_interval=DEFAULT_TIMEOUT_CHECK_INTERVAL, **kwds):
        is_cmd_string = isinstance(cmd, str)
        outf = TemporaryFile()
        p = Popen(cmd, stdin=None, stdout=outf, stderr=PIPE, shell=is_cmd_string, preexec_fn=os.setpgrp)
        # Check the process group until the timeout expires.
        for _ in range(int(timeout / timeout_check_interval)):
            sleep(0.1)  # For fast-returning commands
            if not check_pg(p.pid):
                break
            sleep(timeout_check_interval)
        else:
            kill_pg(p.pid)
            return Bunch(stdout="", stderr=TIMEOUT_ERROR_MESSAGE, returncode=TIMEOUT_RETURN_CODE)
        outf.seek(0)
        # Need to poll once to establish the return code.
        p.poll()
        return Bunch(stdout=_read_str(outf), stderr=_read_str(p.stderr), returncode=p.returncode)


def _read_str(stream):
    contents = stream.read()
    return contents.decode("UTF-8") if isinstance(contents, bytes) else contents


__all__ = ("LocalShell",)
|
__label__pos
| 0.998122 |
Location: PHPKode > projects > PHPLayouts > PHPLayouts/tests/simpletest/test/visual_test.php
<?php
// $Id: visual_test.php 1547 2007-07-04 00:42:05Z lastcraft $
// NOTE:
// Some of these tests are designed to fail! Do not be alarmed.
// ----------------
// The following tests are a bit hacky. Whilst Kent Beck tried to
// build a unit tester with a unit tester, I am not that brave.
// Instead I have just hacked together odd test scripts until
// I have enough of a tester to proceed more formally.
//
// The proper tests start in all_tests.php
require_once('../unit_tester.php');
require_once('../shell_tester.php');
require_once('../mock_objects.php');
require_once('../reporter.php');
require_once('../xml.php');
class TestDisplayClass {
var $_a;
function TestDisplayClass($a) {
$this->_a = $a;
}
}
class PassingUnitTestCaseOutput extends UnitTestCase {
function testOfResults() {
$this->pass('Pass');
}
function testTrue() {
$this->assertTrue(true);
}
function testFalse() {
$this->assertFalse(false);
}
function testExpectation() {
$expectation = &new EqualExpectation(25, 'My expectation message: %s');
$this->assert($expectation, 25, 'My assert message : %s');
}
function testNull() {
$this->assertNull(null, "%s -> Pass");
$this->assertNotNull(false, "%s -> Pass");
}
function testType() {
$this->assertIsA("hello", "string", "%s -> Pass");
$this->assertIsA($this, "PassingUnitTestCaseOutput", "%s -> Pass");
$this->assertIsA($this, "UnitTestCase", "%s -> Pass");
}
function testTypeEquality() {
$this->assertEqual("0", 0, "%s -> Pass");
}
function testNullEquality() {
$this->assertNotEqual(null, 1, "%s -> Pass");
$this->assertNotEqual(1, null, "%s -> Pass");
}
function testIntegerEquality() {
$this->assertNotEqual(1, 2, "%s -> Pass");
}
function testStringEquality() {
$this->assertEqual("a", "a", "%s -> Pass");
$this->assertNotEqual("aa", "ab", "%s -> Pass");
}
function testHashEquality() {
$this->assertEqual(array("a" => "A", "b" => "B"), array("b" => "B", "a" => "A"), "%s -> Pass");
}
function testWithin() {
$this->assertWithinMargin(5, 5.4, 0.5, "%s -> Pass");
}
function testOutside() {
$this->assertOutsideMargin(5, 5.6, 0.5, "%s -> Pass");
}
function testStringIdentity() {
$a = "fred";
$b = $a;
$this->assertIdentical($a, $b, "%s -> Pass");
}
function testTypeIdentity() {
$a = "0";
$b = 0;
$this->assertNotIdentical($a, $b, "%s -> Pass");
}
function testNullIdentity() {
$this->assertNotIdentical(null, 1, "%s -> Pass");
$this->assertNotIdentical(1, null, "%s -> Pass");
}
function testHashIdentity() {
}
function testObjectEquality() {
$this->assertEqual(new TestDisplayClass(4), new TestDisplayClass(4), "%s -> Pass");
$this->assertNotEqual(new TestDisplayClass(4), new TestDisplayClass(5), "%s -> Pass");
}
function testObjectIndentity() {
$this->assertIdentical(new TestDisplayClass(false), new TestDisplayClass(false), "%s -> Pass");
$this->assertNotIdentical(new TestDisplayClass(false), new TestDisplayClass(0), "%s -> Pass");
}
function testReference() {
$a = "fred";
$b = &$a;
$this->assertReference($a, $b, "%s -> Pass");
}
function testCloneOnDifferentObjects() {
$a = "fred";
$b = $a;
$c = "Hello";
$this->assertClone($a, $b, "%s -> Pass");
}
function testPatterns() {
$this->assertPattern('/hello/i', "Hello there", "%s -> Pass");
$this->assertNoPattern('/hello/', "Hello there", "%s -> Pass");
}
function testLongStrings() {
$text = "";
for ($i = 0; $i < 10; $i++) {
$text .= "0123456789";
}
$this->assertEqual($text, $text);
}
}
class FailingUnitTestCaseOutput extends UnitTestCase {
function testOfResults() {
$this->fail('Fail'); // Fail.
}
function testTrue() {
$this->assertTrue(false); // Fail.
}
function testFalse() {
$this->assertFalse(true); // Fail.
}
function testExpectation() {
$expectation = &new EqualExpectation(25, 'My expectation message: %s');
$this->assert($expectation, 24, 'My assert message : %s'); // Fail.
}
function testNull() {
$this->assertNull(false, "%s -> Fail"); // Fail.
$this->assertNotNull(null, "%s -> Fail"); // Fail.
}
function testType() {
$this->assertIsA(14, "string", "%s -> Fail"); // Fail.
$this->assertIsA(14, "TestOfUnitTestCaseOutput", "%s -> Fail"); // Fail.
$this->assertIsA($this, "TestReporter", "%s -> Fail"); // Fail.
}
function testTypeEquality() {
$this->assertNotEqual("0", 0, "%s -> Fail"); // Fail.
}
function testNullEquality() {
$this->assertEqual(null, 1, "%s -> Fail"); // Fail.
$this->assertEqual(1, null, "%s -> Fail"); // Fail.
}
function testIntegerEquality() {
$this->assertEqual(1, 2, "%s -> Fail"); // Fail.
}
function testStringEquality() {
$this->assertNotEqual("a", "a", "%s -> Fail"); // Fail.
$this->assertEqual("aa", "ab", "%s -> Fail"); // Fail.
}
function testHashEquality() {
$this->assertEqual(array("a" => "A", "b" => "B"), array("b" => "B", "a" => "Z"), "%s -> Fail");
}
function testWithin() {
$this->assertWithinMargin(5, 5.6, 0.5, "%s -> Fail"); // Fail.
}
function testOutside() {
$this->assertOutsideMargin(5, 5.4, 0.5, "%s -> Fail"); // Fail.
}
function testStringIdentity() {
$a = "fred";
$b = $a;
$this->assertNotIdentical($a, $b, "%s -> Fail"); // Fail.
}
function testTypeIdentity() {
$a = "0";
$b = 0;
$this->assertIdentical($a, $b, "%s -> Fail"); // Fail.
}
function testNullIdentity() {
$this->assertIdentical(null, 1, "%s -> Fail"); // Fail.
$this->assertIdentical(1, null, "%s -> Fail"); // Fail.
}
function testHashIdentity() {
$this->assertIdentical(array("a" => "A", "b" => "B"), array("b" => "B", "a" => "A"), "%s -> fail"); // Fail.
}
function testObjectEquality() {
$this->assertNotEqual(new TestDisplayClass(4), new TestDisplayClass(4), "%s -> Fail"); // Fail.
$this->assertEqual(new TestDisplayClass(4), new TestDisplayClass(5), "%s -> Fail"); // Fail.
}
function testObjectIndentity() {
$this->assertNotIdentical(new TestDisplayClass(false), new TestDisplayClass(false), "%s -> Fail"); // Fail.
$this->assertIdentical(new TestDisplayClass(false), new TestDisplayClass(0), "%s -> Fail"); // Fail.
}
function testReference() {
$a = "fred";
$b = &$a;
$this->assertClone($a, $b, "%s -> Fail"); // Fail.
}
function testCloneOnDifferentObjects() {
$a = "fred";
$b = $a;
$c = "Hello";
$this->assertClone($a, $c, "%s -> Fail"); // Fail.
}
function testPatterns() {
$this->assertPattern('/hello/', "Hello there", "%s -> Fail"); // Fail.
$this->assertNoPattern('/hello/i', "Hello there", "%s -> Fail"); // Fail.
}
function testLongStrings() {
$text = "";
for ($i = 0; $i < 10; $i++) {
$text .= "0123456789";
}
$this->assertEqual($text . $text, $text . "a" . $text); // Fail.
}
}
class Dummy {
function Dummy() {
}
function a() {
}
}
Mock::generate('Dummy');
class TestOfMockObjectsOutput extends UnitTestCase {
function testCallCounts() {
$dummy = &new MockDummy();
$dummy->expectCallCount('a', 1, 'My message: %s');
$dummy->a();
$dummy->a();
}
function testMinimumCallCounts() {
$dummy = &new MockDummy();
$dummy->expectMinimumCallCount('a', 2, 'My message: %s');
$dummy->a();
$dummy->a();
}
function testEmptyMatching() {
$dummy = &new MockDummy();
$dummy->expectArguments('a', array());
$dummy->a();
$dummy->a(null); // Fail.
}
function testEmptyMatchingWithCustomMessage() {
$dummy = &new MockDummy();
$dummy->expectArguments('a', array(), 'My expectation message: %s');
$dummy->a();
$dummy->a(null); // Fail.
}
function testNullMatching() {
$dummy = &new MockDummy();
$dummy->expectArguments('a', array(null));
$dummy->a(null);
$dummy->a(); // Fail.
}
function testBooleanMatching() {
$dummy = &new MockDummy();
$dummy->expectArguments('a', array(true, false));
$dummy->a(true, false);
$dummy->a(true, true); // Fail.
}
function testIntegerMatching() {
$dummy = &new MockDummy();
$dummy->expectArguments('a', array(32, 33));
$dummy->a(32, 33);
$dummy->a(32, 34); // Fail.
}
function testFloatMatching() {
$dummy = &new MockDummy();
$dummy->expectArguments('a', array(3.2, 3.3));
$dummy->a(3.2, 3.3);
$dummy->a(3.2, 3.4); // Fail.
}
function testStringMatching() {
$dummy = &new MockDummy();
$dummy->expectArguments('a', array('32', '33'));
$dummy->a('32', '33');
$dummy->a('32', '34'); // Fail.
}
function testEmptyMatchingWithCustomExpectationMessage() {
$dummy = &new MockDummy();
$dummy->expectArguments(
'a',
array(new EqualExpectation('A', 'My part expectation message: %s')),
'My expectation message: %s');
$dummy->a('A');
$dummy->a('B'); // Fail.
}
function testArrayMatching() {
$dummy = &new MockDummy();
$dummy->expectArguments('a', array(array(32), array(33)));
$dummy->a(array(32), array(33));
$dummy->a(array(32), array('33')); // Fail.
}
function testObjectMatching() {
$a = new Dummy();
$a->a = 'a';
$b = new Dummy();
$b->b = 'b';
$dummy = &new MockDummy();
$dummy->expectArguments('a', array($a, $b));
$dummy->a($a, $b);
$dummy->a($a, $a); // Fail.
}
function testBigList() {
$dummy = &new MockDummy();
$dummy->expectArguments('a', array(false, 0, 1, 1.0));
$dummy->a(false, 0, 1, 1.0);
$dummy->a(true, false, 2, 2.0); // Fail.
}
}
class TestOfPastBugs extends UnitTestCase {
function testMixedTypes() {
$this->assertEqual(array(), null, "%s -> Pass");
$this->assertIdentical(array(), null, "%s -> Fail"); // Fail.
}
function testMockWildcards() {
$dummy = &new MockDummy();
$dummy->expectArguments('a', array('*', array(33)));
$dummy->a(array(32), array(33));
$dummy->a(array(32), array('33')); // Fail.
}
}
class TestOfVisualShell extends ShellTestCase {
function testDump() {
$this->execute('ls');
$this->dumpOutput();
$this->execute('dir');
$this->dumpOutput();
}
function testDumpOfList() {
$this->execute('ls');
$this->dump($this->getOutputAsList());
}
}
class PassesAsWellReporter extends HtmlReporter {
function _getCss() {
return parent::_getCss() . ' .pass { color: darkgreen; }';
}
function paintPass($message) {
parent::paintPass($message);
print "<span class=\"pass\">Pass</span>: ";
$breadcrumb = $this->getTestList();
array_shift($breadcrumb);
print implode(" -> ", $breadcrumb);
print " -> " . htmlentities($message) . "<br />\n";
}
function paintSignal($type, &$payload) {
print "<span class=\"fail\">$type</span>: ";
$breadcrumb = $this->getTestList();
array_shift($breadcrumb);
print implode(" -> ", $breadcrumb);
print " -> " . htmlentities(serialize($payload)) . "<br />\n";
}
}
class TestOfSkippingNoMatterWhat extends UnitTestCase {
function skip() {
$this->skipIf(true, 'Always skipped -> %s');
}
function testFail() {
$this->fail('This really shouldn\'t have happened');
}
}
class TestOfSkippingOrElse extends UnitTestCase {
function skip() {
$this->skipUnless(false, 'Always skipped -> %s');
}
function testFail() {
$this->fail('This really shouldn\'t have happened');
}
}
class TestOfSkippingTwiceOver extends UnitTestCase {
function skip() {
$this->skipIf(true, 'First reason -> %s');
$this->skipIf(true, 'Second reason -> %s');
}
function testFail() {
$this->fail('This really shouldn\'t have happened');
}
}
class TestThatShouldNotBeSkipped extends UnitTestCase {
function skip() {
$this->skipIf(false);
$this->skipUnless(true);
}
function testFail() {
$this->fail('We should see this message');
}
function testPass() {
$this->pass('We should see this message');
}
}
$test = &new TestSuite('Visual test with 46 passes, 47 fails and 0 exceptions');
$test->addTestCase(new PassingUnitTestCaseOutput());
$test->addTestCase(new FailingUnitTestCaseOutput());
$test->addTestCase(new TestOfMockObjectsOutput());
$test->addTestCase(new TestOfPastBugs());
$test->addTestCase(new TestOfVisualShell());
$test->addTestCase(new TestOfSkippingNoMatterWhat());
$test->addTestCase(new TestOfSkippingOrElse());
$test->addTestCase(new TestOfSkippingTwiceOver());
$test->addTestCase(new TestThatShouldNotBeSkipped());
if (isset($_GET['xml']) || in_array('xml', (isset($argv) ? $argv : array()))) {
$reporter = &new XmlReporter();
} elseif (TextReporter::inCli()) {
$reporter = &new TextReporter();
} else {
$reporter = &new PassesAsWellReporter();
}
if (isset($_GET['dry']) || in_array('dry', (isset($argv) ? $argv : array()))) {
$reporter->makeDry();
}
exit ($test->run($reporter) ? 0 : 1);
?>
Return current item: PHPLayouts
|
__label__pos
| 0.997771 |
2
$\begingroup$
I am trying to compute the distance between two curves, using EuclideanDistance to do so.
Here is my code:
j = -1;
a32 = 3.9683436;
a43 = 4.2925064;
alfaF32 = 1.54553;
alfaF43 = 2.34472;
mu = 10^-3;
tot4 = alfaF32*mu;
tot5 = alfaF43*mu;
S = ((EuclideanDistance[{a43 (-Sqrt[(16 tot5*i)/
3 (1 + tot5/(27 i^3))] - (2 tot5)/(9 j*i)) +
a43}, {a32 (Sqrt[(16 tot4*i)/3 (1 + tot4/(27 i^3))] - (
2 tot4)/(9 j*i)) + a32}])/(3/2 - 4/3))^2;
n = ListPlot[Table[S, {i, 0.0001, .3, 0.0001}]]
But the x-axis is wrong; let me explain. The x-axis should go from 0.0001 to 0.3, as in the definition of S, but instead it runs from 1 up to 3000 (that is, 0.3/0.0001 sample indices). Could someone help me?
$\endgroup$
• $\begingroup$ Could you please give us your definition of the "Euclidean" distance between curves? There are many possible distances based on Euclidean distances--Hausdorff distance is one, for instance, or perhaps you are looking for the $L^2$ distance between graphs of functions? $\endgroup$ – whuber Apr 25 '13 at 17:29
• 1
$\begingroup$ It looks like you are measuring vertical differences at successive points on the horizontal axis. That's not what one generally means by "distance between" two curves. $\endgroup$ – Daniel Lichtblau Apr 25 '13 at 17:37
• 1
$\begingroup$ If you don't provide x values in the ListPlot, Mathematica assumes the x values 1, 2, 3.... So change Table[S to Table[{i,S} and it should work. By the way, it's better not to start variables with uppercase characters, in order not to confuse them with built-in ones. $\endgroup$ – Sjoerd C. de Vries Apr 25 '13 at 17:42
5
$\begingroup$
I'm not really sure if what you are doing makes any sense, but this code seems to implement that dubious thing:
f1[i_] := a43 (-Sqrt[(16 tot5*i)/3 (1 + tot5/(27 i^3))] - (2 tot5)/(9 j*i)) + a43;
f2[i_] := a32 (Sqrt[(16 tot4*i)/3 (1 + tot4/(27 i^3))] - (2 tot4)/(9 j*i)) + a32;
s[i_] := (EuclideanDistance[f1[i], f2[i]]/(3/2 - 4/3))^2;
n = ListPlot[Table[{i, s[i]}, {i, 0.0001, .3, 0.0001}]]
[plot of s[i] against i]
$\endgroup$
7
$\begingroup$
I can't tell what you're doing either, but here's an idea using Nearest:
l1 = Table[{x, 1 + x^2}, {x, -2, 2, .005}];
l2 = Table[{x, 1/2 x^2}, {x, -2, 2, .005}];
d = {#[[1]], EuclideanDistance[#, First@Nearest[l2, #]]} & /@ l1;
ListLinePlot[{l1, l2, d}]
[plot of l1, l2 and the nearest-point distance curve d]
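As an aside for readers without Mathematica, a rough Python/NumPy sketch of this nearest-point distance computation might look like the following; the curve definitions and step size are taken from the code above, while the variable names and the brute-force pairwise approach are my own illustration rather than part of the original answer.

import numpy as np

# Sample the two example curves y = 1 + x^2 and y = x^2 / 2 on [-2, 2].
xs = np.arange(-2.0, 2.0 + 0.005, 0.005)
l1 = np.column_stack([xs, 1.0 + xs**2])
l2 = np.column_stack([xs, 0.5 * xs**2])

# For every point of l1, the distance to the nearest sampled point of l2
# (brute force O(n^2); a KD-tree would be the usual optimization).
diffs = l1[:, None, :] - l2[None, :, :]
dists = np.sqrt((diffs**2).sum(axis=2)).min(axis=1)

# Pair each x with its nearest-point distance, like the list d above.
d = np.column_stack([xs, dists])
print(d[:5])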
I don't know if there's a formal name for this nearest-point distance. In any case, the parameterization of this plot is questionable, so I think a better visualization would be something along the lines of this:
fD[f1_, f2_, iter_, opacity_: .5, colorF_: ColorData["TemperatureMap"]] :=
Module[{l1, l2, d, minmax},
l1 = Table[{First[iter], f1}, iter];
l2 = Table[{First[iter], f2}, iter];
d = Module[{nearest = First@Nearest[l2, #]},
{#, nearest, EuclideanDistance[#, nearest]}] & /@ l1;
minmax = {Min[Last /@ d], Max[Last /@ d]};
d = {#1, #2, Rescale[#3, minmax]} & @@@ d;
Show[ListLinePlot[{l1, l2}, AxesStyle -> Gray],
Graphics[{Opacity[opacity], {colorF[#3], Line[{#1, #2}]} & @@@ d}]]];
fD[1 + x^2, 1/2 x^2, {x, -2, 2, .001}]
[plot of the two curves with the distances drawn as colour-coded connecting lines]
And then, because deep down we're all some manner of deranged mad scientist hellbent on the destruction of this planet (or at least I am):
slides = ParallelTable[
Rasterize@fD[Sin[-offset + x^2], Cos[offset + 1/2 + x^2], {x, 0, 2 Pi, .001}],
{offset, 0, 2 Pi, 2 Pi/40}];
[animation frames of the two moving curves]
MUAHAHAHA. *goes mad with power* (Note for serious use the function needs HoldAll etc.)
Edit: Another issue I just noticed is that without restricting the plot range, you will get incorrect visuals at the ends of these kinds of plots. That's what accounts for the quirky banding at the ends of the parabolic plot.
$\endgroup$
|
__label__pos
| 0.919333 |
Numbers with Cool Names: Weird, Sexy, Untouchable
Molly Roberts
Act One Scene 1
(Thunder and lighting. Three weird numbers enter.)
70: When shall we three meet again? In thunder, lighting, or in rain?
836: When the hurly-burly’s done, when the battle’s lost and won.
4030: That will be ere the set of sun.
70: Where the place?
836: Upon the heath.
4030: There to meet with Macbeth.
For those of you unfamiliar with Shakespeare’s Macbeth, this is the opening scene, where the three weird sisters – or witches – meet to discuss the fate of the play’s titular character. The joke here is that 70, 836, and 4030 are the first, second, and third weird numbers respectively. In this third article of the numbers with cool names series, we’ll be investigating what it means for a number to be weird, what it means for a number to be a sexy prime, and how a number can reach celebrity status by being untouchable. Among numbers, 2 is practically Beyoncé.
Weird Numbers
Before we start to learn some new types of number, I would recommend checking out the previous two articles in this series as I will be using some terminology previously explained in greater detail. The links to these can be found at the bottom of the page.
Okay, onto the weird and wonderful. A weird number is a number which satisfies two conditions:
1. The sum of the proper divisors is greater than the number itself
2. No subset of the proper divisors sums to the number itself
Now, this might seem like quite a confusing definition, so I think we’d better begin with an example. As I mentioned above, 70 is the first of the weird numbers. Let’s consider the proper divisors of 70 (whole numbers less than 70 which divide it without leaving any remainder): 1, 2, 5, 7, 10, 14, and 35. These add up to 74, which is clearly greater than 70, and so the first condition of being weird is satisfied. In order to satisfy the second condition, we need to make sure we can never find a subset of the proper divisors which add together to make exactly 70. In other words, if we take some, but not all, of the proper divisors and add them together, this total must never be 70.
Suppose this is not the case. Since the proper divisors sum to 74, we would need to remove one or more of them from the sum so that exactly 70 remains – in other words, we would need a collection of proper divisors adding up to exactly 4. Clearly any divisor that is bigger than or equal to 5 overshoots this, so it cannot be used. That leaves only 1 and 2, and these sum to just 3. So removing any collection of proper divisors from 74 gives a number either strictly bigger than or strictly less than 70.
Therefore, we conclude that we can never find a subset of the proper divisors which add together to make exactly 70, and so condition 2 is satisfied: 70 is weird.
Let’s try a second example with a number that isn’t weird. Pick 12. This has proper divisors 1, 2, 3, 4, and 6. Now, these sum to 16, which is greater than 12, and so the first condition of weirdness is satisfied. However, if we take 1, 2, 3, and 6, which is a subset of the proper divisors, we can see that these sum to exactly 12. Hence the second condition of weirdness is violated and 12 is not weird.
In fact, every number between 1 and 69 is not weird. Why don’t you investigate this! Have a go at finding the proper divisors for 18, 30, and 60. In each case, can you find the sum of the proper divisors? And then can you find subsets which sum to give the number itself? Hint: there is more than one subset for each example.
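If you would like to check your working by computer, or hunt for further weird numbers, here is a small Python sketch; the function names are my own and the brute-force subset search is only practical for numbers with few divisors, so treat it as an illustration rather than an efficient algorithm.

from itertools import combinations

def proper_divisors(n):
    return [d for d in range(1, n) if n % d == 0]

def is_weird(n):
    divs = proper_divisors(n)
    if sum(divs) <= n:                       # condition 1: divisor sum must exceed n
        return False
    for size in range(1, len(divs) + 1):     # condition 2: no subset may sum to n
        if any(sum(c) == n for c in combinations(divs, size)):
            return False
    return True

print([n for n in range(1, 100) if is_weird(n)])   # prints [70]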
[Scroll down to reveal the answers and the next section on Sexy Numbers!]
.
.
.
.
.
.
.
.
.
.
.
.
Answers
• 18 has proper divisors 1, 2, 3, 6, and 9. The sum of these is 21. The subsets of these which sum to 18 are {1, 2, 6, 9} and {3, 6, 9}.
• 30 has proper divisors 1, 2, 3, 5, 6, 10, and 15. The sum of these is 42. The subsets of these which sum to 30 are {1, 3, 5, 6, 15} and {5, 10, 15}.
• 60 has proper divisors 1, 2, 3, 4, 5, 6, 10, 12, 15, 20, and 30. The sum of these is 108. The subsets of these which sum to 60 are {1, 3, 5, 6, 15, 30} and {5, 10, 15, 30}.
Sexy Primes
Which numbers do you think are sexy? I think everyone will be disappointed to learn that 69 is not a sexy prime – as it’s simply not a prime number. In fact, sexy primes are fairly boring. They are simply prime numbers which differ from another prime number by exactly six. Examples of sexy primes include 5, which differs from 11 by 6, or 23, which differs from 29 by 6. The name is derived from the Latin ‘sex’ meaning six, and mathematicians, having a great sense of humour, decided to call every prime number that differs from another prime by six ‘sexy’.
In fact, you can find lots of sequences of sexy primes throughout the integers. The longest possible sequence is of length five, and is unique: 5, 11, 17, 23, 29. Can you see why this is the longest possible? And can you see why it is unique?
The sequence above is the first one to occur since 2 + 6 = 8 which is not prime, and 3 + 6 = 9 which is also not prime. The next prime number after 2 and 3 is 5 and this gives rise to our sequence. We continue to add 6 – moving between primes at each stage – until we reach 29, but then we are forced to stop since 29 + 6 = 35, which is not prime. So this sequence has length 5 and can’t be extended.
Now let’s suppose we have a longer sequence, say of length 6, somewhere else along the numberline. If such a sequence exists, then it also contains a sequence of length 5 (for example just take the first 5 numbers out of the 6 available). This means that if we can show our sequence is the only one of length 5, we automatically get that it is the longest.
So, let’s try to do exactly that. Consider any sequence of sexy primes of length five. Call the first number in the sequence ‘n’. Then our sequence can be written:
n, n + 6, n + 12, n + 18, n + 24 (where all of these values are prime)
Now there are five options:
1. If n is a multiple of 5, it must be exactly 5 as we have assumed that n is prime. This gives our sequence above.
2. If n is one more than a multiple of 5, then n + 24 is 25 more than a multiple of 5. But this is then also a multiple of 5, which isn’t allowed as n + 24 must be prime. Therefore option 2 cannot give a sequence.
3. If n is two more than a multiple of 5, then n + 18 is 20 more than a multiple of 5, which means it is therefore also a multiple of 5. Once again, this can’t happen since n + 18 must be prime. Therefore option 3 fails to generate a sequence.
4. If n is three more than a multiple of 5, then n + 12 is 15 more than a multiple of 5, which is itself also a multiple of 5. As above, this needs to be prime to be in the sequence and so once again option 4 cannot generate a sequence.
5. Finally, if n is four more than a multiple of 5, then n + 6 is 10 more than a multiple of 5 which means it is itself also a multiple of 5. Again, nonsense.
In conclusion, given that any number n has to fall into one of the 5 options above, we can see from our working that only option 1 makes any sense, which gives exactly the sequence we claimed was unique.
Now, can you find any pairs of sexy primes larger than 29? How about triplets? Quadruplets?
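A quick Python sketch can do the searching for you; the helper names below are mine, not the article's, and the search is capped at primes below 100 to match the answers that follow.

def is_prime(n):
    if n < 2:
        return False
    return all(n % d for d in range(2, int(n**0.5) + 1))

primes = [p for p in range(2, 100) if is_prime(p)]

pairs = [(p, p + 6) for p in primes if is_prime(p + 6)]
triplets = [(p, p + 6, p + 12) for p in primes
            if is_prime(p + 6) and is_prime(p + 12)]
quadruplets = [(p, p + 6, p + 12, p + 18) for p in primes
               if all(is_prime(p + k) for k in (6, 12, 18))]

print(pairs)        # every pair (p, p + 6) with p < 100 and both entries prime
print(triplets)     # e.g. (31, 37, 43), (41, 47, 53), ...
print(quadruplets)  # e.g. (41, 47, 53, 59) and (61, 67, 73, 79)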
[Scroll down for the answers and for the next section on Untouchable Numbers!]
.
.
.
.
.
.
.
.
.
.
.
.
Answers
All the options for primes under 100:
• Pairs: {31, 37}, {37, 43}, {41, 47}, {47, 53}, {53, 59}, {61, 67}, {67, 73}, {73, 79}, {83, 89}
• Triplets: {31, 37, 43}, {41, 47, 53}, {47, 53, 59}, {61, 67, 73}, {67, 73, 79}
• Quadruplets: {41, 47, 53, 59}, {61, 67, 73, 79}
Untouchable Numbers
Ah, the untouchables. Revered, worshipped, elevated beyond human status. Or… Integer status. These numbers stand out from the rest, part of an exclusive club of integers so special that they don’t associate with any others. But what actually makes a number untouchable?
Again, we think about divisors. Untouchable numbers are numbers which cannot be written as the sum of all the proper divisors of any integer. In other words, no matter which integer we pick, if we take all the proper divisors of this integer and add them up, we can guarantee that this sum will not equal our untouchable number. As usual, let’s look at an example to make this clearer.
Consider the number 5. Every integer (except 1) has 1 as a proper divisor. So in order to reach 5 by summing proper divisors, we need a set of distinct numbers that includes 1 which must sum to 5. Now, let’s think of all the ways of summing distinct integers to reach 5:
• 5 = 5
• 1 + 4 = 5
• 2 + 3 = 5
These are the only ways, because summing the smallest three distinct natural numbers gives 1 + 2 + 3 = 6, which is larger than 5 and so immediately we may discount all other combinations of three or more. From the argument above, we need the combination to include a 1 if we are to reach 5 by summing proper divisors. Hence the proper divisors must be exactly 1 and 4, since the other two ways of summing to 5 do not include 1. But if 4 is a proper divisor of a number, then so is 2, and hence the set {1, 4} cannot be the only proper divisors of any number. This means 5 is untouchable.
Now consider the number 3. We may reach 3 by summing 1 and 2, and these are the only proper divisors of the number 4. Hence we conclude that 3 is not untouchable.
There is a very interesting article from 1991 by J. Sesiano, which discusses the theory around untouchable numbers, although the author does not use the term ‘untouchable’. In fact, the article states that 2 and 5 ‘stand among the numbers like bastards among people’. A rather less favourable view of what it means to be untouchable. You can find a preview of the article here.
Now, can you determine which of the numbers from 2 to 10 are untouchable? I’ll give you a hint: consider the products and squares of prime numbers…
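As before, a short Python sketch can check this. One subtlety is the search bound: the sum of the proper divisors of a composite number n is at least 1 + √n, while a prime only ever contributes 1, so to decide whether k is untouchable it is enough to scan n up to (k - 1)² + 1. That bound, and the function names, are my own working rather than something from the article.

def aliquot_sum(n):
    # Sum of the proper divisors of n.
    return sum(d for d in range(1, n) if n % d == 0)

def is_untouchable(k):
    limit = (k - 1) ** 2 + 1     # large enough: composite n beyond this have aliquot sum > k
    return all(aliquot_sum(n) != k for n in range(2, limit + 1))

print([k for k in range(2, 11) if is_untouchable(k)])   # prints [2, 5]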
[Scroll down for the answer!]
.
.
.
.
.
.
.
.
.
.
.
.
Answers
• Untouchable: 2, 5 (given above)
• Not untouchable: all the rest
• 3 is given above
• 4 is 1 + 3, and {1, 3} are the only proper divisors of 9
• 6 is 1 + 5 and {1, 5} are the only proper divisors of 25
• 7 is 1 + 2 + 4 and {1, 2, 4} are the only proper divisors of 8
• 8 is 1 + 7 and {1, 7} are the only proper divisors of 49
• 9 is 1 + 3 + 5 and {1, 3, 5} are the only proper divisors of 15
• 10 is 1 + 2 + 7 and {1, 2, 7} are the only proper divisors of 14
If you enjoyed this article on numbers with cool names, be sure to check out the previous two in the series:
Numbers With Cool Names Part 1 – Happy, Lucky, Perfect
Numbers With Cool Names Part 2 – Amicable, Sociable, Friendly
|
__label__pos
| 0.994657 |
Solve the system of equations by graphing. `x + 2y = 6` `2x + y = 9`
Expert Answers
embizze eNotes educator| Certified Educator
Graph the two lines: since they are given in general form the easiest way is often to find the intercepts.
For x+2y=6 the intercepts are (0,3) and (6,0)
For 2x+y=9 the intercepts are (0,9) and (4.5,0). Since we want to be accurate, it is better to plot lattice points (points with integer coordinates) so we find (3,3) is also on the line.
After graphing (see attachment), it appears that the solution is (4,1).
Check algebraically:
4+2(1)=6
2(4)+1=9
---------------------------------------------------------------------------------------
The solution is (4,1)
--------------------------------------------------------------------------------------
Images:
[Graph of the two lines x + 2y = 6 and 2x + y = 9, intersecting at (4, 1)]
vamkitten | Student
x + 2y = 6
2x + y = 9
x=6-2y
2(6-2y)+y=9
12-4y+y=9
12-3y=9
3y=3
y=1
x=6-2(1)
=6-2
=4
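As a quick sanity check, here is a tiny Python sketch (mine, not part of either answer) that solves the same 2x2 system with Cramer's rule and confirms the point (4, 1).

# Solve  x + 2y = 6  and  2x + y = 9.
a1, b1, c1 = 1, 2, 6
a2, b2, c2 = 2, 1, 9

det = a1 * b2 - a2 * b1              # determinant of the coefficient matrix
x = (c1 * b2 - c2 * b1) / det
y = (a1 * c2 - a2 * c1) / det

print(x, y)   # 4.0 1.0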
|
__label__pos
| 0.993048 |
Order of a group
From Groupprops
This article is about a basic definition in group theory. The article text may, however, contain advanced material.
This article defines an arithmetic function on groups
Definition
QUICK PHRASES: size of a group, cardinality of a group, size of the underlying set, number of elements
Symbol-free definition
The order of a group is the cardinality (i.e., size, or number of elements) of its underlying set.
Definition with symbols
The order of a group G is the cardinality (i.e., size, or number of elements) of G as a set. It is denoted |G|.
Note that a finite group is a group whose underlying set is finite, i.e., the size of the underlying set is finite. The order of a finite group is thus a natural number (note that the order cannot be zero because every group contains the identity element and is hence nonempty). For an infinite group, the order is an infinite cardinal.
Examples
• The trivial group, which is the group with only the identity element, has order 1. In fact, it is the only group (up to isomorphism) that has order 1.
(see the next section for more examples).
Operations on groups and effects on the order
Operations that admit a clear formula
Operation Input groups and their orders Output group and its order Proof and comment
external direct product of two groups G has order a, H has order b G \times H has order ab order of direct product is product of orders; the same formula works for internal direct product, which is equivalent to external direct product.
external direct product of finitely many groups G_1, G_2, \dots, G_n with orders a_1, a_2, \dots, a_n respectively G_1 \times G_2 \times \dots \times G_n has order \prod_{i=1}^n a_i = a_1a_2 \dots a_n order of direct product is product of orders; same formula works for internal direct product
external semidirect product of two groups G, order a, H, order b, acting on it G \rtimes H has order ab order of semidirect product is product of orders; same formula works for internal semidirect product
group extension normal subgroup N, order a, quotient group G/N, order b ab follows from Lagrange's theorem
product of two subgroups H, order a, K order b, H \cap K has order c ab/c see product formula, also second isomorphism theorem
external wreath product H order a, K with wreathing action, order b, acting on a set of size n H \wr K has order a^nb Note that when we take the regular action, n = b and we get a^bb. When K is the symmetric group of degree n acting naturally on a set of size n, we get a^nn!.
Operations that do not admit a clear formula
These include, for instance, measuring the sizes of homomorphism sets between two groups.
Particular cases and general patterns
See also number of groups of given order for more information on how the number of groups of order n depends on n.
Small orders
Number Groups of that order More collected information on the list
1 trivial group --
2 cyclic group:Z2 --
3 cyclic group:Z3 --
4 cyclic group:Z4, Klein four-group
5 cyclic group:Z5 --
6 symmetric group:S3, cyclic group:Z6
7 cyclic group:Z7 --
8 cyclic group:Z8, direct product of Z4 and Z2, dihedral group:D8, quaternion group, elementary abelian group:E8 groups of order 8
16 (14 groups, too long to list) groups of order 16
General facts
Nature of order What we can say about groups of that order Proof
1 unique group: trivial group
prime p unique up to isomorphism: group of prime order, which is cyclic and is the additive group of a prime field equivalence of definitions of group of prime order
p^2, p prime cyclic group of prime-square order, elementary abelian group of prime-square order ?
p^3, p prime 5 groups -- 3 abelian and 2 non-abelian
cyclicity-forcing number -- product of distinct primes such that no prime divides any other prime minus one unique cyclic group of that order classification of cyclicity-forcing numbers
Divisibility relations
Divisibility relations for constructed objects
Construct Divisibility statement related to order Proof Comments
subgroup order of subgroup divides order of group Lagrange's theorem Not every order dividing the order of the group is realized as the order of a subgroup, see group having subgroups of all orders dividing the group order
quotient group order of quotient group divides order of group order of quotient group divides order of group -- corollary to Lagrange's theorem
order of element in the group order of element divides order of group order of element divides order of group Follows from Lagrange's theorem because the order of an element is the order of the cyclic subgroup it generates.
set on which the group acts transitively size of such a set divides the order of the group fundamental theorem of group actions identifies any set with a transitive group action with the left coset space of a stabilizer, then we use Lagrange's theorem
conjugacy class size of conjugacy class divides order of group size of conjugacy class divides order of group in fact, it divides order of inner automorphism group and is strictly smaller for a non-abelian group
degree of irreducible representation degree of irreducible representation over a splitting field divides order of group degree of irreducible representation divides order of group in fact, degree of irreducible representation divides index of abelian normal subgroup
Divisibility relations for other arithmetic functions
Any arithmetic function that arises as the maximum or lcm of a bunch of numbers each of which divides the order of the group, must again divide the order of the group. Some examples are given below:
Arithmetic function Description Proof that it divides order
exponent lcm of orders of all elements exponent divides order
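To make these divisibility facts concrete, here is a small Python sketch, my own illustration rather than part of the article, which checks for the cyclic group Z_n under addition mod n that every element order, and the exponent, divides the order of the group:

from math import gcd
from functools import reduce

def element_order(k, n):
    # Order of k in the cyclic group Z_n under addition mod n.
    return n // gcd(k, n)

def lcm(a, b):
    return a * b // gcd(a, b)

n = 12
orders = [element_order(k, n) for k in range(n)]
exponent = reduce(lcm, orders)

assert all(n % o == 0 for o in orders)   # order of element divides order of group
assert n % exponent == 0                 # exponent divides order of group
print(sorted(set(orders)), exponent)     # [1, 2, 3, 4, 6, 12] 12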
Analogues
An order is a size measure that works for finite groups. For infinite groups, the order, viewed as an infinite cardinal, is a very crude size measure since it is unable to differentiate between the group and subgroups of finite index. Some analogues that work are described below.
• Measure is an analogue that is used for amenable groups, and works particularly well for compact groups with a Haar measure.
• Dimension plays a role analogous to the logarithm of the order. In those situations where the orders multiply, dimensions tend to add. There are many different notions of dimension, including algebraic, analytic, and topological ones.
• For profinite groups, we can view the orders as supernatural numbers, i.e., the orders take values that are products of powers of possibly infinitely many primes. Analogues of Lagrange's and Sylow's theorems hold in these contexts.
Order from subgroup and quotient perspectives
An infinite cyclic group (i.e., a group isomorphic to the group of integers) has no proper nontrivial finite subgroups, so from the perspective of Lagrange's theorem for subgroups, its order can be thought of as having no prime divisors. However, it has finite quotients of every order, so from the perspective of the fact that order of quotient group divides order of group, every prime divides its order.
More generally, for periodic groups and locally finite groups, the order notion must capture the possible finite subgroups that arise, whereas for residually finite groups, the order notion must capture the possible finite quotients that arise.
Computation
The GAP command to compute the order of a group is:
Order(group);
where group may either be an on-the-spot definition of a group or a name for something defined earlier.
|
__label__pos
| 0.881387 |
Glsl lerpGreen = slerp, blue = lerp, orange = nlerp. Here is an example of a medium sized angle (~90 degrees) interpolating the same time t between the angles. Lastly, here's a smaller angle (~35 degrees). You can see that the results of lerp / nlerp are more accurate as the angle between the interpolated vectors gets smaller.HvrShaderSubroutine. ¶. This component allows custom shaders to affect the HvrActor during the native plugin rendering step. This allows for complex effects to be written to affect the color, position and size of each voxel. A HvrShaderSubroutine component can be though of like Unity's material system. Where you have a shader with some ...GLSL Optimizations Many of the optimizations in this article are done automatically by some implementations, but often they are not. Therefore it helps to use these code optimizations, and they neither makes your code more complicated to read. Contents 1 Use Swizzle 2 Get MAD 2.1 Assignment with MAD 3 Fast Built-ins 3.1 Linear InterpolationNavigate to lerp-1..3-javadoc.jar you want to extract in File Explorer. Right-click lerp-1..3-sources.jar file → Select "Extract Here" in the drop-down context menu. Navigate to lerp-1.0.3-javadoc extracted folder in File Explorer. Open index.html file with your Browser to view the contents of the javadoc HTML files.2. lerp. lerp 函数的定义是. lerp(a, b, w) { return a + w*(b-a) } 当 w = 0 时,返回a,当 w = 1 时返回b,否则返回对 a 和 b 的差值,w 越接近0,返回结果越接近a,w越接近1,返回结果🈷️接近1,通常用来计算一些渐变量。 lerp (a, b, w) 中,a 和 b 需要同类型,可以是数值或向量Apr 27, 2015 · 为了避免shader中的分支,一种技术是使用lerp(linear interpolation)。也就是说,使用可能的条件变量作为lerp因子,因此如果它是0它是one颜色,如果它是1它是other颜色。 确保反转逻辑,因为第二个参数是它与cond=1混合的内容。这也允许你混合一半。 示例: 而不是 Feb 15, 2017 · GLSLのmix関数はUnityのlerp関数に変更します。 また、ShaderToyでは引数にスクリーン座標が渡されてくるため、正規座標系に直すために画面解像度(iResolution)で割り算していますが、 UnityではテクスチャのUV座標が使える のでそちらを使います。 Description. smoothstep performs smooth Hermite interpolation between 0 and 1 when edge0 x edge1.This is useful in cases where a threshold function with a smooth transition is desired. smoothstep is equivalent to: . genType t; /* Or genDType t; */ t = clamp((x - edge0) / (edge1 - edge0), 0.0, 1.0); return t * t * (3.0 - 2.0 * t);12.4 - GLSL Operators (Mathematical and Logical)¶ GLSL is designed for efficient vector and matrix processing. Therefore almost all of its operators are overloaded to perform standard vector and matrix operations as defined in linear algebra.In cases where an operation is not defined in linear algebra, the operation is typically done component-wise, where the operation is performed on each ...lerp.glsl This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters. Pastebin.com is the number one paste tool since 2002. Pastebin is a website where you can store text online for a set period of time.However GLSL is quite difficult to work with since each and every graphics card manufacturer creates their own compiler for it, all of course with subtle differences. CgFx protected me from quite a lot of this because it converted the CgFX shader code into GLSL but now this needs to be handled manually. Aug 19, 2020 · Remarks. Linear interpolation is based on the following formula: x*(1-s) + y*s which can equivalently be written as x + s(y-x). Minimum Shader Model GLSL ES 3 clamp clamp — constrain a value to lie between two further values Declaration Parameters x Specify the value to constrain. 
minVal Specify the lower end of the range into which to constrain x. maxVal Specify the upper end of the range into which to constrain x. DescriptionGLSL Noise Algorithms . GitHub Gist: instantly share code, notes, and snippets.A Slerp path is, in fact, the spherical geometry equivalent of a path along a line segment in the plane; a great circle is a spherical geodesic . Oblique vector rectifies to Slerp factor. More familiar than the general Slerp formula is the case when the end vectors are perpendicular, in which case the formula is p0 cos θ + p1 sin θ.2. lerp. lerp 函数的定义是. lerp(a, b, w) { return a + w*(b-a) } 当 w = 0 时,返回a,当 w = 1 时返回b,否则返回对 a 和 b 的差值,w 越接近0,返回结果越接近a,w越接近1,返回结果🈷️接近1,通常用来计算一些渐变量。 lerp (a, b, w) 中,a 和 b 需要同类型,可以是数值或向量The std140 Layout Rules The set of rules shown in Table I.1 are used by the GLSL compiler to place members in an std140-qualified uniform block. This feature is available … - Selection from OpenGL Programming Guide: The Official Guide to Learning OpenGL, Version 4.3, Eighth Edition [Book] Aug 15, 2021 · The lexer file would define a GLSL token as a regular expression that matches all valid characters in GLSL. This will work as long as GLSL doesn't use the } character. (I have no idea what GLSL looks like) If GLSL uses curly brackets, you could try some character that is not in valid GLSL, or do something clunky like BEGIN_GLSL END_GLSL. de 2016 This tutorial will introduce rotations, translations and other affine transformations. 1 Feb 29, 2020 · I have two 2D unit vectors a and b. 2D Rotation is a process of rotating an object with respect to an angle in a two dimensional plane. You port your OpenGL Shader Language (GLSL) code to Microsoft High Level Shader Language (HLSL) code when you port your graphics architecture from OpenGL ES 2.0 to Direct3D 11 to create a game for Universal Windows Platform (UWP). The GLSL that is referred to herein is compatible with OpenGL ES 2.0; the HLSL is compatible with Direct3D 11.ベジェ曲線とは 数式で曲線を表現する方法の一つです。 コンピュータ上で滑らかな曲線を表現できるため、多くのドローソフトや文字の描画で採用されています。 ベジェ曲線の原理 ベジェ曲線を理解するには、1次ベジェ曲線から一つずつ次元を上げながら動きを見ていくのが分かりやすいと ...Dec 14, 2020 · Re: RAYMARCHING HLSL/GLSL (DX/OpenGL) I came up with an extremely easy way to set a DX-texture from a ByteArray. In this case the ByteArray width is 4 times the width of the image, each value represents the colors B G R A. The origin coordinate is (0,0) [not (1,1)]. EG before calling BytesArrayToTexture. Blending Advanced-OpenGL/Blending. Blending in OpenGL is commonly known as the technique to implement transparency within objects. Transparency is all about objects (or parts of them) not having a solid color, but having a combination of colors from the object itself and any other object behind it with varying intensity.编写Unity3D着色器的三种方式之美,过了2个星期,又来继续说着色器了。这两个星期发生的事情有点多。不管你会不会写Unity3D的shader,估计你会知道,Unity3D编写shader有三种方式,这篇东西主要就是说一下这三种东西有什么区别,和大概是怎样用的。先来列一下这三种方式 ...Hey I have a question to this Topic. I have something in my head like a pixelbased multimaterial. I want to seperate phongshader and labertshader by an pixelbased if calculation to save math operations. for example one part of the model use Nomalmaps and specular and an otherpart dont use it. i think to use vertex/fracment with an lerp Funktion. in the end it isnt specular normal some more ...This function could also be written as lerp(0.1, 0.8, Intensity) in a pseudocode similar to HLSL or GLSL. Advanced Function. 
This advanced Function shows the inner workings of a Pixel Processor meant for adjusting the Hue of a color map input based on the intensity of a second grayscale mask input.(GLSL has the out keyword as well, and allows for custom output variable names. Eventually, you must hit a main() function though.) The main functions in HLSL could be named anything you want, whereas in GLSL, it must be main(). float4 in HLSL is the same as vec4 in GLSL, a struct with 4 floats.This function could also be written as lerp(0.1, 0.8, Intensity) in a pseudocode similar to HLSL or GLSL. Advanced Function. This advanced Function shows the inner workings of a Pixel Processor meant for adjusting the Hue of a color map input based on the intensity of a second grayscale mask input.纹理数组不会跨切片采样,因此从技术上讲,这是预期的结果。 如果你想在切片之间进行插值(例如:1.5f 给你第二个纹理的“一半”和第三个纹理的“一半”),你可以使用 Texture3d 代替,它允许这样做(但会花费更多,因为它会执行三线性过滤) GLSL atomic. An Atomic Counter is a GLSL variable type who's storage comes from a Buffer Object. Atomic counters, as the name suggests, can have atomic memory operations performed on them. They can be thought of as a very limited form of buffer image variable.Otherwise, all that's new here is the lerp function. In this example, rather than reflecting anything, our Fresnel Rim is just a single color (_Color), but the principle is the same. If you wanted to turn the rim into a reflection, you'd simply replace the _Color variable with a color sampled from a cube map, or taken from a camera, or ...Deprecated alias for step_decimals. float dectime ( float value, float amount, float step ) Note: dectime has been deprecated and will be removed in Godot 4.0, please use move_toward instead. Returns the result of value decreased by step * amount. a = dectime(60, 10, 0.1)) # a is 59.0. float deg2rad ( float deg )Feb 15, 2017 · GLSLのmix関数はUnityのlerp関数に変更します。 また、ShaderToyでは引数にスクリーン座標が渡されてくるため、正規座標系に直すために画面解像度(iResolution)で割り算していますが、 UnityではテクスチャのUV座標が使える のでそちらを使います。 The std140 Layout Rules The set of rules shown in Table I.1 are used by the GLSL compiler to place members in an std140-qualified uniform block. This feature is available … - Selection from OpenGL Programming Guide: The Official Guide to Learning OpenGL, Version 4.3, Eighth Edition [Book] A Lerp node is used to blend between the clam being shut and open. The Rotation is applied to the GameObject's y-axis and z-axis. Rotating it around the x-axis. Swimming Fish. In this Shader, we're using a sine wave that's generated across the object's axis to make the fish wobble. We then mask off the head of the fish, so that the head ...level 1 Th3HolyMoose · 5y color = mix (color0, color1, max (0.0, noise - 0.5) * 2.0); that should do it! any value below 0.5 of noise will become 0, and a noise value of 1 will be come 0.5. Then you multiply that by 2 and you get a range of 0-1 for the mix function 3 level 2 piluve Op · 5y Hello!GLSL atomic. An Atomic Counter is a GLSL variable type who's storage comes from a Buffer Object. Atomic counters, as the name suggests, can have atomic memory operations performed on them. They can be thought of as a very limited form of buffer image variable.SSAO Advanced-Lighting/SSAO. We've briefly touched the topic in the basic lighting chapter: ambient lighting. Ambient lighting is a fixed light constant we add to the overall lighting of a scene to simulate the scattering of light. 
In reality, light scatters in all kinds of directions with varying intensities so the indirectly lit parts of a scene should also have varying intensities.HLSL函数列表本表来自网络,我对说明做了些修改。NameSyntaxDescriptionabsabs(x)返回x的绝对值。对x的每个元素都会独立计算一次。Absolute value (per component).acosacos(x)返回x的反余弦值。对x的每个元素都会独立计算一次。ReThis function could also be written as lerp(0.1, 0.8, Intensity) in a pseudocode similar to HLSL or GLSL. Advanced Function. This advanced Function shows the inner workings of a Pixel Processor meant for adjusting the Hue of a color map input based on the intensity of a second grayscale mask input.Lerp does a interpolation that returns values between the from and to input values for interpolation values between 0 and 1. The inverse of that is a function which we can hand a third value and it'll return how close that value is to the first or second value.Deprecated alias for step_decimals. float dectime ( float value, float amount, float step ) Note: dectime has been deprecated and will be removed in Godot 4.0, please use move_toward instead. Returns the result of value decreased by step * amount. a = dectime(60, 10, 0.1)) # a is 59.0. float deg2rad ( float deg )The actual formula is y = ( (256 - m) * x1 + m * x2 + 128) / 256 ) where, for a given pixel : y is the result (merge) m is the mask value (from 0 to 255) x1 is the first layer to merge. x2 is the second layer to merge. I am trying to port some avisynth scripts to shaders (GPU) but i am not an expert of .hlsl. Thanks.GLSL HLSL 説明; fract(x) frac(x) x の小数部を返します。 mod(x,y) fmod(x, y) x/y の浮動小数点の剰余を返します。 atan(x,y) atan2(x, y) 2 つの値 (x,y) のアークタンジェントを返します。 mix(x,y,a) lerp(x,y,a) xとyの間の線形補間。 texture2D(img,texcoord) tex2D(img,texcoord) 色取得de 2016 This tutorial will introduce rotations, translations and other affine transformations. 1 Feb 29, 2020 · I have two 2D unit vectors a and b. 2D Rotation is a process of rotating an object with respect to an angle in a two dimensional plane. Possible bug in Catalyst GLSL compiler. I ran into this issue while trying to implement SMAA in GLSL. The Catalyst driver simply makes my app crash on glCompileShader (). I checked if I did anything wrong by trying to compile the same shader in the AMD GPU ShaderAnalyzer, and it gave "unexpected error" as the shader assembly output.Cg inside GLSL. nVidia's implementation offers some extra features for GLSL. nVidia adds Cg features to GLSL. That means you can use Cg language features such as built-in functions (lerp (), etc) and types (half, half2, etc) and your shader will compile. Keep in mind that it will compile and yet it will give warning messages in the info log ...SSAO Advanced-Lighting/SSAO. We've briefly touched the topic in the basic lighting chapter: ambient lighting. Ambient lighting is a fixed light constant we add to the overall lighting of a scene to simulate the scattering of light. In reality, light scatters in all kinds of directions with varying intensities so the indirectly lit parts of a scene should also have varying intensities.In the last post we saw how to do cubic interpolation on a grid of data. Strangely enough, when that grid is a grid of pixel data, bicubic interpolation is a common method for resizing images! Bicubic interpolation can also used in realtime rendering to make textures look nicer when scaled than standard bilinear texture…Lerp: Linear interpolation between values A and B, using the floating position C. 
SmoothStep (A, B, C); SmoothStep: Returns a value from 0 to 1, compared between minimum value A and maximum B, using the value C. In some cases you can use them similarly, smoothstep only having a more slope-like-curve.GLSL ES 3 clamp clamp — constrain a value to lie between two further values Declaration Parameters x Specify the value to constrain. minVal Specify the lower end of the range into which to constrain x. maxVal Specify the upper end of the range into which to constrain x. DescriptionRe: Postprocessing: For real. Post. by AnyOldName3 » Tue Aug 17, 2021 6:16 pm. FiftyTifty wrote: ↑ Tue Aug 17, 2021 2:37 am For example: float ToneMappingCurve=lerp (lerp (ToneMappingCurveNight, ToneMappingCurveDay, ENightDayFactor), ToneMappingCurveInterior, EInteriorFactor); If you're just going to put the code on its own line, the old ...Green = slerp, blue = lerp, orange = nlerp. Here is an example of a medium sized angle (~90 degrees) interpolating the same time t between the angles. Lastly, here's a smaller angle (~35 degrees). You can see that the results of lerp / nlerp are more accurate as the angle between the interpolated vectors gets smaller.However GLSL is quite difficult to work with since each and every graphics card manufacturer creates their own compiler for it, all of course with subtle differences. CgFx protected me from quite a lot of this because it converted the CgFX shader code into GLSL but now this needs to be handled manually. In fact, the ray tracing shader used below is a translation of one written in GLSL, which would use OpenGL as its graphics API. This implementation was designed as part of a deferred shading pipeline. The effect runs after geometry buffer generation and lighting has completed. ... (lerp (0.0 f, 1.0 f, saturate (raySS. w * 4.0 f))); ...You can't easily communicate back to the CPU from within GLSL. Using glslDevil or other tools is your best bet. A printf would require trying to get back to the CPU from the GPU running the GLSL code. Instead, you can try pushing ahead to the display. Instead of trying to output text, output something visually distinctive to the screen.Many libraries and shading languages have a "lerp" helper-function (in GLSL known instead as mix ), returning an interpolation between two inputs (v0, v1) for a parameter (t) in the closed unit interval [0, 1]. Signatures between lerp functions are variously implemented in both the forms (v0, v1, t) and (t, v0, v1).Move object in canvas - Most syntax favors GLSL tokens over HLSL tokens: vec4 instead of float4, mix instead of lerp, and so on. Traso: MAME Fan Reged: 01/15/13 Posts: 2687 Send PM: Aye. That is awful pre-tty. (nt) [Re: SoltanGris42] #357255 - 08/02/16 10:03 PM Reply Scifi frauds. SF illuminates.Material Design on the GPU. One of the things I like about Material Design is that it builds on principles we see in the real world. Depth is used to convey information, light is used to indicate seams, and drop shadows follow convincing behaviours. Material design is inspired by tactile materials, such as paper and ink. […]Mar 25, 2017 · Processing.org. The Nature of Code. 別ブログ. カテゴリー. 3D (120) GLSL (48) GUI (9) p5.js (48) Three.js (11) What I've essentially done in terms of the math is remapped the range to be from 0-1 to -1-1. 
To do this I am using a concept called the Constant Bias Scale and to see that graph here's what it is: Using this graph here you can see if I want the range to go from 0-1 to -1-1 I just need to subtract .5 and multiply by 2.setra mercedes,avatar 3d 2x12 horizontal forte,pyqt qtablewidget get cell value,discord best mic settings reddit,wolf 814784,wix change page design,seasons and ecliptic simulator,final flight outfitters,battery light on after replacing battery,silvertown tunnel video - f3d
|
__label__pos
| 0.600828 |
SimpleXMLIterator::hasChildren
(PHP 5 >= 5.1.0, PHP 7)
SimpleXMLIterator::hasChildren - Checks whether the current element has sub elements
Description
public bool SimpleXMLIterator::hasChildren ( void )
This method checks whether the current SimpleXMLIterator element has sub elements.
Parameters
This function has no parameters.
Return Values
TRUE if the current element has sub elements, otherwise FALSE.
Examples
Example #1 Check whether the current element has sub elements
<?php
$xml = <<<XML
<books>
<book>
<title>PHP Básico</title>
<author>Jim Smith</author>
</book>
<book>XML básico</book>
</books>
XML;
$xmlIterator = new SimpleXMLIterator( $xml );
for( $xmlIterator->rewind(); $xmlIterator->valid(); $xmlIterator->next() ) {
    if( $xmlIterator->hasChildren() ) {
        var_dump( $xmlIterator->current() );
    }
}
?>
The output of the example would be:
object(SimpleXMLIterator)#2 (2) {
["title"]=>
string(10) "PHP Básico"
["author"]=>
string(9) "Jim Smith"
}
User Contributed Notes 1 note
grummfy at gmail dot com
6 years ago
Hello,
just a note for people like me who encounter this problem:
<?php
foreach($xmlIterator as $x)
{
    if($x->hasChildren())
    {
        // never reached
        echo 'x : ';
        var_dump($x->current());
    }
    if($xmlIterator->hasChildren())
    {
        echo 'iterator : ';
        var_dump($x->current());        // always null
        var_dump($xmlIterator->current());
    }
}
?>
Introduction to Computers
Presentation Transcript
Slide 1:
BASIC COMPUTER APPLICATIONS Devan Raju pericherla
Slide 2:
UNIT -I : COMPUTER CONCEPTS. UNIT-II : PROG. IN ‘C’ LANGUAGE. UNIT–III : INTRODUCTION TO MS-OFFICE (WORD & EXCEL) UNIT–IV : INTRODUCTION TO MS-OFFICE (POWER POINT & ACCESS) UNIT–V : INFORMATION INFRASTRUCTURE Internet and World Wide Web (WWW). Introduction to Structured Query Language (SQL)
UNIT - I :
UNIT - I COMPUTER CONCEPTS
DEFINITION OF A COMPUTER :
DEFINITION OF A COMPUTER
Slide 5:
A computer is an electronic device which accepts data from an input device, processes it, and gives information to an output device. Data: the unordered (raw material) form. Process: the work performed on the data. Information: the ordered form.
Slide 6:
The word computer comes from the word "compute", which means to calculate. It is a calculating device that performs arithmetic operations at enormous speed. A computer is a device that operates on data. A computer can store, process and retrieve data as and when required. The fact that computers process data is so fundamental that many people have started calling the computer a data processor. The activity of processing data using a computer is called data processing.
Diagram of a Computer :
Diagram of a Computer (figure): monitor, keyboard, speakers, mouse and CPU (Central Processing Unit).
Input and Output Devices :
Input and Output Devices
Slide 9:
Input devices: the devices from which the computer accepts data are known as input devices. Ex:- Keyboard, Mouse, Scanner, OCR, MICR, etc. Output devices: the devices from which we get the information are known as output devices. Ex:- Monitor, Speakers, Printer, etc.
INPUT DEVICES :
INPUT DEVICES
Slide 11:
Keyboard:- This is the most commonly used input device, this device accepts set of characters and translate them into a form, which computer can understand. Keyboard consists of 104 keys. Mouse:- Mouse is a small hand held device, that fits in the users palm. It is the most popular pointing and drawing device. This device is used to move the cursor on the computer screen to give the instructions to your computer and to run the programs or applications. It is also used to select menu commands, move icons, re-size windows and for closing windows.
Slide 12:
Scanner:- This device is used to input pictures and images into your computer. It converts images to digital form so that it can be fed into the computer.
Slide 13:
Optical Character Recognition (O C R):- The computer cannot interpret the stored document as letters, number and special characters. So in order to over come this problem we are using the technology OCR. This device converts the bit map images of characters to equivalent ASCII codes. OCR software is designed to recognize texts which are in OCR fonts. If the document contain different types of fonts, the OCR software will not work effectively.
Slide 15:
Magnetic-ink character recognition (MICR):- MICR is similar to OCR, which is used by the banking industry for fast processing of large volume of cheques. Before giving cheques to the customers, bank identification code (name, branch etc.), identification number and cheque number are printed in special characters with special ink, which contains magnetizable particles of iron oxide. The MICR supported character set which contains only symbols i.e. 0-9(10 digits) and 4 special characters
OUTPUT DEVICES :
OUTPUT DEVICES
Slide 18:
Monitor:- It is a output device used to display the information, programs and applications. It is also called as VDU (Visual Display Unit). Like television, monitors are also available in different sizes. Speakers:- It is also a output device, used to produce music or speech from programs. A speaker port allows to connect speaker to the computer. Speakers can be built in or we can attach separately.
Slide 19:
Printer:- It is also an output device, used to generate a hard copy of files. Printers are characterized by two qualities: (1) resolution and (2) print speed. Print resolution is measured as the number of dots per inch and print speed is typically measured in pages per minute.
Slide 20:
Types of printers:- Drum Printer. Chain/Band Printer. Dot-Matrix Printer. Inkjet Printer. Laser Printer.
CHARACTERISTICS OF A COMPUTER:- :
CHARACTERISTICS OF A COMPUTER:-
Slide 22:
Speed Storage (or) Memory Accuracy Diligence Versatility
Slide 23:
Speed:- Computer is a fast calculating device, that performs in few seconds, that a human can do in an entire year. The units of speed in case of computers are micro seconds, nano seconds and Pico seconds. Memory:- Computer can store and recall any amount of information because of its secondary storage capacity. Even after several years, the information recalled would be accurate as on the day when it fed to the computer. It forgets or looses certain information only when it is asked to do so. The units of memory is in bytes.
Slide 24:
Accuracy :- The accuracy of a computer is consistently high and the degree of accuracy depends upon its own design. It will perform every operation with same accuracy. It can produce accurate results at a high degree of 100%. Diligence:- Computer is free from monotony, tiredness, and lack of concentration. It can work continuously for hours without errors. If 10 million calculations has to be performed, a computer can perform the 10th million calculation exactly with same accuracy and speed as first one.
Slide 25:
Versatility:- Computer can be used to solve the problems related to different fields like commercial, scientific, educational, research and defense. This is one of the most wonderful things about the computer. In one moment, it is preparing the results of an examination, the next moment, it is busy in preparing electricity bills and in between it may be helping an office section to trace important letters.
DIS-ADVANTAGES :
DIS-ADVANTAGES
Slide 27:
No IQ:- It cannot think on its own. A computer cannot take its own decisions. It has no intelligence; its IQ is zero. Computers cannot make judgments of their own. The judgment is based on the instructions given to them in the form of programs that are written by us. No feelings:- Computers are devoid of emotions. They have no feelings because they are machines.
Slide 28:
BASIC ORGANIZATION OF A COMPUTER
Slide 29:
Central Processing Unit (CPU)
Memory Unit :
Memory Unit There are two types of memories: Primary Memory, and Secondary Memory or Auxiliary Memory. Primary memory:- RAM & ROM come under primary memory: RAM (Random Access Memory) and ROM (Read Only Memory).
Slide 31:
RAM (Random Access Memory) :- It is the key working area of the memory, also called as user memory. It varies from computer to computer and it determines the sizes and scope. The main feature of RAM is that can be read from or written onto and can be accessed automatically. The contents of RAM are available as long as the computer is not switched off, it loses the contents as soon as the power is switched off. So this is a volatile memory or temporary memory and the data that is stored here can be altered.
Slide 32:
ROM (Read Only Memory):- As the name itself implies, it holds permanent data or instructions that can be read from, but not written to, i.e. the information is permanently recorded, so the user cannot change the instructions. It is a non-volatile memory, which means that the contents of ROM are not lost when the computer is switched off. ROMs are also known as field stores, permanent stores or dead stores. There are two varieties of ROMs: PROM (Programmable Read Only Memory) and EPROM (Erasable Programmable Read Only Memory).
Secondary Memory or Auxiliary Memory:- :
Secondary Memory or Auxiliary Memory:- This memory is again divided in to Sequential access device and Direct access devices. Magnetic tape comes under sequential access device. Magnetic disks and optical disks comes under direct access devices. Floppy disk and Hard disk comes under magnetic disks. CD comes under optical disks.
Slide 35:
Magnetic tapes:- It is the most popular storage medium for storing large amount of data. Magnetic tape medium is a plastic ribbon which is of ½ inch or ¼ inch wide, 30-2400 feet long. It is coated with magnetized recording material such as iron oxide or chromium dioxide. Data is recorded on the tape in the form of tiny invisible magnetized and non-magnetized spots on the surface of the tape.
Slide 36:
Magnetic disk:- It is a thin, circular plate made of metal or plastic, coated on both sides with a magnetized recording material such as iron oxide.
Slide 37:
Floppy disk:- A floppy disk is used to store data and to transfer data from one location to another. This device provides the option to read or write data. The memory capacity of a floppy is 1.44 MB (Mega Bytes), and the size of a floppy disk is 3 ½ inches. Data is stored in the form of tracks and sectors. Demerits:- These disks are light-sensitive, so it is better to keep them in dark places. They are affected by magnetic fields, so it is better to keep magnetic devices away from them.
Slide 39:
Hard disk:- It is not a single flexible disk, but a stack of metal disks sealed in box. The storage capacity of the hard disk can be from 2 GB to 120+ GB. These are very reliable as compared to floppies. It is rewritable and any information can be accessed in a fraction of second.
Slide 42:
Optical disk:- It is a high capacity secondary storage medium. It can store extreme large amounts of data in a limited space. An optical disk consists of rotating disk, which is coated with a thin metal. Laser beam technology is used for recording/reading of data on the disk. Due to the use of laser beam technology, optical disks are also known as laser disks or optical laser disks.
Slide 43:
Compact disks:- Compact disk contains digital information which can be read or write. It can hold vast amounts of information such as full-motion videos, animation, music etc., The memory capacity of CD is 700 MB. Data storage is in the form of microscopic pits or particles. Size – 12 cm in diameter. Due to its light weight, they are very easy to handle, store and transport from one place to another.
DIFFERENT TYPES OF CD DRIVES :
DIFFERENT TYPES OF CD DRIVES
LANGUAGES :
LANGUAGES
Slide 46:
Computer languages are basically divided in to 3 categories. Machine language Assembly language. High level or user level language
Slide 47:
Machine level language:- This language is also known as low level language or binary level language. It contains combinations of 0's and 1's. It is a machine-understandable language.
Slide 48:
Assembly language:- This language is in the form of alphanumeric mnemonic codes. It allows instructions and storage locations to be represented by letters and symbols instead of numbers. It is a machine-dependent language, and knowledge of the hardware is required. Assembler:- it is a translator program that translates an assembly language program into a machine language program.
Slide 49:
High level language:- This language contains English alphabets, numbers and mathematical symbols. It is a machine-independent language, so there is no need to deal with machine level coding. In order to translate a high level language to a low level language there are two converters, namely 1) Compiler 2) Interpreter
Slide 50:
Compiler:- It is a converter which is used to convert a high level program to a low level program. It checks the whole program at a time. Interpreter:- It is also a converter which is used to convert a high level program to a low level program; it checks the program line by line.
Slide 51:
ASCII Code (American Standard Code for Information Interchange):- ASCII code is the "language of digital devices", devices like a computer or a digital television. A 65, B 66……… a 97, b 98………...
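As a quick illustration of these character codes (not part of the original slides), the short PHP sketch below uses the built-in ord() and chr() functions to move between characters and their ASCII values; the particular letters shown are just examples.
<?php
// Character to ASCII value.
echo ord('A'); // 65
echo ord('a'); // 97
// ASCII value back to character.
echo chr(66);  // B
echo chr(98);  // b
?>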
ALGORITHMS AND FLOWCHARTS :
ALGORITHMS AND FLOWCHARTS
Slide 53:
Algorithm:- It is a step by step description of how to arrive at the solution of a given problem. It is also defined as a sequence of instructions designed in such a manner that the desired results will be obtained. There are various ways to represent an algorithm. Normally programmers use one or more of the following ways to represent their algorithms: as a program, as a flowchart, or as pseudocode. A small example is given below.
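For instance, an algorithm to find the largest of three numbers can be written as numbered steps and then as a program; the PHP version below is only an illustrative sketch added here, not material from the slides.
<?php
// Algorithm: find the largest of three numbers.
// Step 1: read A, B and C.
// Step 2: assume A is the largest.
// Step 3: if B is larger, take B; if C is larger, take C.
// Step 4: output the result.
function largestOfThree($a, $b, $c)
{
    $largest = $a;
    if ($b > $largest) {
        $largest = $b;
    }
    if ($c > $largest) {
        $largest = $c;
    }
    return $largest;
}
echo largestOfThree(12, 45, 7); // prints 45
?>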
Slide 54:
Flowcharts:- A flowchart is a pictorial representation of an algorithm. It uses boxes of different shapes to represent different types of instructions.
Slide 55:
Basic Flowchart Symbols (figure of the standard symbols; labels include Processing).
Slide 56:
Principles or rules of a flowchart:- First chart the main line of logic, then incorporate detail. Words in flowchart symbols should be common statements which are easy to understand. Be consistent in using names and variables in the flowchart. Go from left to right and from top to bottom in constructing flowcharts. Keep the flowchart as simple as possible. If a new flowcharting page is needed, it is recommended that the flowchart be broken at an input or output point.
Slide 57:
Limitations:- Flowcharts are very time consuming. There is no standard determining the amount of detail that should be enclosed in a flowchart. Redrawing a flowchart is a tedious task, so programmers often do not redraw the flowchart when the program is modified.
MEMORY MEASUREMENTS :
MEMORY MEASUREMENTS
Slide 59:
BIT :- It is the smallest memory unit, able to store either a 0 or a 1. 4 Bits = 1 Nibble. 8 Bits = 1 Byte. 1024 Bytes = 1 Kilo Byte (KB). 1024 Kilo Bytes = 1 Mega Byte (MB). 1024 Mega Bytes = 1 Giga Byte (GB).
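To show how these units relate in practice, here is a small PHP sketch (added here, not from the slides) that converts a raw byte count into KB, MB or GB using the factor of 1024 described above.
<?php
// Convert a byte count into a human-readable size using powers of 1024.
function formatBytes($bytes)
{
    $units = array('Bytes', 'KB', 'MB', 'GB');
    $i = 0;
    while ($bytes >= 1024 && $i < count($units) - 1) {
        $bytes = $bytes / 1024;
        $i++;
    }
    return round($bytes, 2) . ' ' . $units[$i];
}
echo formatBytes(1474560); // capacity of a 1.44 MB floppy -> "1.41 MB"
?>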
COMPUTER VIRUS :
COMPUTER VIRUS
Slide 61:
Computer virus:- A virus is a program that can infect other programs by modifying them. The modification includes creating a duplicate copy of the virus program, which can infect other programs. A virus takes temporary control of the operating system. When infected software comes into contact with uninfected software, a fresh copy of the virus passes into the uninfected software. Thus the infection can spread from software to software or from computer to computer. A virus can do anything that other programs do. The only difference is that it attaches itself to another program and executes secretly when the host program is running. Once a virus is executing, it can perform any function.
Slide 62:
Types of virus are:- 1) Parasitic virus. 2) Memory Resident virus. 3) Stealth virus. 4) Polymorphic virus.
HARDWARE AND SOFTWARE :
HARDWARE AND SOFTWARE
Slide 64:
A computer consists of two basic parts: 1) Hardware. 2) Software. Hardware:- The physical components of a computer are known as hardware. Hardware is the term given to the machinery and the various individual pieces of the computer. Hardware components are the parts we can touch and see. Ex:- Keyboard, Mouse, Monitor, CD Drive, Printer and all visible parts inside the CPU.
Slide 65:
Software:- Computer cannot do anything on its own. It must be instructed to do a desired job. Hence it is necessary to specify a sequence of instructions to do our job. This sequence of instructions is known as program. The group of programs forms a software.
Slide 66:
Software is divided in to two types 1) System Software 2) Application Software. System software:- It is software designed to control the operations and to increase the processing capabilities of the computer. It makes the computer system more effective and efficient. All the operating systems are system software's. Application software's:- Programs developed for solving specific problems through a specific task is known as application software.
OPERATING SYSTEM :
OPERATING SYSTEM
Slide 68:
OPERATING SYSTEM (O.S):- An O.S is system software which acts as an interface between the user and the computer. The work of the O.S is to control the resources of the system and make the system more convenient for its users. Functions of the operating system:- Process management:- it includes scheduling of various system resources to the different processes, and providing communication between the processes. Memory management:- it includes allocation and de-allocation of memory space to various programs.
Slide 69:
File management:- it takes care of file related activities such as organization, storage, retrieval, naming, sharing and protection of files. Security:- the operating system protects the resources and information of a computer system against destruction and unauthorized access. Besides the above, it is also responsible for batch processing, time sharing, error checking, and loading and executing user programs, etc.
Slide 70:
Operating system is divided in to two types:- Single user operating system Multi user operating system. Single user operating system:- As the name specifies, One user using one system at a time is known as single user operating system Example:- MS-Dos, Windows 95,98, 2000, XP, etc., Multi user operating system:- As the name specifies many users using single system at a time is known as multi user operating system. Example:- Unix, Linux, Win NT etc..,
Slide 71:
MS-DOS (Microsoft Disk Operating System):- It is a single user operating system introduced in the year 1981 jointly by Microsoft and IBM. It became the most popular operating system of the 1980's for personal computers. Its popularity started declining in the 1990's due to the launch of the Microsoft Windows operating system. Unix Operating System:- It is a multi user operating system, developed in the early 1970's. It is the first operating system to be written in the high level language 'C'. This operating system became popular due to the concept of "porting".
Slide 72:
Microsoft Windows operating system:- This operating system was developed by Microsoft to overcome the limitations of its own MS-DOS. The most important features are: It is a graphical user interface (GUI), hence it is easier for a new user to learn and use the system. It is a single user, multitasking operating system; that is, a user may run more than one program at a time. The monitor screen can be partitioned into multiple windows and the progress of different programs can be viewed in different windows. The successful versions of Windows include 3.0, Windows 95, 2000, XP and Vista.
EVOLUTION OF COMPUTERS :
EVOLUTION OF COMPUTERS
Slide 74:
Historians start the history of calculation with the "ABACUS" (a calculating device) in around 5000 B.C. John Napier, a Scottish mathematician, did considerable work on aids for calculation and invented logarithms in 1614. In 1620, William Oughtred, an English mathematician, invented the slide rule. In 1642 Blaise Pascal, a French mathematician, invented the first mechanical calculating machine. In 1671 Gottfried von Leibniz, a German mathematician, invented a calculating machine which was able to perform multiplications and divisions.
Slide 75:
In 1822 Charles Babbage, a professor of mathematics at Cambridge University, designed a machine called the difference engine. This machine could perform simple computations for trigonometric and logarithmic tables. After World War II, Howard H. Aiken of Harvard University designed a machine that could automatically perform a sequence of arithmetic operations. After that, ENIAC (Electronic Numerical Integrator and Calculator), developed in 1946, became the first electronic calculator. Between 1947 and 1950, the Moore School personnel and the Ballistics Research Laboratory of the U.S. Army built a computer named EDVAC (Electronic Discrete Variable Automatic Computer).
Slide 76:
Von Neumann (frequently referred to as the father of modern computers) introduced the stored program concept in a computer. After that, the generations started…
GENERATIONS OF A COMPUTER :
GENERATIONS OF A COMPUTER
Slide 78:
FIRST GENERATION:- Vacuum tubes technology. Limited storage capacity. Slow in speed. Huge in size and produces over heat.
Slide 79:
SECOND GENERATION:- Transistors and diodes technology. Increase in storage capacity Faster in speed Reduction in size and heat production
Slide 80:
THIRD GENERATION:- Integrated circuits technology (I C’s). Smaller in size and better in performance. Extensive use of high level programming. Remote processing and time sharing.
Slide 81:
FOURTH GENERATION:- Large scale integration technology circuits. (V L S I) Increased in storage capacity. Faster and smaller. Modular design, Versatility and compatibility. Sophisticated programs and languages for special applications.
Slide 82:
From the above we can see that early computers were very big, consumed a lot of power, heated up tremendously and shut down frequently. Because of all this they were very expensive to build and maintain. As the technology improved, computers became smaller, faster and more powerful.
Slide 83:
Inside the system unit (figure): power supply box (SMPS), RAM, motherboard, processor, CD drive, floppy drive, hard disk.
END OF UNIT-I :
END OF UNIT-I
Saving configuration information using key value pairs
There are times when you need to save configuration information for a project. I get this requirement time and again. Since most of my projects are database driven, I decided to use a database table dedicated to save this config info. I also developed few functions to read and set these settings.
Setting up the database
First here’s the table structure. It is pretty simple, with 3 fields (ie: columns) per record. The ‘metaId’ field is set to auto increment, so that every time I add a new record I don’t have to worry about setting a record id. ‘metaKey’ holds the name of the variable, or the ‘key’, and ‘metaValue’ holds the value of the variable. In order to understand the ‘table_prefix’ best practice, please go here. It will explain the meaning of the ‘$dbprefix’ and ‘$db’ variables.
CREATE TABLE `sls_config` (
`metaId` int(11) NOT NULL auto_increment,
`metaKey` varchar(50) NOT NULL,
`metaValue` varchar(50) NOT NULL,
KEY `metaId` (`metaId`)
) ENGINE=MyISAM COMMENT='stores config information';
Although I thought 50 characters are enough for a variable (and its value), you may need to increase/decrease that depending on your scenario. ‘int’ will make sure we will never run out of variables; an auto-incrementing signed INT allows over 2 billion key/value pairs! You may want to index this table on metaKey, but I didn’t because it only has 3 fields in total.
Creating a new config variable
You can provide a metaValue at the time of creating a metaKey. If you do not provide one, 0 will be used.
FUNCTION createMetaKey($metaKey, $metaValue=0)
{
/*
Create a new key/value pair. If metaValue is not provided default = 0
input :
metaKey, metaValue
Output:
1 if success, -1 if duplicate found, 0 db write error
*/
global $dbprefix,$db;
//check for duplicate
$tmp = getMetaValue($metaKey);
if ($tmp) {
return -1; //key exist, do nothing
} else {
$query="INSERT INTO ".$dbprefix."config (metaKey, metaValue)
VALUES ('$metaKey','$metaValue') "; // insert data
$result = mysql_query($query);
$tmp = mysql_affected_rows($db); // number of rows inserted
if ($tmp) {
return 1;
} else {
return 0;
}
} //end ELSE-IF
} //end of createMetaKey
Reading a config variable
I believe the code is self explanatory. I use plain vanilla db access code. With the LIMIT clause I only read 1 record. Although this is an extra precaution, I take care of this under ‘creating a new config variable’. You need to make sure the variable $metaKey is wrapped in inverted commas inside the query.
FUNCTION getMetaValue($metaKey)
{
/*
Reads config info from db
input :
metaKey
Output :
metaValue if key is found, 0 otherwise
*/
global $dbprefix,$db;
$query = "SELECT * FROM ".$dbprefix."config WHERE metaKey='$metaKey' LIMIT 1";
$result = mysql_query($query);
$numOfRecs=mysql_numrows($result);
if ($numOfRecs) {
$metaValue = mysql_result($result,0,"metaValue");
return $metaValue;
} else {
return 0;
}
} //end of getMetaValue
Changing the value of a config variable
FUNCTION setMetaValue($metaKey, $metaValue)
{
/*
Sets config info in the db
input :
metaKey, metaValue
Output:
1 if success, 0 otherwise
*/
global $dbprefix,$db;
$query="UPDATE ".$dbprefix."config SET
metaValue='$metaValue'
WHERE metaKey='$metaKey'";
//echo "query=$query : recs=$numOfRecs<br>";
$result = mysql_query($query);
$tmp = mysql_affected_rows($db); // number of rows updated
if ($tmp) {
return 1;
} else {
return 0;
}
//echo "result=$tmp<br>";
} //end of setMetaValue
Here’s a sample test file
This file first creates a key named ‘version’ with a default value ‘3.2’. Then value is changed to ‘3.3’ and is read and echoed to screen. This uses two include files. First one being database config file. Read here for more info.
<?php
require_once("inc_dbaccess.php");
require_once("inc_functions.php");
echo "<h1>Testing config manager</h1>";
echo "<h2>Creating</h2>";
$m_key = 'version';
$m_value = '3.2';
echo "key=$m_key : value=$m_value (value is optional) <br>";
$tmp = createMetaKey($m_key, $m_value);
switch($tmp){
case -1 :
echo "key already exist";
break;
case 0 :
echo "db write error";
break;
case 1 :
echo "key created";
break;
} //end SWITCH
echo "<h2>Setting</h2>";
$m_key = 'version';
$m_value = '3.3';
echo "key=$m_key : NEW value=$m_value <br>";
$tmp = setMetaValue($m_key, $m_value);
if ($tmp) {
echo "key updated succussfully";
} else {
echo "key update failed";
exit;
}
echo "<h2>Reading</h2>";
$m_key = 'version';
$tmp = getMetaValue($m_key);
if ($tmp) {
echo "key='$m_key' : value= ".$tmp;
} else {
echo "Wrong key.";
exit;
}
?>
Conclusion
The code can be further optimized, eg: doing away with the ‘$tmp’ variable, but then again the objective of keeping that is purely to do with usability.
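As an example of that optimization, here is one possible sketch with the ‘$tmp’ variable removed; it assumes the same $dbprefix/$db globals as the functions above and is not part of the downloadable library.
FUNCTION setMetaValueCompact($metaKey, $metaValue)
{
	// Same behaviour as setMetaValue(), but without the temporary variable.
	global $dbprefix,$db;
	$query = "UPDATE ".$dbprefix."config SET
	          metaValue='$metaValue'
	          WHERE metaKey='$metaKey'";
	mysql_query($query);
	return mysql_affected_rows($db) ? 1 : 0;
} //end of setMetaValueCompact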
I keep these functions under a file named ‘inc_functions.php’ and include it in all files, as below:
<?php
require_once("inc_functions.php");
?>
There are also times when you do not want to use a database to save configuration information. XML would be the next best option. Let’s see how.
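The article stops short of showing the XML version, so here is a rough sketch only: it keeps the same key/value idea but stores it with PHP’s SimpleXML extension. The file name config.xml and the xmlGetMetaValue/xmlSetMetaValue names are my own assumptions, not part of the downloadable library.
<?php
// Minimal sketch: store key/value pairs in config.xml instead of a database table.
// Assumes a file such as: <config><version>3.3</version></config>
function xmlGetMetaValue($file, $metaKey)
{
	$xml = simplexml_load_file($file);
	return isset($xml->$metaKey) ? (string) $xml->$metaKey : 0;
}
function xmlSetMetaValue($file, $metaKey, $metaValue)
{
	$xml = simplexml_load_file($file);
	if (isset($xml->$metaKey)) {
		$xml->$metaKey = $metaValue;           // update existing key
	} else {
		$xml->addChild($metaKey, $metaValue);  // create a new key
	}
	return $xml->asXML($file) ? 1 : 0;
}
echo xmlGetMetaValue('config.xml', 'version');
?>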
Downloads
SQL file, PHP function library and Test file
3 thoughts on “Saving configuration information using key value pairs”
1. Hi can you also show how to use XML for the same purpose please. I am still not clear about XML under PHP.
Thanks
2. Cool idea indeed. Saves disk space, I used a text file for the same purpose. But things like file locks etc… made think of a better way to handle config info. You showed the perfect solution.
Thanks for sharing.
/*
* Copyright (C) 2008 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "debugger.h"
#include <sys/uio.h>
#include <set>
#include "class_linker.h"
#include "class_loader.h"
#include "context.h"
#include "object_utils.h"
#include "ScopedLocalRef.h"
#include "ScopedPrimitiveArray.h"
#include "stack_indirect_reference_table.h"
#include "thread_list.h"
extern "C" void dlmalloc_walk_heap(void(*)(const void*, size_t, const void*, size_t, void*), void*);
#ifndef HAVE_ANDROID_OS
void dlmalloc_walk_heap(void(*)(const void*, size_t, const void*, size_t, void*), void*) {
// No-op for glibc.
}
#endif
namespace art {
static const size_t kMaxAllocRecordStackDepth = 16; // Max 255.
static const size_t kNumAllocRecords = 512; // Must be power of 2.
class ObjectRegistry {
public:
ObjectRegistry() : lock_("ObjectRegistry lock") {
}
JDWP::ObjectId Add(Object* o) {
if (o == NULL) {
return 0;
}
JDWP::ObjectId id = static_cast<JDWP::ObjectId>(reinterpret_cast<uintptr_t>(o));
MutexLock mu(lock_);
map_[id] = o;
return id;
}
void Clear() {
MutexLock mu(lock_);
LOG(DEBUG) << "Debugger has detached; object registry had " << map_.size() << " entries";
map_.clear();
}
bool Contains(JDWP::ObjectId id) {
MutexLock mu(lock_);
return map_.find(id) != map_.end();
}
template<typename T> T Get(JDWP::ObjectId id) {
MutexLock mu(lock_);
typedef std::map<JDWP::ObjectId, Object*>::iterator It; // C++0x auto
It it = map_.find(id);
return (it != map_.end()) ? reinterpret_cast<T>(it->second) : NULL;
}
void VisitRoots(Heap::RootVisitor* visitor, void* arg) {
MutexLock mu(lock_);
typedef std::map<JDWP::ObjectId, Object*>::iterator It; // C++0x auto
for (It it = map_.begin(); it != map_.end(); ++it) {
visitor(it->second, arg);
}
}
private:
Mutex lock_;
std::map<JDWP::ObjectId, Object*> map_;
};
struct AllocRecordStackTraceElement {
Method* method;
uintptr_t raw_pc;
int32_t LineNumber() const {
return MethodHelper(method).GetLineNumFromNativePC(raw_pc);
}
};
struct AllocRecord {
Class* type;
size_t byte_count;
uint16_t thin_lock_id;
AllocRecordStackTraceElement stack[kMaxAllocRecordStackDepth]; // Unused entries have NULL method.
size_t GetDepth() {
size_t depth = 0;
while (depth < kMaxAllocRecordStackDepth && stack[depth].method != NULL) {
++depth;
}
return depth;
}
};
// JDWP is allowed unless the Zygote forbids it.
static bool gJdwpAllowed = true;
// Was there a -Xrunjdwp or -agent argument on the command-line?
static bool gJdwpConfigured = false;
// Broken-down JDWP options. (Only valid if gJdwpConfigured is true.)
static JDWP::JdwpOptions gJdwpOptions;
// Runtime JDWP state.
static JDWP::JdwpState* gJdwpState = NULL;
static bool gDebuggerConnected; // debugger or DDMS is connected.
static bool gDebuggerActive; // debugger is making requests.
static bool gDdmThreadNotification = false;
// DDMS GC-related settings.
static Dbg::HpifWhen gDdmHpifWhen = Dbg::HPIF_WHEN_NEVER;
static Dbg::HpsgWhen gDdmHpsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmHpsgWhat;
static Dbg::HpsgWhen gDdmNhsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmNhsgWhat;
static ObjectRegistry* gRegistry = NULL;
// Recent allocation tracking.
static Mutex gAllocTrackerLock("AllocTracker lock");
AllocRecord* Dbg::recent_allocation_records_ = NULL; // TODO: CircularBuffer<AllocRecord>
static size_t gAllocRecordHead = 0;
static size_t gAllocRecordCount = 0;
static JDWP::JdwpTag BasicTagFromDescriptor(const char* descriptor) {
// JDWP deliberately uses the descriptor characters' ASCII values for its enum.
// Note that by "basic" we mean that we don't get more specific than JT_OBJECT.
return static_cast<JDWP::JdwpTag>(descriptor[0]);
}
static JDWP::JdwpTag TagFromClass(Class* c) {
CHECK(c != NULL);
if (c->IsArrayClass()) {
return JDWP::JT_ARRAY;
}
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
if (c->IsStringClass()) {
return JDWP::JT_STRING;
} else if (c->IsClassClass()) {
return JDWP::JT_CLASS_OBJECT;
} else if (c->InstanceOf(class_linker->FindSystemClass("Ljava/lang/Thread;"))) {
return JDWP::JT_THREAD;
} else if (c->InstanceOf(class_linker->FindSystemClass("Ljava/lang/ThreadGroup;"))) {
return JDWP::JT_THREAD_GROUP;
} else if (c->InstanceOf(class_linker->FindSystemClass("Ljava/lang/ClassLoader;"))) {
return JDWP::JT_CLASS_LOADER;
} else {
return JDWP::JT_OBJECT;
}
}
/*
* Objects declared to hold Object might actually hold a more specific
* type. The debugger may take a special interest in these (e.g. it
* wants to display the contents of Strings), so we want to return an
* appropriate tag.
*
* Null objects are tagged JT_OBJECT.
*/
static JDWP::JdwpTag TagFromObject(const Object* o) {
return (o == NULL) ? JDWP::JT_OBJECT : TagFromClass(o->GetClass());
}
static bool IsPrimitiveTag(JDWP::JdwpTag tag) {
switch (tag) {
case JDWP::JT_BOOLEAN:
case JDWP::JT_BYTE:
case JDWP::JT_CHAR:
case JDWP::JT_FLOAT:
case JDWP::JT_DOUBLE:
case JDWP::JT_INT:
case JDWP::JT_LONG:
case JDWP::JT_SHORT:
case JDWP::JT_VOID:
return true;
default:
return false;
}
}
/*
* Handle one of the JDWP name/value pairs.
*
* JDWP options are:
* help: if specified, show help message and bail
* transport: may be dt_socket or dt_shmem
* address: for dt_socket, "host:port", or just "port" when listening
* server: if "y", wait for debugger to attach; if "n", attach to debugger
* timeout: how long to wait for debugger to connect / listen
*
* Useful with server=n (these aren't supported yet):
* onthrow=<exception-name>: connect to debugger when exception thrown
* onuncaught=y|n: connect to debugger when uncaught exception thrown
* launch=<command-line>: launch the debugger itself
*
* The "transport" option is required, as is "address" if server=n.
*/
static bool ParseJdwpOption(const std::string& name, const std::string& value) {
if (name == "transport") {
if (value == "dt_socket") {
gJdwpOptions.transport = JDWP::kJdwpTransportSocket;
} else if (value == "dt_android_adb") {
gJdwpOptions.transport = JDWP::kJdwpTransportAndroidAdb;
} else {
LOG(ERROR) << "JDWP transport not supported: " << value;
return false;
}
} else if (name == "server") {
if (value == "n") {
gJdwpOptions.server = false;
} else if (value == "y") {
gJdwpOptions.server = true;
} else {
LOG(ERROR) << "JDWP option 'server' must be 'y' or 'n'";
return false;
}
} else if (name == "suspend") {
if (value == "n") {
gJdwpOptions.suspend = false;
} else if (value == "y") {
gJdwpOptions.suspend = true;
} else {
LOG(ERROR) << "JDWP option 'suspend' must be 'y' or 'n'";
return false;
}
} else if (name == "address") {
/* this is either <port> or <host>:<port> */
std::string port_string;
gJdwpOptions.host.clear();
std::string::size_type colon = value.find(':');
if (colon != std::string::npos) {
gJdwpOptions.host = value.substr(0, colon);
port_string = value.substr(colon + 1);
} else {
port_string = value;
}
if (port_string.empty()) {
LOG(ERROR) << "JDWP address missing port: " << value;
return false;
}
char* end;
uint64_t port = strtoul(port_string.c_str(), &end, 10);
if (*end != '\0' || port > 0xffff) {
LOG(ERROR) << "JDWP address has junk in port field: " << value;
return false;
}
gJdwpOptions.port = port;
} else if (name == "launch" || name == "onthrow" || name == "oncaught" || name == "timeout") {
/* valid but unsupported */
LOG(INFO) << "Ignoring JDWP option '" << name << "'='" << value << "'";
} else {
LOG(INFO) << "Ignoring unrecognized JDWP option '" << name << "'='" << value << "'";
}
return true;
}
/*
* Parse the latter half of a -Xrunjdwp/-agentlib:jdwp= string, e.g.:
* "transport=dt_socket,address=8000,server=y,suspend=n"
*/
bool Dbg::ParseJdwpOptions(const std::string& options) {
VLOG(jdwp) << "ParseJdwpOptions: " << options;
std::vector<std::string> pairs;
Split(options, ',', pairs);
for (size_t i = 0; i < pairs.size(); ++i) {
std::string::size_type equals = pairs[i].find('=');
if (equals == std::string::npos) {
LOG(ERROR) << "Can't parse JDWP option '" << pairs[i] << "' in '" << options << "'";
return false;
}
ParseJdwpOption(pairs[i].substr(0, equals), pairs[i].substr(equals + 1));
}
if (gJdwpOptions.transport == JDWP::kJdwpTransportUnknown) {
LOG(ERROR) << "Must specify JDWP transport: " << options;
}
if (!gJdwpOptions.server && (gJdwpOptions.host.empty() || gJdwpOptions.port == 0)) {
LOG(ERROR) << "Must specify JDWP host and port when server=n: " << options;
return false;
}
gJdwpConfigured = true;
return true;
}
void Dbg::StartJdwp() {
if (!gJdwpAllowed || !gJdwpConfigured) {
// No JDWP for you!
return;
}
CHECK(gRegistry == NULL);
gRegistry = new ObjectRegistry;
// Init JDWP if the debugger is enabled. This may connect out to a
// debugger, passively listen for a debugger, or block waiting for a
// debugger.
gJdwpState = JDWP::JdwpState::Create(&gJdwpOptions);
if (gJdwpState == NULL) {
// We probably failed because some other process has the port already, which means that
// if we don't abort the user is likely to think they're talking to us when they're actually
// talking to that other process.
LOG(FATAL) << "Debugger thread failed to initialize";
}
// If a debugger has already attached, send the "welcome" message.
// This may cause us to suspend all threads.
if (gJdwpState->IsActive()) {
//ScopedThreadStateChange tsc(Thread::Current(), Thread::kRunnable);
if (!gJdwpState->PostVMStart()) {
LOG(WARNING) << "Failed to post 'start' message to debugger";
}
}
}
void Dbg::StopJdwp() {
delete gJdwpState;
delete gRegistry;
gRegistry = NULL;
}
void Dbg::GcDidFinish() {
if (gDdmHpifWhen != HPIF_WHEN_NEVER) {
LOG(DEBUG) << "Sending VM heap info to DDM";
DdmSendHeapInfo(gDdmHpifWhen);
}
if (gDdmHpsgWhen != HPSG_WHEN_NEVER) {
LOG(DEBUG) << "Dumping VM heap to DDM";
DdmSendHeapSegments(false);
}
if (gDdmNhsgWhen != HPSG_WHEN_NEVER) {
LOG(DEBUG) << "Dumping native heap to DDM";
DdmSendHeapSegments(true);
}
}
void Dbg::SetJdwpAllowed(bool allowed) {
gJdwpAllowed = allowed;
}
DebugInvokeReq* Dbg::GetInvokeReq() {
return Thread::Current()->GetInvokeReq();
}
Thread* Dbg::GetDebugThread() {
return (gJdwpState != NULL) ? gJdwpState->GetDebugThread() : NULL;
}
void Dbg::ClearWaitForEventThread() {
gJdwpState->ClearWaitForEventThread();
}
void Dbg::Connected() {
CHECK(!gDebuggerConnected);
VLOG(jdwp) << "JDWP has attached";
gDebuggerConnected = true;
}
void Dbg::GoActive() {
// Enable all debugging features, including scans for breakpoints.
// This is a no-op if we're already active.
// Only called from the JDWP handler thread.
if (gDebuggerActive) {
return;
}
LOG(INFO) << "Debugger is active";
// TODO: CHECK we don't have any outstanding breakpoints.
gDebuggerActive = true;
//dvmEnableAllSubMode(kSubModeDebuggerActive);
}
void Dbg::Disconnected() {
CHECK(gDebuggerConnected);
gDebuggerActive = false;
//dvmDisableAllSubMode(kSubModeDebuggerActive);
gRegistry->Clear();
gDebuggerConnected = false;
}
bool Dbg::IsDebuggerConnected() {
return gDebuggerActive;
}
bool Dbg::IsDebuggingEnabled() {
return gJdwpConfigured;
}
int64_t Dbg::LastDebuggerActivity() {
return gJdwpState->LastDebuggerActivity();
}
int Dbg::ThreadRunning() {
return static_cast<int>(Thread::Current()->SetState(Thread::kRunnable));
}
int Dbg::ThreadWaiting() {
return static_cast<int>(Thread::Current()->SetState(Thread::kVmWait));
}
int Dbg::ThreadContinuing(int new_state) {
return static_cast<int>(Thread::Current()->SetState(static_cast<Thread::State>(new_state)));
}
void Dbg::UndoDebuggerSuspensions() {
Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
}
void Dbg::Exit(int status) {
exit(status); // This is all dalvik did.
}
void Dbg::VisitRoots(Heap::RootVisitor* visitor, void* arg) {
if (gRegistry != NULL) {
gRegistry->VisitRoots(visitor, arg);
}
}
std::string Dbg::GetClassDescriptor(JDWP::RefTypeId classId) {
Object* o = gRegistry->Get<Object*>(classId);
if (o == NULL || !o->IsClass()) {
return StringPrintf("non-class %p", o); // This is only used for debugging output anyway.
}
return ClassHelper(o->AsClass()).GetDescriptor();
}
bool Dbg::GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId& classObjectId) {
Object* o = gRegistry->Get<Object*>(id);
if (o == NULL || !o->IsClass()) {
return false;
}
classObjectId = gRegistry->Add(o);
return true;
}
bool Dbg::GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId& superclassId) {
Object* o = gRegistry->Get<Object*>(id);
if (o == NULL || !o->IsClass()) {
return false;
}
superclassId = gRegistry->Add(o->AsClass()->GetSuperClass());
return true;
}
JDWP::ObjectId Dbg::GetClassLoader(JDWP::RefTypeId id) {
Object* o = gRegistry->Get<Object*>(id);
return gRegistry->Add(o->GetClass()->GetClassLoader());
}
bool Dbg::GetAccessFlags(JDWP::RefTypeId id, uint32_t& access_flags) {
Object* o = gRegistry->Get<Object*>(id);
if (o == NULL || !o->IsClass()) {
return false;
}
access_flags = o->AsClass()->GetAccessFlags() & kAccJavaFlagsMask;
return true;
}
bool Dbg::IsInterface(JDWP::RefTypeId classId, bool& is_interface) {
Object* o = gRegistry->Get<Object*>(classId);
if (o == NULL || !o->IsClass()) {
return false;
}
is_interface = o->AsClass()->IsInterface();
return true;
}
void Dbg::GetClassList(std::vector<JDWP::RefTypeId>& classes) {
// Get the complete list of reference classes (i.e. all classes except
// the primitive types).
// Returns a newly-allocated buffer full of RefTypeId values.
struct ClassListCreator {
explicit ClassListCreator(std::vector<JDWP::RefTypeId>& classes) : classes(classes) {
}
static bool Visit(Class* c, void* arg) {
return reinterpret_cast<ClassListCreator*>(arg)->Visit(c);
}
bool Visit(Class* c) {
if (!c->IsPrimitive()) {
classes.push_back(static_cast<JDWP::RefTypeId>(gRegistry->Add(c)));
}
return true;
}
std::vector<JDWP::RefTypeId>& classes;
};
ClassListCreator clc(classes);
Runtime::Current()->GetClassLinker()->VisitClasses(ClassListCreator::Visit, &clc);
}
void Dbg::GetVisibleClassList(JDWP::ObjectId classLoaderId, uint32_t* pNumClasses, JDWP::RefTypeId** pClassRefBuf) {
UNIMPLEMENTED(FATAL);
}
bool Dbg::GetClassInfo(JDWP::RefTypeId classId, JDWP::JdwpTypeTag* pTypeTag, uint32_t* pStatus, std::string* pDescriptor) {
Object* o = gRegistry->Get<Object*>(classId);
if (o == NULL || !o->IsClass()) {
return false;
}
Class* c = o->AsClass();
if (c->IsArrayClass()) {
*pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
*pTypeTag = JDWP::TT_ARRAY;
} else {
if (c->IsErroneous()) {
*pStatus = JDWP::CS_ERROR;
} else {
*pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED | JDWP::CS_INITIALIZED;
}
*pTypeTag = c->IsInterface() ? JDWP::TT_INTERFACE : JDWP::TT_CLASS;
}
if (pDescriptor != NULL) {
*pDescriptor = ClassHelper(c).GetDescriptor();
}
return true;
}
void Dbg::FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>& ids) {
std::vector<Class*> classes;
Runtime::Current()->GetClassLinker()->LookupClasses(descriptor, classes);
ids.clear();
for (size_t i = 0; i < classes.size(); ++i) {
ids.push_back(gRegistry->Add(classes[i]));
}
}
void Dbg::GetObjectType(JDWP::ObjectId objectId, JDWP::JdwpTypeTag* pRefTypeTag, JDWP::RefTypeId* pRefTypeId) {
Object* o = gRegistry->Get<Object*>(objectId);
if (o->GetClass()->IsArrayClass()) {
*pRefTypeTag = JDWP::TT_ARRAY;
} else if (o->GetClass()->IsInterface()) {
*pRefTypeTag = JDWP::TT_INTERFACE;
} else {
*pRefTypeTag = JDWP::TT_CLASS;
}
*pRefTypeId = gRegistry->Add(o->GetClass());
}
uint8_t Dbg::GetClassObjectType(JDWP::RefTypeId refTypeId) {
UNIMPLEMENTED(FATAL);
return 0;
}
bool Dbg::GetSignature(JDWP::RefTypeId refTypeId, std::string& signature) {
Object* o = gRegistry->Get<Object*>(refTypeId);
if (o == NULL || !o->IsClass()) {
return false;
}
signature = ClassHelper(o->AsClass()).GetDescriptor();
return true;
}
bool Dbg::GetSourceFile(JDWP::RefTypeId refTypeId, std::string& result) {
Object* o = gRegistry->Get<Object*>(refTypeId);
if (o == NULL || !o->IsClass()) {
return false;
}
result = ClassHelper(o->AsClass()).GetSourceFile();
return result != NULL;
}
uint8_t Dbg::GetObjectTag(JDWP::ObjectId objectId) {
Object* o = gRegistry->Get<Object*>(objectId);
return TagFromObject(o);
}
size_t Dbg::GetTagWidth(JDWP::JdwpTag tag) {
switch (tag) {
case JDWP::JT_VOID:
return 0;
case JDWP::JT_BYTE:
case JDWP::JT_BOOLEAN:
return 1;
case JDWP::JT_CHAR:
case JDWP::JT_SHORT:
return 2;
case JDWP::JT_FLOAT:
case JDWP::JT_INT:
return 4;
case JDWP::JT_ARRAY:
case JDWP::JT_OBJECT:
case JDWP::JT_STRING:
case JDWP::JT_THREAD:
case JDWP::JT_THREAD_GROUP:
case JDWP::JT_CLASS_LOADER:
case JDWP::JT_CLASS_OBJECT:
return sizeof(JDWP::ObjectId);
case JDWP::JT_DOUBLE:
case JDWP::JT_LONG:
return 8;
default:
LOG(FATAL) << "Unknown tag " << tag;
return -1;
}
}
int Dbg::GetArrayLength(JDWP::ObjectId arrayId) {
Object* o = gRegistry->Get<Object*>(arrayId);
Array* a = o->AsArray();
return a->GetLength();
}
uint8_t Dbg::GetArrayElementTag(JDWP::ObjectId arrayId) {
Object* o = gRegistry->Get<Object*>(arrayId);
Array* a = o->AsArray();
std::string descriptor(ClassHelper(a->GetClass()).GetDescriptor());
JDWP::JdwpTag tag = BasicTagFromDescriptor(descriptor.c_str() + 1);
if (!IsPrimitiveTag(tag)) {
tag = TagFromClass(a->GetClass()->GetComponentType());
}
return tag;
}
bool Dbg::OutputArray(JDWP::ObjectId arrayId, int offset, int count, JDWP::ExpandBuf* pReply) {
Object* o = gRegistry->Get<Object*>(arrayId);
Array* a = o->AsArray();
if (offset < 0 || count < 0 || offset > a->GetLength() || a->GetLength() - offset < count) {
LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
return false;
}
std::string descriptor(ClassHelper(a->GetClass()).GetDescriptor());
JDWP::JdwpTag tag = BasicTagFromDescriptor(descriptor.c_str() + 1);
if (IsPrimitiveTag(tag)) {
size_t width = GetTagWidth(tag);
const uint8_t* src = reinterpret_cast<uint8_t*>(a->GetRawData());
uint8_t* dst = expandBufAddSpace(pReply, count * width);
if (width == 8) {
const uint64_t* src8 = reinterpret_cast<const uint64_t*>(src);
for (int i = 0; i < count; ++i) JDWP::Write8BE(&dst, src8[offset + i]);
} else if (width == 4) {
const uint32_t* src4 = reinterpret_cast<const uint32_t*>(src);
for (int i = 0; i < count; ++i) JDWP::Write4BE(&dst, src4[offset + i]);
} else if (width == 2) {
const uint16_t* src2 = reinterpret_cast<const uint16_t*>(src);
for (int i = 0; i < count; ++i) JDWP::Write2BE(&dst, src2[offset + i]);
} else {
memcpy(dst, &src[offset * width], count * width);
}
} else {
ObjectArray<Object>* oa = a->AsObjectArray<Object>();
for (int i = 0; i < count; ++i) {
Object* element = oa->Get(offset + i);
JDWP::JdwpTag specific_tag = (element != NULL) ? TagFromObject(element) : tag;
expandBufAdd1(pReply, specific_tag);
expandBufAddObjectId(pReply, gRegistry->Add(element));
}
}
return true;
}
bool Dbg::SetArrayElements(JDWP::ObjectId arrayId, int offset, int count, const uint8_t* src) {
Object* o = gRegistry->Get<Object*>(arrayId);
Array* a = o->AsArray();
if (offset < 0 || count < 0 || offset > a->GetLength() || a->GetLength() - offset < count) {
LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
return false;
}
std::string descriptor(ClassHelper(a->GetClass()).GetDescriptor());
JDWP::JdwpTag tag = BasicTagFromDescriptor(descriptor.c_str() + 1);
if (IsPrimitiveTag(tag)) {
size_t width = GetTagWidth(tag);
uint8_t* dst = &(reinterpret_cast<uint8_t*>(a->GetRawData())[offset * width]);
if (width == 8) {
for (int i = 0; i < count; ++i) {
// Handle potentially non-aligned memory access one byte at a time for ARM's benefit.
uint64_t value;
for (size_t j = 0; j < sizeof(uint64_t); ++j) reinterpret_cast<uint8_t*>(&value)[j] = src[j];
src += sizeof(uint64_t);
JDWP::Write8BE(&dst, value);
}
} else if (width == 4) {
const uint32_t* src4 = reinterpret_cast<const uint32_t*>(src);
for (int i = 0; i < count; ++i) JDWP::Write4BE(&dst, src4[i]);
} else if (width == 2) {
const uint16_t* src2 = reinterpret_cast<const uint16_t*>(src);
for (int i = 0; i < count; ++i) JDWP::Write2BE(&dst, src2[i]);
} else {
memcpy(&dst[offset * width], src, count * width);
}
} else {
ObjectArray<Object>* oa = a->AsObjectArray<Object>();
for (int i = 0; i < count; ++i) {
JDWP::ObjectId id = JDWP::ReadObjectId(&src);
oa->Set(offset + i, gRegistry->Get<Object*>(id));
}
}
return true;
}
JDWP::ObjectId Dbg::CreateString(const std::string& str) {
return gRegistry->Add(String::AllocFromModifiedUtf8(str.c_str()));
}
bool Dbg::CreateObject(JDWP::RefTypeId classId, JDWP::ObjectId& new_object) {
Object* o = gRegistry->Get<Object*>(classId);
if (o == NULL || !o->IsClass()) {
return false;
}
new_object = gRegistry->Add(o->AsClass()->AllocObject());
return true;
}
/*
* Used by Eclipse's "Display" view to evaluate "new byte[5]" to get "(byte[]) [0, 0, 0, 0, 0]".
*/
bool Dbg::CreateArrayObject(JDWP::RefTypeId arrayTypeId, uint32_t length, JDWP::ObjectId& new_array) {
Object* o = gRegistry->Get<Object*>(arrayTypeId);
if (o == NULL || !o->IsClass()) {
return false;
}
new_array = gRegistry->Add(Array::Alloc(o->AsClass(), length));
return true;
}
bool Dbg::MatchType(JDWP::RefTypeId instClassId, JDWP::RefTypeId classId) {
// TODO: error handling if the RefTypeIds aren't actually Class*s.
return gRegistry->Get<Class*>(instClassId)->InstanceOf(gRegistry->Get<Class*>(classId));
}
JDWP::FieldId ToFieldId(Field* f) {
#ifdef MOVING_GARBAGE_COLLECTOR
UNIMPLEMENTED(FATAL);
#else
return static_cast<JDWP::FieldId>(reinterpret_cast<uintptr_t>(f));
#endif
}
JDWP::MethodId ToMethodId(Method* m) {
#ifdef MOVING_GARBAGE_COLLECTOR
UNIMPLEMENTED(FATAL);
#else
return static_cast<JDWP::MethodId>(reinterpret_cast<uintptr_t>(m));
#endif
}
Field* FromFieldId(JDWP::FieldId fid) {
#ifdef MOVING_GARBAGE_COLLECTOR
UNIMPLEMENTED(FATAL);
#else
return reinterpret_cast<Field*>(static_cast<uintptr_t>(fid));
#endif
}
Method* FromMethodId(JDWP::MethodId mid) {
#ifdef MOVING_GARBAGE_COLLECTOR
UNIMPLEMENTED(FATAL);
#else
return reinterpret_cast<Method*>(static_cast<uintptr_t>(mid));
#endif
}
void SetLocation(JDWP::JdwpLocation& location, Method* m, uintptr_t native_pc) {
Class* c = m->GetDeclaringClass();
location.typeTag = c->IsInterface() ? JDWP::TT_INTERFACE : JDWP::TT_CLASS;
location.classId = gRegistry->Add(c);
location.methodId = ToMethodId(m);
location.idx = m->IsNative() ? -1 : m->ToDexPC(native_pc);
}
std::string Dbg::GetMethodName(JDWP::RefTypeId refTypeId, JDWP::MethodId methodId) {
Method* m = FromMethodId(methodId);
return MethodHelper(m).GetName();
}
/*
* Augment the access flags for synthetic methods and fields by setting
* the (as described by the spec) "0xf0000000 bit". Also, strip out any
* flags not specified by the Java programming language.
*/
static uint32_t MangleAccessFlags(uint32_t accessFlags) {
accessFlags &= kAccJavaFlagsMask;
if ((accessFlags & kAccSynthetic) != 0) {
accessFlags |= 0xf0000000;
}
return accessFlags;
}
static const uint16_t kEclipseWorkaroundSlot = 1000;
/*
* Eclipse appears to expect that the "this" reference is in slot zero.
* If it's not, the "variables" display will show two copies of "this",
* possibly because it gets "this" from SF.ThisObject and then displays
* all locals with nonzero slot numbers.
*
* So, we remap the item in slot 0 to 1000, and remap "this" to zero. On
* SF.GetValues / SF.SetValues we map them back.
*
* TODO: jdb uses the value to determine whether a variable is a local or an argument,
* by checking whether it's less than the number of arguments. To make that work, we'd
* have to "mangle" all the arguments to come first, not just the implicit argument 'this'.
*/
static uint16_t MangleSlot(uint16_t slot, const char* name) {
uint16_t newSlot = slot;
if (strcmp(name, "this") == 0) {
newSlot = 0;
} else if (slot == 0) {
newSlot = kEclipseWorkaroundSlot;
}
return newSlot;
}
static uint16_t DemangleSlot(uint16_t slot, Frame& f) {
if (slot == kEclipseWorkaroundSlot) {
return 0;
} else if (slot == 0) {
const DexFile::CodeItem* code_item = MethodHelper(f.GetMethod()).GetCodeItem();
return code_item->registers_size_ - code_item->ins_size_;
}
return slot;
}
bool Dbg::OutputDeclaredFields(JDWP::RefTypeId refTypeId, bool with_generic, JDWP::ExpandBuf* pReply) {
Object* o = gRegistry->Get<Object*>(refTypeId);
if (o == NULL || !o->IsClass()) {
return false;
}
Class* c = o->AsClass();
size_t instance_field_count = c->NumInstanceFields();
size_t static_field_count = c->NumStaticFields();
expandBufAdd4BE(pReply, instance_field_count + static_field_count);
for (size_t i = 0; i < instance_field_count + static_field_count; ++i) {
Field* f = (i < instance_field_count) ? c->GetInstanceField(i) : c->GetStaticField(i - instance_field_count);
FieldHelper fh(f);
expandBufAddFieldId(pReply, ToFieldId(f));
expandBufAddUtf8String(pReply, fh.GetName());
expandBufAddUtf8String(pReply, fh.GetTypeDescriptor());
if (with_generic) {
static const char genericSignature[1] = "";
expandBufAddUtf8String(pReply, genericSignature);
}
expandBufAdd4BE(pReply, MangleAccessFlags(f->GetAccessFlags()));
}
return true;
}
bool Dbg::OutputDeclaredMethods(JDWP::RefTypeId refTypeId, bool with_generic, JDWP::ExpandBuf* pReply) {
Object* o = gRegistry->Get<Object*>(refTypeId);
if (o == NULL || !o->IsClass()) {
return false;
}
Class* c = o->AsClass();
size_t direct_method_count = c->NumDirectMethods();
size_t virtual_method_count = c->NumVirtualMethods();
expandBufAdd4BE(pReply, direct_method_count + virtual_method_count);
for (size_t i = 0; i < direct_method_count + virtual_method_count; ++i) {
Method* m = (i < direct_method_count) ? c->GetDirectMethod(i) : c->GetVirtualMethod(i - direct_method_count);
MethodHelper mh(m);
expandBufAddMethodId(pReply, ToMethodId(m));
expandBufAddUtf8String(pReply, mh.GetName());
expandBufAddUtf8String(pReply, mh.GetSignature());
if (with_generic) {
static const char genericSignature[1] = "";
expandBufAddUtf8String(pReply, genericSignature);
}
expandBufAdd4BE(pReply, MangleAccessFlags(m->GetAccessFlags()));
}
return true;
}
bool Dbg::OutputDeclaredInterfaces(JDWP::RefTypeId refTypeId, JDWP::ExpandBuf* pReply) {
Object* o = gRegistry->Get<Object*>(refTypeId);
if (o == NULL || !o->IsClass()) {
return false;
}
ClassHelper kh(o->AsClass());
size_t interface_count = kh.NumInterfaces();
expandBufAdd4BE(pReply, interface_count);
for (size_t i = 0; i < interface_count; ++i) {
expandBufAddRefTypeId(pReply, gRegistry->Add(kh.GetInterface(i)));
}
return true;
}
void Dbg::OutputLineTable(JDWP::RefTypeId refTypeId, JDWP::MethodId methodId, JDWP::ExpandBuf* pReply) {
struct DebugCallbackContext {
int numItems;
JDWP::ExpandBuf* pReply;
static bool Callback(void* context, uint32_t address, uint32_t lineNum) {
DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
expandBufAdd8BE(pContext->pReply, address);
expandBufAdd4BE(pContext->pReply, lineNum);
pContext->numItems++;
return true;
}
};
Method* m = FromMethodId(methodId);
MethodHelper mh(m);
uint64_t start, end;
if (m->IsNative()) {
start = -1;
end = -1;
} else {
start = 0;
// TODO: what are the units supposed to be? *2?
end = mh.GetCodeItem()->insns_size_in_code_units_;
}
expandBufAdd8BE(pReply, start);
expandBufAdd8BE(pReply, end);
// Add numLines later
size_t numLinesOffset = expandBufGetLength(pReply);
expandBufAdd4BE(pReply, 0);
DebugCallbackContext context;
context.numItems = 0;
context.pReply = pReply;
mh.GetDexFile().DecodeDebugInfo(mh.GetCodeItem(), m->IsStatic(), m->GetDexMethodIndex(),
DebugCallbackContext::Callback, NULL, &context);
JDWP::Set4BE(expandBufGetBuffer(pReply) + numLinesOffset, context.numItems);
}
void Dbg::OutputVariableTable(JDWP::RefTypeId refTypeId, JDWP::MethodId methodId, bool with_generic, JDWP::ExpandBuf* pReply) {
struct DebugCallbackContext {
JDWP::ExpandBuf* pReply;
size_t variable_count;
bool with_generic;
static void Callback(void* context, uint16_t slot, uint32_t startAddress, uint32_t endAddress, const char* name, const char* descriptor, const char* signature) {
DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
VLOG(jdwp) << StringPrintf(" %2zd: %d(%d) '%s' '%s' '%s' slot=%d", pContext->variable_count, startAddress, endAddress - startAddress, name, descriptor, signature, slot);
slot = MangleSlot(slot, name);
expandBufAdd8BE(pContext->pReply, startAddress);
expandBufAddUtf8String(pContext->pReply, name);
expandBufAddUtf8String(pContext->pReply, descriptor);
if (pContext->with_generic) {
expandBufAddUtf8String(pContext->pReply, signature);
}
expandBufAdd4BE(pContext->pReply, endAddress - startAddress);
expandBufAdd4BE(pContext->pReply, slot);
++pContext->variable_count;
}
};
Method* m = FromMethodId(methodId);
MethodHelper mh(m);
const DexFile::CodeItem* code_item = mh.GetCodeItem();
// arg_count considers doubles and longs to take 2 units.
// variable_count considers everything to take 1 unit.
std::string shorty(mh.GetShorty());
expandBufAdd4BE(pReply, m->NumArgRegisters(shorty));
// We don't know the total number of variables yet, so leave a blank and update it later.
size_t variable_count_offset = expandBufGetLength(pReply);
expandBufAdd4BE(pReply, 0);
DebugCallbackContext context;
context.pReply = pReply;
context.variable_count = 0;
context.with_generic = with_generic;
mh.GetDexFile().DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(), NULL,
DebugCallbackContext::Callback, &context);
JDWP::Set4BE(expandBufGetBuffer(pReply) + variable_count_offset, context.variable_count);
}
JDWP::JdwpTag Dbg::GetFieldBasicTag(JDWP::FieldId fieldId) {
return BasicTagFromDescriptor(FieldHelper(FromFieldId(fieldId)).GetTypeDescriptor());
}
JDWP::JdwpTag Dbg::GetStaticFieldBasicTag(JDWP::FieldId fieldId) {
return BasicTagFromDescriptor(FieldHelper(FromFieldId(fieldId)).GetTypeDescriptor());
}
void Dbg::GetFieldValue(JDWP::ObjectId objectId, JDWP::FieldId fieldId, JDWP::ExpandBuf* pReply) {
Object* o = gRegistry->Get<Object*>(objectId);
Field* f = FromFieldId(fieldId);
JDWP::JdwpTag tag = BasicTagFromDescriptor(FieldHelper(f).GetTypeDescriptor());
if (IsPrimitiveTag(tag)) {
expandBufAdd1(pReply, tag);
if (tag == JDWP::JT_BOOLEAN || tag == JDWP::JT_BYTE) {
expandBufAdd1(pReply, f->Get32(o));
} else if (tag == JDWP::JT_CHAR || tag == JDWP::JT_SHORT) {
expandBufAdd2BE(pReply, f->Get32(o));
} else if (tag == JDWP::JT_FLOAT || tag == JDWP::JT_INT) {
expandBufAdd4BE(pReply, f->Get32(o));
} else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
expandBufAdd8BE(pReply, f->Get64(o));
} else {
LOG(FATAL) << "Unknown tag: " << tag;
}
} else {
Object* value = f->GetObject(o);
expandBufAdd1(pReply, TagFromObject(value));
expandBufAddObjectId(pReply, gRegistry->Add(value));
}
}
void Dbg::SetFieldValue(JDWP::ObjectId objectId, JDWP::FieldId fieldId, uint64_t value, int width) {
Object* o = gRegistry->Get<Object*>(objectId);
Field* f = FromFieldId(fieldId);
JDWP::JdwpTag tag = BasicTagFromDescriptor(FieldHelper(f).GetTypeDescriptor());
if (IsPrimitiveTag(tag)) {
if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
f->Set64(o, value);
} else {
f->Set32(o, value);
}
} else {
f->SetObject(o, gRegistry->Get<Object*>(value));
}
}
void Dbg::GetStaticFieldValue(JDWP::FieldId fieldId, JDWP::ExpandBuf* pReply) {
GetFieldValue(0, fieldId, pReply);
}
void Dbg::SetStaticFieldValue(JDWP::FieldId fieldId, uint64_t value, int width) {
SetFieldValue(0, fieldId, value, width);
}
std::string Dbg::StringToUtf8(JDWP::ObjectId strId) {
String* s = gRegistry->Get<String*>(strId);
return s->ToModifiedUtf8();
}
Thread* DecodeThread(JDWP::ObjectId threadId) {
Object* thread_peer = gRegistry->Get<Object*>(threadId);
CHECK(thread_peer != NULL);
return Thread::FromManagedThread(thread_peer);
}
bool Dbg::GetThreadName(JDWP::ObjectId threadId, std::string& name) {
ScopedThreadListLock thread_list_lock;
Thread* thread = DecodeThread(threadId);
if (thread == NULL) {
return false;
}
StringAppendF(&name, "<%d> %s", thread->GetThinLockId(), thread->GetThreadName()->ToModifiedUtf8().c_str());
return true;
}
JDWP::ObjectId Dbg::GetThreadGroup(JDWP::ObjectId threadId) {
Object* thread = gRegistry->Get<Object*>(threadId);
CHECK(thread != NULL);
Class* c = Runtime::Current()->GetClassLinker()->FindSystemClass("Ljava/lang/Thread;");
CHECK(c != NULL);
Field* f = c->FindInstanceField("group", "Ljava/lang/ThreadGroup;");
CHECK(f != NULL);
Object* group = f->GetObject(thread);
CHECK(group != NULL);
return gRegistry->Add(group);
}
std::string Dbg::GetThreadGroupName(JDWP::ObjectId threadGroupId) {
Object* thread_group = gRegistry->Get<Object*>(threadGroupId);
CHECK(thread_group != NULL);
Class* c = Runtime::Current()->GetClassLinker()->FindSystemClass("Ljava/lang/ThreadGroup;");
CHECK(c != NULL);
Field* f = c->FindInstanceField("name", "Ljava/lang/String;");
CHECK(f != NULL);
String* s = reinterpret_cast<String*>(f->GetObject(thread_group));
return s->ToModifiedUtf8();
}
JDWP::ObjectId Dbg::GetThreadGroupParent(JDWP::ObjectId threadGroupId) {
Object* thread_group = gRegistry->Get<Object*>(threadGroupId);
CHECK(thread_group != NULL);
Class* c = Runtime::Current()->GetClassLinker()->FindSystemClass("Ljava/lang/ThreadGroup;");
CHECK(c != NULL);
Field* f = c->FindInstanceField("parent", "Ljava/lang/ThreadGroup;");
CHECK(f != NULL);
Object* parent = f->GetObject(thread_group);
return gRegistry->Add(parent);
}
static Object* GetStaticThreadGroup(const char* field_name) {
Class* c = Runtime::Current()->GetClassLinker()->FindSystemClass("Ljava/lang/ThreadGroup;");
CHECK(c != NULL);
Field* f = c->FindStaticField(field_name, "Ljava/lang/ThreadGroup;");
CHECK(f != NULL);
Object* group = f->GetObject(NULL);
CHECK(group != NULL);
return group;
}
JDWP::ObjectId Dbg::GetSystemThreadGroupId() {
return gRegistry->Add(GetStaticThreadGroup("mSystem"));
}
JDWP::ObjectId Dbg::GetMainThreadGroupId() {
return gRegistry->Add(GetStaticThreadGroup("mMain"));
}
bool Dbg::GetThreadStatus(JDWP::ObjectId threadId, JDWP::JdwpThreadStatus* pThreadStatus, JDWP::JdwpSuspendStatus* pSuspendStatus) {
ScopedThreadListLock thread_list_lock;
Thread* thread = DecodeThread(threadId);
if (thread == NULL) {
return false;
}
switch (thread->GetState()) {
case Thread::kTerminated: *pThreadStatus = JDWP::TS_ZOMBIE; break;
case Thread::kRunnable: *pThreadStatus = JDWP::TS_RUNNING; break;
case Thread::kTimedWaiting: *pThreadStatus = JDWP::TS_SLEEPING; break;
case Thread::kBlocked: *pThreadStatus = JDWP::TS_MONITOR; break;
case Thread::kWaiting: *pThreadStatus = JDWP::TS_WAIT; break;
case Thread::kInitializing: *pThreadStatus = JDWP::TS_ZOMBIE; break;
case Thread::kStarting: *pThreadStatus = JDWP::TS_ZOMBIE; break;
case Thread::kNative: *pThreadStatus = JDWP::TS_RUNNING; break;
case Thread::kVmWait: *pThreadStatus = JDWP::TS_WAIT; break;
case Thread::kSuspended: *pThreadStatus = JDWP::TS_RUNNING; break;
default:
LOG(FATAL) << "Unknown thread state " << thread->GetState();
}
*pSuspendStatus = (thread->IsSuspended() ? JDWP::SUSPEND_STATUS_SUSPENDED : JDWP::SUSPEND_STATUS_NOT_SUSPENDED);
return true;
}
uint32_t Dbg::GetThreadSuspendCount(JDWP::ObjectId threadId) {
return DecodeThread(threadId)->GetSuspendCount();
}
bool Dbg::ThreadExists(JDWP::ObjectId threadId) {
return DecodeThread(threadId) != NULL;
}
bool Dbg::IsSuspended(JDWP::ObjectId threadId) {
return DecodeThread(threadId)->IsSuspended();
}
void Dbg::GetThreadGroupThreadsImpl(Object* thread_group, JDWP::ObjectId** ppThreadIds, uint32_t* pThreadCount) {
struct ThreadListVisitor {
static void Visit(Thread* t, void* arg) {
reinterpret_cast<ThreadListVisitor*>(arg)->Visit(t);
}
void Visit(Thread* t) {
if (t == Dbg::GetDebugThread()) {
// Skip the JDWP thread. Some debuggers get bent out of shape when they can't suspend and
// query all threads, so it's easier if we just don't tell them about this thread.
return;
}
if (thread_group == NULL || t->GetThreadGroup() == thread_group) {
threads.push_back(gRegistry->Add(t->GetPeer()));
}
}
Object* thread_group;
std::vector<JDWP::ObjectId> threads;
};
ThreadListVisitor tlv;
tlv.thread_group = thread_group;
{
ScopedThreadListLock thread_list_lock;
Runtime::Current()->GetThreadList()->ForEach(ThreadListVisitor::Visit, &tlv);
}
*pThreadCount = tlv.threads.size();
if (*pThreadCount == 0) {
*ppThreadIds = NULL;
} else {
*ppThreadIds = new JDWP::ObjectId[*pThreadCount];
for (size_t i = 0; i < *pThreadCount; ++i) {
(*ppThreadIds)[i] = tlv.threads[i];
}
}
}
void Dbg::GetThreadGroupThreads(JDWP::ObjectId threadGroupId, JDWP::ObjectId** ppThreadIds, uint32_t* pThreadCount) {
GetThreadGroupThreadsImpl(gRegistry->Get<Object*>(threadGroupId), ppThreadIds, pThreadCount);
}
void Dbg::GetAllThreads(JDWP::ObjectId** ppThreadIds, uint32_t* pThreadCount) {
GetThreadGroupThreadsImpl(NULL, ppThreadIds, pThreadCount);
}
int Dbg::GetThreadFrameCount(JDWP::ObjectId threadId) {
ScopedThreadListLock thread_list_lock;
struct CountStackDepthVisitor : public Thread::StackVisitor {
CountStackDepthVisitor() : depth(0) {}
virtual void VisitFrame(const Frame& f, uintptr_t) {
// TODO: we'll need to skip callee-save frames too.
if (f.HasMethod()) {
++depth;
}
}
size_t depth;
};
CountStackDepthVisitor visitor;
DecodeThread(threadId)->WalkStack(&visitor);
return visitor.depth;
}
bool Dbg::GetThreadFrame(JDWP::ObjectId threadId, int desired_frame_number, JDWP::FrameId* pFrameId, JDWP::JdwpLocation* pLoc) {
ScopedThreadListLock thread_list_lock;
struct GetFrameVisitor : public Thread::StackVisitor {
GetFrameVisitor(int desired_frame_number, JDWP::FrameId* pFrameId, JDWP::JdwpLocation* pLoc)
: found(false), depth(0), desired_frame_number(desired_frame_number), pFrameId(pFrameId), pLoc(pLoc) {
}
virtual void VisitFrame(const Frame& f, uintptr_t pc) {
// TODO: we'll need to skip callee-save frames too.
if (!f.HasMethod()) {
return; // The debugger can't do anything useful with a frame that has no Method*.
}
if (depth == desired_frame_number) {
*pFrameId = reinterpret_cast<JDWP::FrameId>(f.GetSP());
SetLocation(*pLoc, f.GetMethod(), pc);
found = true;
}
++depth;
}
bool found;
int depth;
int desired_frame_number;
JDWP::FrameId* pFrameId;
JDWP::JdwpLocation* pLoc;
};
GetFrameVisitor visitor(desired_frame_number, pFrameId, pLoc);
visitor.desired_frame_number = desired_frame_number;
DecodeThread(threadId)->WalkStack(&visitor);
return visitor.found;
}
JDWP::ObjectId Dbg::GetThreadSelfId() {
return gRegistry->Add(Thread::Current()->GetPeer());
}
void Dbg::SuspendVM() {
ScopedThreadStateChange tsc(Thread::Current(), Thread::kRunnable); // TODO: do we really want to change back? should the JDWP thread be Runnable usually?
Runtime::Current()->GetThreadList()->SuspendAll(true);
}
void Dbg::ResumeVM() {
Runtime::Current()->GetThreadList()->ResumeAll(true);
}
void Dbg::SuspendThread(JDWP::ObjectId threadId) {
Object* peer = gRegistry->Get<Object*>(threadId);
ScopedThreadListLock thread_list_lock;
Thread* thread = Thread::FromManagedThread(peer);
if (thread == NULL) {
LOG(WARNING) << "No such thread for suspend: " << peer;
return;
}
Runtime::Current()->GetThreadList()->Suspend(thread, true);
}
void Dbg::ResumeThread(JDWP::ObjectId threadId) {
Object* peer = gRegistry->Get<Object*>(threadId);
ScopedThreadListLock thread_list_lock;
Thread* thread = Thread::FromManagedThread(peer);
if (thread == NULL) {
LOG(WARNING) << "No such thread for resume: " << peer;
return;
}
Runtime::Current()->GetThreadList()->Resume(thread, true);
}
void Dbg::SuspendSelf() {
Runtime::Current()->GetThreadList()->SuspendSelfForDebugger();
}
bool Dbg::GetThisObject(JDWP::FrameId frameId, JDWP::ObjectId* pThisId) {
Method** sp = reinterpret_cast<Method**>(frameId);
Frame f;
f.SetSP(sp);
Method* m = f.GetMethod();
Object* o = NULL;
if (!m->IsNative() && !m->IsStatic()) {
uint16_t reg = DemangleSlot(0, f);
o = reinterpret_cast<Object*>(f.GetVReg(m, reg));
}
*pThisId = gRegistry->Add(o);
return true;
}
void Dbg::GetLocalValue(JDWP::ObjectId threadId, JDWP::FrameId frameId, int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width) {
Method** sp = reinterpret_cast<Method**>(frameId);
Frame f;
f.SetSP(sp);
uint16_t reg = DemangleSlot(slot, f);
Method* m = f.GetMethod();
const VmapTable vmap_table(m->GetVmapTableRaw());
uint32_t vmap_offset;
if (vmap_table.IsInContext(reg, vmap_offset)) {
UNIMPLEMENTED(FATAL) << "Don't know how to pull locals from callee save frames: " << vmap_offset;
}
switch (tag) {
case JDWP::JT_BOOLEAN:
{
CHECK_EQ(width, 1U);
uint32_t intVal = f.GetVReg(m, reg);
VLOG(jdwp) << "get boolean local " << reg << " = " << intVal;
JDWP::Set1(buf+1, intVal != 0);
}
break;
case JDWP::JT_BYTE:
{
CHECK_EQ(width, 1U);
uint32_t intVal = f.GetVReg(m, reg);
VLOG(jdwp) << "get byte local " << reg << " = " << intVal;
JDWP::Set1(buf+1, intVal);
}
break;
case JDWP::JT_SHORT:
case JDWP::JT_CHAR:
{
CHECK_EQ(width, 2U);
uint32_t intVal = f.GetVReg(m, reg);
VLOG(jdwp) << "get short/char local " << reg << " = " << intVal;
JDWP::Set2BE(buf+1, intVal);
}
break;
case JDWP::JT_INT:
case JDWP::JT_FLOAT:
{
CHECK_EQ(width, 4U);
uint32_t intVal = f.GetVReg(m, reg);
VLOG(jdwp) << "get int/float local " << reg << " = " << intVal;
JDWP::Set4BE(buf+1, intVal);
}
break;
case JDWP::JT_ARRAY:
{
CHECK_EQ(width, sizeof(JDWP::ObjectId));
Object* o = reinterpret_cast<Object*>(f.GetVReg(m, reg));
VLOG(jdwp) << "get array local " << reg << " = " << o;
if (o != NULL && !Heap::IsHeapAddress(o)) {
LOG(FATAL) << "Register " << reg << " expected to hold array: " << o;
}
JDWP::SetObjectId(buf+1, gRegistry->Add(o));
}
break;
case JDWP::JT_OBJECT:
{
CHECK_EQ(width, sizeof(JDWP::ObjectId));
Object* o = reinterpret_cast<Object*>(f.GetVReg(m, reg));
VLOG(jdwp) << "get object local " << reg << " = " << o;
if (o != NULL && !Heap::IsHeapAddress(o)) {
LOG(FATAL) << "Register " << reg << " expected to hold object: " << o;
}
tag = TagFromObject(o);
JDWP::SetObjectId(buf+1, gRegistry->Add(o));
}
break;
case JDWP::JT_DOUBLE:
case JDWP::JT_LONG:
{
CHECK_EQ(width, 8U);
uint32_t lo = f.GetVReg(m, reg);
uint64_t hi = f.GetVReg(m, reg + 1);
uint64_t longVal = (hi << 32) | lo;
VLOG(jdwp) << "get double/long local " << hi << ":" << lo << " = " << longVal;
JDWP::Set8BE(buf+1, longVal);
}
break;
default:
LOG(FATAL) << "Unknown tag " << tag;
break;
}
// Prepend tag, which may have been updated.
JDWP::Set1(buf, tag);
}
void Dbg::SetLocalValue(JDWP::ObjectId threadId, JDWP::FrameId frameId, int slot, JDWP::JdwpTag tag, uint64_t value, size_t width) {
Method** sp = reinterpret_cast<Method**>(frameId);
Frame f;
f.SetSP(sp);
uint16_t reg = DemangleSlot(slot, f);
Method* m = f.GetMethod();
const VmapTable vmap_table(m->GetVmapTableRaw());
uint32_t vmap_offset;
if (vmap_table.IsInContext(reg, vmap_offset)) {
UNIMPLEMENTED(FATAL) << "Don't know how to pull locals from callee save frames: " << vmap_offset;
}
switch (tag) {
case JDWP::JT_BOOLEAN:
case JDWP::JT_BYTE:
CHECK_EQ(width, 1U);
f.SetVReg(m, reg, static_cast<uint32_t>(value));
break;
case JDWP::JT_SHORT:
case JDWP::JT_CHAR:
CHECK_EQ(width, 2U);
f.SetVReg(m, reg, static_cast<uint32_t>(value));
break;
case JDWP::JT_INT:
case JDWP::JT_FLOAT:
CHECK_EQ(width, 4U);
f.SetVReg(m, reg, static_cast<uint32_t>(value));
break;
case JDWP::JT_ARRAY:
case JDWP::JT_OBJECT:
case JDWP::JT_STRING:
{
CHECK_EQ(width, sizeof(JDWP::ObjectId));
Object* o = gRegistry->Get<Object*>(static_cast<JDWP::ObjectId>(value));
f.SetVReg(m, reg, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(o)));
}
break;
case JDWP::JT_DOUBLE:
case JDWP::JT_LONG:
CHECK_EQ(width, 8U);
f.SetVReg(m, reg, static_cast<uint32_t>(value));
f.SetVReg(m, reg + 1, static_cast<uint32_t>(value >> 32));
break;
default:
LOG(FATAL) << "Unknown tag " << tag;
break;
}
}
void Dbg::PostLocationEvent(const Method* method, int pcOffset, Object* thisPtr, int eventFlags) {
UNIMPLEMENTED(FATAL);
}
void Dbg::PostException(Method** sp, Method* throwMethod, uintptr_t throwNativePc, Method* catchMethod, uintptr_t catchNativePc, Object* exception) {
if (!gDebuggerActive) {
return;
}
JDWP::JdwpLocation throw_location;
SetLocation(throw_location, throwMethod, throwNativePc);
JDWP::JdwpLocation catch_location;
SetLocation(catch_location, catchMethod, catchNativePc);
// We need 'this' for InstanceOnly filters.
JDWP::ObjectId this_id;
GetThisObject(reinterpret_cast<JDWP::FrameId>(sp), &this_id);
/*
* Hand the event to the JDWP exception handler. Note we're using the
* "NoReg" objectID on the exception, which is not strictly correct --
* the exception object WILL be passed up to the debugger if the
* debugger is interested in the event. We do this because the current
* implementation of the debugger object registry never throws anything
* away, and some people were experiencing a fatal build up of exception
* objects when dealing with certain libraries.
*/
JDWP::ObjectId exception_id = static_cast<JDWP::ObjectId>(reinterpret_cast<uintptr_t>(exception));
JDWP::RefTypeId exception_class_id = gRegistry->Add(exception->GetClass());
gJdwpState->PostException(&throw_location, exception_id, exception_class_id, &catch_location, this_id);
}
void Dbg::PostClassPrepare(Class* c) {
if (!gDebuggerActive) {
return;
}
// OLD-TODO - we currently always send both "verified" and "prepared" since
// debuggers seem to like that. There might be some advantage to honesty,
// since the class may not yet be verified.
int state = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
JDWP::JdwpTypeTag tag = c->IsInterface() ? JDWP::TT_INTERFACE : JDWP::TT_CLASS;
gJdwpState->PostClassPrepare(tag, gRegistry->Add(c), ClassHelper(c).GetDescriptor(), state);
}
bool Dbg::WatchLocation(const JDWP::JdwpLocation* pLoc) {
UNIMPLEMENTED(FATAL);
return false;
}
void Dbg::UnwatchLocation(const JDWP::JdwpLocation* pLoc) {
UNIMPLEMENTED(FATAL);
}
bool Dbg::ConfigureStep(JDWP::ObjectId threadId, JDWP::JdwpStepSize size, JDWP::JdwpStepDepth depth) {
UNIMPLEMENTED(FATAL);
return false;
}
void Dbg::UnconfigureStep(JDWP::ObjectId threadId) {
UNIMPLEMENTED(FATAL);
}
JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId threadId, JDWP::ObjectId objectId, JDWP::RefTypeId classId, JDWP::MethodId methodId, uint32_t numArgs, uint64_t* argArray, uint32_t options, JDWP::JdwpTag* pResultTag, uint64_t* pResultValue, JDWP::ObjectId* pExceptionId) {
ThreadList* thread_list = Runtime::Current()->GetThreadList();
Thread* targetThread = NULL;
DebugInvokeReq* req = NULL;
{
ScopedThreadListLock thread_list_lock;
targetThread = DecodeThread(threadId);
if (targetThread == NULL) {
LOG(ERROR) << "InvokeMethod request for non-existent thread " << threadId;
return JDWP::ERR_INVALID_THREAD;
}
req = targetThread->GetInvokeReq();
if (!req->ready) {
LOG(ERROR) << "InvokeMethod request for thread not stopped by event: " << *targetThread;
return JDWP::ERR_INVALID_THREAD;
}
/*
* We currently have a bug where we don't successfully resume the
* target thread if the suspend count is too deep. We're expected to
* require one "resume" for each "suspend", but when asked to execute
* a method we have to resume fully and then re-suspend it back to the
* same level. (The easiest way to cause this is to type "suspend"
* multiple times in jdb.)
*
* It's unclear what this means when the event specifies "resume all"
* and some threads are suspended more deeply than others. This is
* a rare problem, so for now we just prevent it from hanging forever
* by rejecting the method invocation request. Without this, we will
* be stuck waiting on a suspended thread.
*/
int suspend_count = targetThread->GetSuspendCount();
if (suspend_count > 1) {
LOG(ERROR) << *targetThread << " suspend count too deep for method invocation: " << suspend_count;
return JDWP::ERR_THREAD_SUSPENDED; // Probably not expected here.
}
/*
* OLD-TODO: ought to screen the various IDs, and verify that the argument
* list is valid.
*/
req->receiver_ = gRegistry->Get<Object*>(objectId);
req->thread_ = gRegistry->Get<Object*>(threadId);
req->class_ = gRegistry->Get<Class*>(classId);
req->method_ = FromMethodId(methodId);
req->num_args_ = numArgs;
req->arg_array_ = argArray;
req->options_ = options;
req->invoke_needed_ = true;
}
// The fact that we've released the thread list lock is a bit risky --- if the thread goes
// away we're sitting high and dry -- but we must release this before the ResumeAllThreads
// call, and it's unwise to hold it during WaitForSuspend.
{
/*
* We change our (JDWP thread) status, which should be THREAD_RUNNING,
* so the VM can suspend for a GC if the invoke request causes us to
* run out of memory. It's also a good idea to change it before locking
* the invokeReq mutex, although that should never be held for long.
*/
ScopedThreadStateChange tsc(Thread::Current(), Thread::kVmWait);
VLOG(jdwp) << " Transferring control to event thread";
{
MutexLock mu(req->lock_);
if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
VLOG(jdwp) << " Resuming all threads";
thread_list->ResumeAll(true);
} else {
VLOG(jdwp) << " Resuming event thread only";
thread_list->Resume(targetThread, true);
}
// Wait for the request to finish executing.
while (req->invoke_needed_) {
req->cond_.Wait(req->lock_);
}
}
VLOG(jdwp) << " Control has returned from event thread";
/* wait for thread to re-suspend itself */
targetThread->WaitUntilSuspended();
//dvmWaitForSuspend(targetThread);
}
/*
* Suspend the threads. We waited for the target thread to suspend
* itself, so all we need to do is suspend the others.
*
* The suspendAllThreads() call will double-suspend the event thread,
* so we want to resume the target thread once to keep the books straight.
*/
if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
VLOG(jdwp) << " Suspending all threads";
thread_list->SuspendAll(true);
VLOG(jdwp) << " Resuming event thread to balance the count";
thread_list->Resume(targetThread, true);
}
// Copy the result.
*pResultTag = req->result_tag;
if (IsPrimitiveTag(req->result_tag)) {
*pResultValue = req->result_value.j;
} else {
*pResultValue = gRegistry->Add(req->result_value.l);
}
*pExceptionId = req->exception;
return req->error;
}
void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
Thread* self = Thread::Current();
// We can be called while an exception is pending in the VM. We need
// to preserve that across the method invocation.
SirtRef<Throwable> old_exception(self->GetException());
self->ClearException();
ScopedThreadStateChange tsc(self, Thread::kRunnable);
// Translate the method through the vtable, unless the debugger wants to suppress it.
Method* m = pReq->method_;
VLOG(jdwp) << "ExecuteMethod " << PrettyMethod(m);
if ((pReq->options_ & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver_ != NULL) {
m = pReq->class_->FindVirtualMethodForVirtualOrInterface(pReq->method_);
VLOG(jdwp) << "ExecuteMethod " << PrettyMethod(m);
}
CHECK(m != NULL);
CHECK_EQ(sizeof(jvalue), sizeof(uint64_t));
pReq->result_value = InvokeWithJValues(self, pReq->receiver_, m, reinterpret_cast<JValue*>(pReq->arg_array_));
pReq->exception = gRegistry->Add(self->GetException());
pReq->result_tag = BasicTagFromDescriptor(MethodHelper(m).GetShorty());
if (pReq->exception != 0) {
Object* exc = self->GetException();
VLOG(jdwp) << " JDWP invocation returning with exception=" << exc << " " << PrettyTypeOf(exc);
self->ClearException();
pReq->result_value.j = 0;
} else if (pReq->result_tag == JDWP::JT_OBJECT) {
/* if no exception thrown, examine object result more closely */
JDWP::JdwpTag new_tag = TagFromObject(pReq->result_value.l);
if (new_tag != pReq->result_tag) {
VLOG(jdwp) << " JDWP promoted result from " << pReq->result_tag << " to " << new_tag;
pReq->result_tag = new_tag;
}
/*
* Register the object. We don't actually need an ObjectId yet,
* but we do need to be sure that the GC won't move or discard the
* object when we switch out of RUNNING. The ObjectId conversion
* will add the object to the "do not touch" list.
*
* We can't use the "tracked allocation" mechanism here because
* the object is going to be handed off to a different thread.
*/
gRegistry->Add(pReq->result_value.l);
}
if (old_exception.get() != NULL) {
self->SetException(old_exception.get());
}
}
/*
* Register an object ID that might not have been registered previously.
*
* Normally this wouldn't happen -- the conversion to an ObjectId would
* have added the object to the registry -- but in some cases (e.g.
* throwing exceptions) we really want to do the registration late.
*/
void Dbg::RegisterObjectId(JDWP::ObjectId id) {
gRegistry->Add(reinterpret_cast<Object*>(id));
}
/*
* "buf" contains a full JDWP packet, possibly with multiple chunks. We
* need to process each, accumulate the replies, and ship the whole thing
* back.
*
* Returns "true" if we have a reply. The reply buffer is newly allocated,
* and includes the chunk type/length, followed by the data.
*
* OLD-TODO: we currently assume that the request and reply include a single
* chunk. If this becomes inconvenient we will need to adapt.
*/
bool Dbg::DdmHandlePacket(const uint8_t* buf, int dataLen, uint8_t** pReplyBuf, int* pReplyLen) {
CHECK_GE(dataLen, 0);
Thread* self = Thread::Current();
JNIEnv* env = self->GetJniEnv();
static jclass Chunk_class = CacheClass(env, "org/apache/harmony/dalvik/ddmc/Chunk");
static jclass DdmServer_class = CacheClass(env, "org/apache/harmony/dalvik/ddmc/DdmServer");
static jmethodID dispatch_mid = env->GetStaticMethodID(DdmServer_class, "dispatch", "(I[BII)Lorg/apache/harmony/dalvik/ddmc/Chunk;");
static jfieldID data_fid = env->GetFieldID(Chunk_class, "data", "[B");
static jfieldID length_fid = env->GetFieldID(Chunk_class, "length", "I");
static jfieldID offset_fid = env->GetFieldID(Chunk_class, "offset", "I");
static jfieldID type_fid = env->GetFieldID(Chunk_class, "type", "I");
// Create a byte[] corresponding to 'buf'.
ScopedLocalRef<jbyteArray> dataArray(env, env->NewByteArray(dataLen));
if (dataArray.get() == NULL) {
LOG(WARNING) << "byte[] allocation failed: " << dataLen;
env->ExceptionClear();
return false;
}
env->SetByteArrayRegion(dataArray.get(), 0, dataLen, reinterpret_cast<const jbyte*>(buf));
const int kChunkHdrLen = 8;
// Run through and find all chunks. [Currently just find the first.]
ScopedByteArrayRO contents(env, dataArray.get());
jint type = JDWP::Get4BE(reinterpret_cast<const uint8_t*>(&contents[0]));
jint length = JDWP::Get4BE(reinterpret_cast<const uint8_t*>(&contents[4]));
jint offset = kChunkHdrLen;
if (offset + length > dataLen) {
LOG(WARNING) << StringPrintf("bad chunk found (len=%u pktLen=%d)", length, dataLen);
return false;
}
// Call "private static Chunk dispatch(int type, byte[] data, int offset, int length)".
ScopedLocalRef<jobject> chunk(env, env->CallStaticObjectMethod(DdmServer_class, dispatch_mid, type, dataArray.get(), offset, length));
if (env->ExceptionCheck()) {
LOG(INFO) << StringPrintf("Exception thrown by dispatcher for 0x%08x", type);
env->ExceptionDescribe();
env->ExceptionClear();
return false;
}
if (chunk.get() == NULL) {
return false;
}
/*
* Pull the pieces out of the chunk. We copy the results into a
* newly-allocated buffer that the caller can free. We don't want to
* continue using the Chunk object because nothing has a reference to it.
*
* We could avoid this by returning type/data/offset/length and having
* the caller be aware of the object lifetime issues, but that
* integrates the JDWP code more tightly into the VM, and doesn't work
* if we have responses for multiple chunks.
*
* So we're pretty much stuck with copying data around multiple times.
*/
ScopedLocalRef<jbyteArray> replyData(env, reinterpret_cast<jbyteArray>(env->GetObjectField(chunk.get(), data_fid)));
length = env->GetIntField(chunk.get(), length_fid);
offset = env->GetIntField(chunk.get(), offset_fid);
type = env->GetIntField(chunk.get(), type_fid);
VLOG(jdwp) << StringPrintf("DDM reply: type=0x%08x data=%p offset=%d length=%d", type, replyData.get(), offset, length);
if (length == 0 || replyData.get() == NULL) {
return false;
}
jsize replyLength = env->GetArrayLength(replyData.get());
if (offset + length > replyLength) {
LOG(WARNING) << StringPrintf("chunk off=%d len=%d exceeds reply array len %d", offset, length, replyLength);
return false;
}
uint8_t* reply = new uint8_t[length + kChunkHdrLen];
if (reply == NULL) {
LOG(WARNING) << "malloc failed: " << (length + kChunkHdrLen);
return false;
}
JDWP::Set4BE(reply + 0, type);
JDWP::Set4BE(reply + 4, length);
env->GetByteArrayRegion(replyData.get(), offset, length, reinterpret_cast<jbyte*>(reply + kChunkHdrLen));
*pReplyBuf = reply;
*pReplyLen = length + kChunkHdrLen;
VLOG(jdwp) << StringPrintf("dvmHandleDdm returning type=%.4s buf=%p len=%d", reinterpret_cast<char*>(reply), reply, length);
return true;
}
void Dbg::DdmBroadcast(bool connect) {
VLOG(jdwp) << "Broadcasting DDM " << (connect ? "connect" : "disconnect") << "...";
Thread* self = Thread::Current();
if (self->GetState() != Thread::kRunnable) {
LOG(ERROR) << "DDM broadcast in thread state " << self->GetState();
/* try anyway? */
}
JNIEnv* env = self->GetJniEnv();
static jclass DdmServer_class = CacheClass(env, "org/apache/harmony/dalvik/ddmc/DdmServer");
static jmethodID broadcast_mid = env->GetStaticMethodID(DdmServer_class, "broadcast", "(I)V");
jint event = connect ? 1 /*DdmServer.CONNECTED*/ : 2 /*DdmServer.DISCONNECTED*/;
env->CallStaticVoidMethod(DdmServer_class, broadcast_mid, event);
if (env->ExceptionCheck()) {
LOG(ERROR) << "DdmServer.broadcast " << event << " failed";
env->ExceptionDescribe();
env->ExceptionClear();
}
}
void Dbg::DdmConnected() {
Dbg::DdmBroadcast(true);
}
void Dbg::DdmDisconnected() {
Dbg::DdmBroadcast(false);
gDdmThreadNotification = false;
}
/*
* Send a notification when a thread starts, stops, or changes its name.
*
* Because we broadcast the full set of threads when the notifications are
* first enabled, it's possible for "thread" to be actively executing.
*/
void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) {
if (!gDdmThreadNotification) {
return;
}
if (type == CHUNK_TYPE("THDE")) {
uint8_t buf[4];
JDWP::Set4BE(&buf[0], t->GetThinLockId());
Dbg::DdmSendChunk(CHUNK_TYPE("THDE"), 4, buf);
} else {
CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type;
SirtRef<String> name(t->GetThreadName());
size_t char_count = (name.get() != NULL) ? name->GetLength() : 0;
const jchar* chars = name->GetCharArray()->GetData();
std::vector<uint8_t> bytes;
JDWP::Append4BE(bytes, t->GetThinLockId());
JDWP::AppendUtf16BE(bytes, chars, char_count);
CHECK_EQ(bytes.size(), char_count*2 + sizeof(uint32_t)*2);
Dbg::DdmSendChunk(type, bytes);
}
}
static void DdmSendThreadStartCallback(Thread* t, void*) {
Dbg::DdmSendThreadNotification(t, CHUNK_TYPE("THCR"));
}
void Dbg::DdmSetThreadNotification(bool enable) {
// We lock the thread list to avoid sending duplicate events or missing
// a thread change. We should be okay holding this lock while sending
// the messages out. (We have to hold it while accessing a live thread.)
ScopedThreadListLock thread_list_lock;
gDdmThreadNotification = enable;
if (enable) {
Runtime::Current()->GetThreadList()->ForEach(DdmSendThreadStartCallback, NULL);
}
}
void Dbg::PostThreadStartOrStop(Thread* t, uint32_t type) {
if (gDebuggerActive) {
JDWP::ObjectId id = gRegistry->Add(t->GetPeer());
gJdwpState->PostThreadChange(id, type == CHUNK_TYPE("THCR"));
}
Dbg::DdmSendThreadNotification(t, type);
}
void Dbg::PostThreadStart(Thread* t) {
Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THCR"));
}
void Dbg::PostThreadDeath(Thread* t) {
Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THDE"));
}
void Dbg::DdmSendChunk(uint32_t type, size_t byte_count, const uint8_t* buf) {
CHECK(buf != NULL);
iovec vec[1];
vec[0].iov_base = reinterpret_cast<void*>(const_cast<uint8_t*>(buf));
vec[0].iov_len = byte_count;
Dbg::DdmSendChunkV(type, vec, 1);
}
void Dbg::DdmSendChunk(uint32_t type, const std::vector<uint8_t>& bytes) {
DdmSendChunk(type, bytes.size(), &bytes[0]);
}
void Dbg::DdmSendChunkV(uint32_t type, const struct iovec* iov, int iov_count) {
if (gJdwpState == NULL) {
VLOG(jdwp) << "Debugger thread not active, ignoring DDM send: " << type;
} else {
gJdwpState->DdmSendChunkV(type, iov, iov_count);
}
}
int Dbg::DdmHandleHpifChunk(HpifWhen when) {
if (when == HPIF_WHEN_NOW) {
DdmSendHeapInfo(when);
return true;
}
if (when != HPIF_WHEN_NEVER && when != HPIF_WHEN_NEXT_GC && when != HPIF_WHEN_EVERY_GC) {
LOG(ERROR) << "invalid HpifWhen value: " << static_cast<int>(when);
return false;
}
gDdmHpifWhen = when;
return true;
}
bool Dbg::DdmHandleHpsgNhsgChunk(Dbg::HpsgWhen when, Dbg::HpsgWhat what, bool native) {
if (when != HPSG_WHEN_NEVER && when != HPSG_WHEN_EVERY_GC) {
LOG(ERROR) << "invalid HpsgWhen value: " << static_cast<int>(when);
return false;
}
if (what != HPSG_WHAT_MERGED_OBJECTS && what != HPSG_WHAT_DISTINCT_OBJECTS) {
LOG(ERROR) << "invalid HpsgWhat value: " << static_cast<int>(what);
return false;
}
if (native) {
gDdmNhsgWhen = when;
gDdmNhsgWhat = what;
} else {
gDdmHpsgWhen = when;
gDdmHpsgWhat = what;
}
return true;
}
void Dbg::DdmSendHeapInfo(HpifWhen reason) {
// If there's a one-shot 'when', reset it.
if (reason == gDdmHpifWhen) {
if (gDdmHpifWhen == HPIF_WHEN_NEXT_GC) {
gDdmHpifWhen = HPIF_WHEN_NEVER;
}
}
/*
* Chunk HPIF (client --> server)
*
* Heap Info. General information about the heap,
* suitable for a summary display.
*
* [u4]: number of heaps
*
* For each heap:
* [u4]: heap ID
* [u8]: timestamp in ms since Unix epoch
* [u1]: capture reason (same as 'when' value from server)
* [u4]: max heap size in bytes (-Xmx)
* [u4]: current heap size in bytes
* [u4]: current number of bytes allocated
* [u4]: current number of objects allocated
*/
uint8_t heap_count = 1;
std::vector<uint8_t> bytes;
JDWP::Append4BE(bytes, heap_count);
JDWP::Append4BE(bytes, 1); // Heap id (bogus; we only have one heap).
JDWP::Append8BE(bytes, MilliTime());
JDWP::Append1BE(bytes, reason);
JDWP::Append4BE(bytes, Heap::GetMaxMemory()); // Max allowed heap size in bytes.
JDWP::Append4BE(bytes, Heap::GetTotalMemory()); // Current heap size in bytes.
JDWP::Append4BE(bytes, Heap::GetBytesAllocated());
JDWP::Append4BE(bytes, Heap::GetObjectsAllocated());
CHECK_EQ(bytes.size(), 4U + (heap_count * (4 + 8 + 1 + 4 + 4 + 4 + 4)));
Dbg::DdmSendChunk(CHUNK_TYPE("HPIF"), bytes);
}
enum HpsgSolidity {
SOLIDITY_FREE = 0,
SOLIDITY_HARD = 1,
SOLIDITY_SOFT = 2,
SOLIDITY_WEAK = 3,
SOLIDITY_PHANTOM = 4,
SOLIDITY_FINALIZABLE = 5,
SOLIDITY_SWEEP = 6,
};
enum HpsgKind {
KIND_OBJECT = 0,
KIND_CLASS_OBJECT = 1,
KIND_ARRAY_1 = 2,
KIND_ARRAY_2 = 3,
KIND_ARRAY_4 = 4,
KIND_ARRAY_8 = 5,
KIND_UNKNOWN = 6,
KIND_NATIVE = 7,
};
#define HPSG_PARTIAL (1<<7)
#define HPSG_STATE(solidity, kind) ((uint8_t)((((kind) & 0x7) << 3) | ((solidity) & 0x7)))
struct HeapChunkContext {
std::vector<uint8_t> buf;
uint8_t* p;
uint8_t* pieceLenField;
size_t totalAllocationUnits;
uint32_t type;
bool merge;
bool needHeader;
// Maximum chunk size. Obtain this from the formula:
// (((maximum_heap_size / ALLOCATION_UNIT_SIZE) + 255) / 256) * 2
HeapChunkContext(bool merge, bool native)
: buf(16384 - 16),
type(0),
merge(merge) {
Reset();
if (native) {
type = CHUNK_TYPE("NHSG");
} else {
type = merge ? CHUNK_TYPE("HPSG") : CHUNK_TYPE("HPSO");
}
}
~HeapChunkContext() {
if (p > &buf[0]) {
Flush();
}
}
void EnsureHeader(const void* chunk_ptr) {
if (!needHeader) {
return;
}
// Start a new HPSx chunk.
JDWP::Write4BE(&p, 1); // Heap id (bogus; we only have one heap).
JDWP::Write1BE(&p, 8); // Size of allocation unit, in bytes.
JDWP::Write4BE(&p, reinterpret_cast<uintptr_t>(chunk_ptr)); // virtual address of segment start.
JDWP::Write4BE(&p, 0); // offset of this piece (relative to the virtual address).
// [u4]: length of piece, in allocation units
// We won't know this until we're done, so save the offset and stuff in a dummy value.
pieceLenField = p;
JDWP::Write4BE(&p, 0x55555555);
needHeader = false;
}
void Flush() {
// Patch the "length of piece" field.
CHECK_LE(&buf[0], pieceLenField);
CHECK_LE(pieceLenField, p);
JDWP::Set4BE(pieceLenField, totalAllocationUnits);
Dbg::DdmSendChunk(type, p - &buf[0], &buf[0]);
Reset();
}
static void HeapChunkCallback(const void* chunk_ptr, size_t chunk_len, const void* user_ptr, size_t user_len, void* arg) {
reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkCallback(chunk_ptr, chunk_len, user_ptr, user_len);
}
private:
enum { ALLOCATION_UNIT_SIZE = 8 };
void Reset() {
p = &buf[0];
totalAllocationUnits = 0;
needHeader = true;
pieceLenField = NULL;
}
void HeapChunkCallback(const void* chunk_ptr, size_t chunk_len, const void* user_ptr, size_t user_len) {
CHECK_EQ((chunk_len & (ALLOCATION_UNIT_SIZE-1)), 0U);
/* Make sure there's enough room left in the buffer.
* We need to use two bytes for every fractional 256
* allocation units used by the chunk.
*/
{
size_t needed = (((chunk_len/ALLOCATION_UNIT_SIZE + 255) / 256) * 2);
size_t bytesLeft = buf.size() - (size_t)(p - &buf[0]);
if (bytesLeft < needed) {
Flush();
}
bytesLeft = buf.size() - (size_t)(p - &buf[0]);
if (bytesLeft < needed) {
LOG(WARNING) << "Chunk is too big to transmit (chunk_len=" << chunk_len << ", " << needed << " bytes)";
return;
}
}
// OLD-TODO: notice when there's a gap and start a new heap, or at least a new range.
EnsureHeader(chunk_ptr);
// Determine the type of this chunk.
// OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
// If it's the same, we should combine them.
uint8_t state = ExamineObject(reinterpret_cast<const Object*>(user_ptr), (type == CHUNK_TYPE("NHSG")));
// Write out the chunk description.
chunk_len /= ALLOCATION_UNIT_SIZE; // convert to allocation units
totalAllocationUnits += chunk_len;
while (chunk_len > 256) {
*p++ = state | HPSG_PARTIAL;
*p++ = 255; // length - 1
chunk_len -= 256;
}
*p++ = state;
*p++ = chunk_len - 1;
}
uint8_t ExamineObject(const Object* o, bool is_native_heap) {
if (o == NULL) {
return HPSG_STATE(SOLIDITY_FREE, 0);
}
// It's an allocated chunk. Figure out what it is.
// If we're looking at the native heap, we'll just return
// (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks.
if (is_native_heap || !Heap::IsLiveObjectLocked(o)) {
return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
}
Class* c = o->GetClass();
if (c == NULL) {
// The object was probably just created but hasn't been initialized yet.
return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
}
if (!Heap::IsHeapAddress(c)) {
LOG(WARNING) << "Invalid class for managed heap object: " << o << " " << c;
return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
}
if (c->IsClassClass()) {
return HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
}
if (c->IsArrayClass()) {
if (o->IsObjectArray()) {
return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
}
switch (c->GetComponentSize()) {
case 1: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
case 2: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
case 4: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
case 8: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
}
}
return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
}
DISALLOW_COPY_AND_ASSIGN(HeapChunkContext);
};
void Dbg::DdmSendHeapSegments(bool native) {
Dbg::HpsgWhen when;
Dbg::HpsgWhat what;
if (!native) {
when = gDdmHpsgWhen;
what = gDdmHpsgWhat;
} else {
when = gDdmNhsgWhen;
what = gDdmNhsgWhat;
}
if (when == HPSG_WHEN_NEVER) {
return;
}
// Figure out what kind of chunks we'll be sending.
CHECK(what == HPSG_WHAT_MERGED_OBJECTS || what == HPSG_WHAT_DISTINCT_OBJECTS) << static_cast<int>(what);
// First, send a heap start chunk.
uint8_t heap_id[4];
JDWP::Set4BE(&heap_id[0], 1); // Heap id (bogus; we only have one heap).
Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"), sizeof(heap_id), heap_id);
// Send a series of heap segment chunks.
HeapChunkContext context((what == HPSG_WHAT_MERGED_OBJECTS), native);
if (native) {
dlmalloc_walk_heap(HeapChunkContext::HeapChunkCallback, &context);
} else {
Heap::WalkHeap(HeapChunkContext::HeapChunkCallback, &context);
}
// Finally, send a heap end chunk.
Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"), sizeof(heap_id), heap_id);
}
void Dbg::SetAllocTrackingEnabled(bool enabled) {
MutexLock mu(gAllocTrackerLock);
if (enabled) {
if (recent_allocation_records_ == NULL) {
LOG(INFO) << "Enabling alloc tracker (" << kNumAllocRecords << " entries, "
<< kMaxAllocRecordStackDepth << " frames --> "
<< (sizeof(AllocRecord) * kNumAllocRecords) << " bytes)";
gAllocRecordHead = gAllocRecordCount = 0;
recent_allocation_records_ = new AllocRecord[kNumAllocRecords];
CHECK(recent_allocation_records_ != NULL);
}
} else {
delete[] recent_allocation_records_;
recent_allocation_records_ = NULL;
}
}
struct AllocRecordStackVisitor : public Thread::StackVisitor {
explicit AllocRecordStackVisitor(AllocRecord* record) : record(record), depth(0) {
}
virtual void VisitFrame(const Frame& f, uintptr_t pc) {
if (depth >= kMaxAllocRecordStackDepth) {
return;
}
Method* m = f.GetMethod();
if (m == NULL || m->IsCalleeSaveMethod()) {
return;
}
record->stack[depth].method = m;
record->stack[depth].raw_pc = pc;
++depth;
}
~AllocRecordStackVisitor() {
// Clear out any unused stack trace elements.
for (; depth < kMaxAllocRecordStackDepth; ++depth) {
record->stack[depth].method = NULL;
record->stack[depth].raw_pc = 0;
}
}
AllocRecord* record;
size_t depth;
};
void Dbg::RecordAllocation(Class* type, size_t byte_count) {
Thread* self = Thread::Current();
CHECK(self != NULL);
MutexLock mu(gAllocTrackerLock);
if (recent_allocation_records_ == NULL) {
return;
}
// Advance and clip.
if (++gAllocRecordHead == kNumAllocRecords) {
gAllocRecordHead = 0;
}
// Fill in the basics.
AllocRecord* record = &recent_allocation_records_[gAllocRecordHead];
record->type = type;
record->byte_count = byte_count;
record->thin_lock_id = self->GetThinLockId();
// Fill in the stack trace.
AllocRecordStackVisitor visitor(record);
self->WalkStack(&visitor);
if (gAllocRecordCount < kNumAllocRecords) {
++gAllocRecordCount;
}
}
/*
* Return the index of the head element.
*
* We point at the most-recently-written record, so if allocRecordCount is 1
* we want to use the current element. Take "head+1" and subtract count
* from it.
*
* We need to handle underflow in our circular buffer, so we add
* kNumAllocRecords and then mask it back down.
*/
inline static int headIndex() {
return (gAllocRecordHead+1 + kNumAllocRecords - gAllocRecordCount) & (kNumAllocRecords-1);
}
void Dbg::DumpRecentAllocations() {
MutexLock mu(gAllocTrackerLock);
if (recent_allocation_records_ == NULL) {
LOG(INFO) << "Not recording tracked allocations";
return;
}
// "i" is the head of the list. We want to start at the end of the
// list and move forward to the tail.
size_t i = headIndex();
size_t count = gAllocRecordCount;
LOG(INFO) << "Tracked allocations, (head=" << gAllocRecordHead << " count=" << count << ")";
while (count--) {
AllocRecord* record = &recent_allocation_records_[i];
LOG(INFO) << StringPrintf(" T=%-2d %6zd ", record->thin_lock_id, record->byte_count)
<< PrettyClass(record->type);
for (size_t stack_frame = 0; stack_frame < kMaxAllocRecordStackDepth; ++stack_frame) {
const Method* m = record->stack[stack_frame].method;
if (m == NULL) {
break;
}
LOG(INFO) << " " << PrettyMethod(m) << " line " << record->stack[stack_frame].LineNumber();
}
// pause periodically to help logcat catch up
if ((count % 5) == 0) {
usleep(40000);
}
i = (i + 1) & (kNumAllocRecords-1);
}
}
class StringTable {
public:
StringTable() {
}
void Add(const char* s) {
table_.insert(s);
}
size_t IndexOf(const char* s) {
return std::distance(table_.begin(), table_.find(s));
}
size_t Size() {
return table_.size();
}
void WriteTo(std::vector<uint8_t>& bytes) {
typedef std::set<const char*>::const_iterator It; // TODO: C++0x auto
for (It it = table_.begin(); it != table_.end(); ++it) {
const char* s = *it;
size_t s_len = CountModifiedUtf8Chars(s);
UniquePtr<uint16_t> s_utf16(new uint16_t[s_len]);
ConvertModifiedUtf8ToUtf16(s_utf16.get(), s);
JDWP::AppendUtf16BE(bytes, s_utf16.get(), s_len);
}
}
private:
std::set<const char*> table_;
DISALLOW_COPY_AND_ASSIGN(StringTable);
};
/*
* The data we send to DDMS contains everything we have recorded.
*
* Message header (all values big-endian):
* (1b) message header len (to allow future expansion); includes itself
* (1b) entry header len
* (1b) stack frame len
* (2b) number of entries
* (4b) offset to string table from start of message
* (2b) number of class name strings
* (2b) number of method name strings
* (2b) number of source file name strings
* For each entry:
* (4b) total allocation size
* (2b) threadId
* (2b) allocated object's class name index
* (1b) stack depth
* For each stack frame:
* (2b) method's class name
* (2b) method name
* (2b) method source file
* (2b) line number, clipped to 32767; -2 if native; -1 if no source
* (xb) class name strings
* (xb) method name strings
* (xb) source file strings
*
* As with other DDM traffic, strings are sent as a 4-byte length
* followed by UTF-16 data.
*
* We send up 16-bit unsigned indexes into string tables. In theory there
* can be (kMaxAllocRecordStackDepth * kNumAllocRecords) unique strings in
* each table, but in practice there should be far fewer.
*
* The chief reason for using a string table here is to keep the size of
* the DDMS message to a minimum. This is partly to make the protocol
* efficient, but also because we have to form the whole thing up all at
* once in a memory buffer.
*
* We use separate string tables for class names, method names, and source
* files to keep the indexes small. There will generally be no overlap
* between the contents of these tables.
*/
jbyteArray Dbg::GetRecentAllocations() {
if (false) {
DumpRecentAllocations();
}
MutexLock mu(gAllocTrackerLock);
/*
* Part 1: generate string tables.
*/
StringTable class_names;
StringTable method_names;
StringTable filenames;
int count = gAllocRecordCount;
int idx = headIndex();
while (count--) {
AllocRecord* record = &recent_allocation_records_[idx];
class_names.Add(ClassHelper(record->type).GetDescriptor());
MethodHelper mh;
for (size_t i = 0; i < kMaxAllocRecordStackDepth; i++) {
Method* m = record->stack[i].method;
mh.ChangeMethod(m);
if (m != NULL) {
class_names.Add(mh.GetDeclaringClassDescriptor());
method_names.Add(mh.GetName());
filenames.Add(mh.GetDeclaringClassSourceFile());
}
}
What Are The Disadvantages Of Java?
What are the benefits of Java?
The advantages of Java are as follows:
- Java is easy to learn. Java was designed to be easy to use and is therefore easier to write, compile, debug, and learn than other programming languages.
- Java is object-oriented. This allows you to create modular programs and reusable code.
- Java is platform-independent.
What is so special about Java?
One of the main reasons Java is so popular is its platform independence, which means that Java programs can be run on many different types of computers. A Java program runs on any computer with a Java Runtime Environment, also known as a JRE, installed.
Should I learn Java in 2020?
There are numerous reasons why Java continues to be a leader in the world of development and why it is still a language worth learning in 2020. Although Java’s many benefits and capabilities make it a very diverse language, it is actually fairly easy to learn and master for developers of all skill levels.
Does Google use Java?
When it comes to Google, Java is mainly used for coding servers and developing the user interface. Java enjoys the full backing of several libraries. JavaScript is a scripting language that is used to make websites more interactive. It is rated among the top languages used internally at Google.
What are the 2 types of Java programs?
Types of Java programs:
- Java Applet – a small program written in Java that is downloaded from a website and executed within a web browser on a client computer.
- Application – executes on a client computer. …
- JAR file (Java archive) – used to package Java files together into a single file (almost exactly like a …)
Why is OOP important?
Object-oriented programming is often the most natural and pragmatic approach, once you get the hang of it. OOP languages allow you to break down your software into bite-sized problems that you can then solve, one object at a time. This isn't to say that OOP is the One True Way.
Is OOP better than procedural?
Procedural programming does not have any proper way of hiding data, so it is less secure. Object-oriented programming provides data hiding, so it is more secure. … In procedural programming, function is more important than data. In object-oriented programming, data is more important than function.
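As a minimal sketch of the data-hiding point above (the Account class and its members are made up for illustration), the private field below can only be changed through a method that enforces a rule, which is the kind of control that globally visible procedural data does not give you:

public class Account {
    // Hidden state: code outside this class cannot touch the field directly.
    private long balanceCents;

    public long getBalanceCents() {
        return balanceCents;
    }

    // The only way to modify the balance is through a method that validates the change.
    public void deposit(long amountCents) {
        if (amountCents <= 0) {
            throw new IllegalArgumentException("deposit must be positive");
        }
        balanceCents += amountCents;
    }
}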
Who uses Java?
9476 companies reportedly use Java in their tech stacks, including Uber, Airbnb, Google, Pinterest, Netflix, Instagram, Spotify, and Amazon.
What are the applications of Java?
- Desktop GUI Applications: Java provides GUI development through various means like the Abstract Windowing Toolkit (AWT), Swing, and JavaFX. …
- Mobile Applications: …
- Embedded Systems: …
- Web Applications: …
- Web Servers and Application Servers: …
- Enterprise Applications: …
- Scientific Applications: …
What are the key features of Java?
What are the major features of Java programming?
- Object Oriented: In Java, everything is an Object. …
- Platform Independent: Unlike many other programming languages, including C and C++, when Java is compiled it is not compiled into platform-specific machine code, but rather into platform-independent byte code. …
- Simple. …
- Secure. …
- Architecture-neutral. …
- Portable. …
- Robust. …
- Multithreaded.
What are the disadvantages of OOP?
Other OOP disadvantages are: 1- Steep learning curve: The thought process involved in OO programming may not be natural for some people, and it will take time to get used to it. 2- The complexity of creating programs: it is very complex to create programs based on the interaction of objects.
Which language is faster, Java or Python?
Java is generally faster and more efficient than Python because it is a compiled language. As an interpreted language, Python has simpler, more concise syntax than Java. It can perform the same function as Java in fewer lines of code.
Is Python better than Java?
Python wins again. Performance is where Java has a substantial advantage over Python. Java’s just-in-time compilation gives it an advantage over Python’s interpreted performance. While neither language is suitable for latency-sensitive applications, Java is still a great deal faster than Python.
What are Java's advantages and disadvantages?
Java is more straightforward to use, write, compile, debug, and learn than alternative programming languages. Java is less complicated than C++; as a result, Java uses automatic memory allocation and garbage collection.
Is Java a dying language?
Although the TIOBE index has shown Java to be a language in decline, it nevertheless remains comfortably at the top of the table. It might have dropped significantly between 2016 and 2017, but more recently its decline has slowed: it has dropped only 0.92% between October 2018 and October 2019.
Where is Java mostly used?
One of the most widely used programming languages, Java is used as the server-side language for most back-end development projects, including those involving big data and Android development. Java is also commonly used for desktop computing, other mobile computing, games, and numerical computing.
Should I learn Java or Python?
If you're just interested in programming and want to dip your feet in without going all the way, learn Python for its easier-to-learn syntax. If you plan to pursue computer science/engineering, I would recommend Java first, because it also helps you understand the inner workings of programming.
What is the diamond problem in Java?
Suppose two parent types both define a demo() method and a subclass inherits from both. If you then call the demo() method on an object of the subclass, the compiler faces an ambiguous situation, not knowing which method to call. This issue is known as the diamond problem in Java. Because of this, Java does not support multiple inheritance, i.e., you cannot extend more than one other class.
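As a rough sketch of that ambiguity (the interface and class names here are made up for illustration), Java 8 default methods reproduce the same situation with interfaces; the class below only compiles because it overrides demo() and explicitly picks one parent:

interface A {
    default String demo() { return "A"; }
}

interface B {
    default String demo() { return "B"; }
}

// Without this override, javac reports something like "class C inherits
// unrelated defaults for demo() from types A and B", which is exactly the
// ambiguity described above.
class C implements A, B {
    @Override
    public String demo() {
        return A.super.demo(); // explicitly choose one parent's implementation
    }
}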
Mersenne Twister Weird Error
Hi,
We have recently got the new conformant OpenCL SDK and we have tried to convert the CUDA SDK Mersenne Twister algorithm, and we have encountered a weird error: the screen becomes garbled and cannot be restored, and the only solution is to restart Windows Vista x64. (BTW: We are not registered users :) )
We are giving the following “problematic” code.
#define MT_RNG_COUNT 4096
#define MT_MM 9
#define MT_NN 19
#define MT_WMASK 0xFFFFFFFFU
#define MT_UMASK 0xFFFFFFFEU
#define MT_LMASK 0x1U
#define MT_SHIFT0 12
#define MT_SHIFTB 7
#define MT_SHIFTC 15
#define MT_SHIFT1 18
typedef struct
{
unsigned int matrix_a;
unsigned int mask_b;
unsigned int mask_c;
unsigned int seed;
} mt_struct_stripped;
// OpenCL Kernel Function for element by element vector addition
__kernel void Rand(__global float *d_Random, __constant mt_struct_stripped* ds_MT, __global int NPerRng)
{
const int tid = get_global_id(0);
const int THREAD_N = get_global_size(0);
unsigned int mt[MT_NN];
int iState, iState1, iStateM, iOut, iRng;
unsigned int mti, mti1, mtiM, x;
//Initialize current state
for(iRng = tid; iRng < MT_RNG_COUNT; iRng += THREAD_N)
{
mt_struct_stripped config = ds_MT[iRng];
mt[0] = config.seed;
for(iState = 1; iState < MT_NN; iState++)
mt[iState] = (1812433253U * (mt[iState - 1] ^ (mt[iState - 1] >> 30)) + iState) & MT_WMASK;
iState = 0;
mti1 = mt[0];
for(int iOut = 0; iOut < NPerRng; iOut++)
{
iState1 = iState + 1;
iStateM = iState + MT_MM;
if(iState1 >= MT_NN) iState1 -= MT_NN;
if(iStateM >= MT_NN) iStateM -= MT_NN;
mti = mti1;
mti1 = mt[iState1];
mtiM = mt[iStateM];
x = (mti & MT_UMASK) | (mti1 & MT_LMASK);
x = mtiM ^ (x >> 1) ^ ((x & 1) ? config.matrix_a : 0);
mt[iState] = x;
iState = iState1;
//Tempering transformation
x ^= (x >> MT_SHIFT0);
x ^= (x << MT_SHIFTB) & config.mask_b;
x ^= (x << MT_SHIFTC) & config.mask_c;
x ^= (x >> MT_SHIFT1);
//Convert to (0, 1] float and write to global memory
d_Random[iRng + iOut * MT_RNG_COUNT] = ((float)x + 1.0f) / 4294967296.0f;
}
}
}
We have found that this line of code causes the error (we have a workaround for it)
x = mtiM ^ (x >> 1) ^ ((x & 1) ? config.matrix_a : 0);
The following code is safe but “a lot” slower.
x = mtiM ^ (x >> 1) ^ ((x & 1) ? ds_MT[iRng].matrix_a : 0);
Any comments on this subject may be useful.
Thanks in advance.
Hi,
It's strange, because Mersenne Twister works fine for me…
The only difference is that I don't use a .cl file; I put my kernel in a char * with a macro:
#ifndef STRINGIFY
#define STRINGIFY(x) #x “\n”
#endif
So it's impossible for me to pass the structure as a parameter, so I pass every parameter of the structure separately.
And finally it works for me because I do:
matrix_a = pmatrix_a[iRng]
x = mtiM ^ (x >> 1) ^ ((x & 1) ? matrix_a : 0);
Thx
J
When I'm using the mt_struct_stripped struct, it results in the error described above. Using the workaround described above works, but it is a lot slower.
I've tried replacing the struct with a uint4: no error, but the results are wrong.
Finally, I've split all of the variables of the struct into different buffers (like jonathan81 suggested). This works: correct results and less slowdown.
But we couldn't find out why the struct version or the uint4 version does not work.
I don’t have an answer to your question but I have just coincidentally ported my CUDA-based Mersenne Twister generator to OpenCL. It’s a variant of the CUDA SDK example which, rather than generating a block of random numbers, provides a function which any thread can use to pull a new random number - like rand().
I’ve tested it on NVIDIA’s drivers and it isn’t astoundingly fast - about 1.7B PRNs/second into a GPU register on a GTX 260 - but it should improve as the drivers catch up. It’s also a cute demonstration of using Python to control OpenCL. Here’s the repository if you want to have a look:
http://git.jcornwall.me.uk/MersenneTwisterOCL/
Grab the source with:
git clone git://git.jcornwall.me.uk/MersenneTwisterOCL
We found out what triggers the error. We don't know why, but we cannot assign a struct with “mt_struct_stripped config = ds_MT[iRng];”. Assigning all the variables separately works, though.
/*
 * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_INLINE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_INLINE_HPP

#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahClosures.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "runtime/thread.hpp"

ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
  _mark_context(ShenandoahHeap::heap()->marking_context()) {
}

bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
  if (CompressedOops::is_null(obj)) {
    return false;
  }
  obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
  shenandoah_assert_not_forwarded_if(NULL, obj,
                                     (ShenandoahHeap::heap()->is_concurrent_mark_in_progress() ||
                                      ShenandoahHeap::heap()->is_concurrent_traversal_in_progress()));
  return _mark_context->is_marked(obj);
}

ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
  _mark_context(ShenandoahHeap::heap()->marking_context()) {
}

bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
  if (CompressedOops::is_null(obj)) {
    return false;
  }
  shenandoah_assert_not_forwarded(NULL, obj);
  return _mark_context->is_marked(obj);
}

BoolObjectClosure* ShenandoahIsAliveSelector::is_alive_closure() {
  return ShenandoahHeap::heap()->has_forwarded_objects() ?
         reinterpret_cast<BoolObjectClosure*>(&_fwd_alive_cl) :
         reinterpret_cast<BoolObjectClosure*>(&_alive_cl);
}

ShenandoahUpdateRefsClosure::ShenandoahUpdateRefsClosure() :
  _heap(ShenandoahHeap::heap()) {
}

template <class T>
void ShenandoahUpdateRefsClosure::do_oop_work(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    _heap->update_with_forwarded_not_null(p, obj);
  }
}

void ShenandoahUpdateRefsClosure::do_oop(oop* p)       { do_oop_work(p); }
void ShenandoahUpdateRefsClosure::do_oop(narrowOop* p) { do_oop_work(p); }

ShenandoahEvacuateUpdateRootsClosure::ShenandoahEvacuateUpdateRootsClosure() :
  _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
}

template <class T>
void ShenandoahEvacuateUpdateRootsClosure::do_oop_work(T* p) {
  assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

  T o = RawAccess<>::oop_load(p);
  if (! CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    if (_heap->in_collection_set(obj)) {
      shenandoah_assert_marked(p, obj);
      oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      if (oopDesc::equals_raw(resolved, obj)) {
        resolved = _heap->evacuate_object(obj, _thread);
      }
      RawAccess<IS_NOT_NULL>::oop_store(p, resolved);
    }
  }
}
void ShenandoahEvacuateUpdateRootsClosure::do_oop(oop* p) {
  do_oop_work(p);
}

void ShenandoahEvacuateUpdateRootsClosure::do_oop(narrowOop* p) {
  do_oop_work(p);
}

ShenandoahEvacUpdateOopStorageRootsClosure::ShenandoahEvacUpdateOopStorageRootsClosure() :
  _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
}

void ShenandoahEvacUpdateOopStorageRootsClosure::do_oop(oop* p) {
  assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

  oop obj = RawAccess<>::oop_load(p);
  if (! CompressedOops::is_null(obj)) {
    if (_heap->in_collection_set(obj)) {
      shenandoah_assert_marked(p, obj);
      oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      if (oopDesc::equals_raw(resolved, obj)) {
        resolved = _heap->evacuate_object(obj, _thread);
      }

      Atomic::cmpxchg(resolved, p, obj);
    }
  }
}

void ShenandoahEvacUpdateOopStorageRootsClosure::do_oop(narrowOop* p) {
  ShouldNotReachHere();
}

#ifdef ASSERT
template <class T>
void ShenandoahAssertNotForwardedClosure::do_oop_work(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    shenandoah_assert_not_forwarded(p, obj);
  }
}

void ShenandoahAssertNotForwardedClosure::do_oop(narrowOop* p) { do_oop_work(p); }
void ShenandoahAssertNotForwardedClosure::do_oop(oop* p)       { do_oop_work(p); }
#endif

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_HPP
Laravel: How to get a route to directly show a view?
If you have a simple controller method that is just:
public function about_me() {
    return view('static.about_me');
}
Then you can do the following in your routes file:
Route::view('/about', 'static.about_me');
This makes it a bit quicker to code. Sometimes it is common to end up with a bunch of functions (in a controller named something like StaticPagesController) that don't do anything apart from return a view. Using the Route::view method in your routes file means you can skip making that function.
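If the view also needs a couple of simple values, Route::view() accepts an array of data as an optional third argument. A minimal sketch (the route, view name and data here are just examples):

// routes/web.php
// Renders the static.about_me view and passes $name to it,
// still without writing a controller method.
Route::view('/about', 'static.about_me', ['name' => 'Alex']);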
Module: Msf::Exploit::Git::PktLine
Included in:
SmartHttp, SmartHttp::Request, SmartHttp::Response
Defined in:
lib/msf/core/exploit/git/pkt_line.rb
Overview
This module implements the pkt-line format used by Git.
Constant Summary
FLUSH_PKT        = "0000"
DELIM_PKT        = "0001"
RESPONSE_END_PKT = "0002"
Class Method Summary
Class Method Details
.generate_data_pkt(data) ⇒ Object
# File 'lib/msf/core/exploit/git/pkt_line.rb', line 36
def self.generate_data_pkt(data)
return nil unless data
return nil if data.empty?
# The length should include the length
# of pkt-payload plus four characters for
# pkt-len plus another for the terminating LF
pkt_line_len = data.length + 4 + 1
pkt_line_len = pkt_line_len.to_s(16).rjust(4, '0')
"#{pkt_line_len}#{data}\n"
end
.generate_pkt_line(data, type: 'data-pkt') ⇒ Object
pkt-line format:
  pkt-line    = data-pkt / flush-pkt
  data-pkt    = pkt-len pkt-payload
  pkt-len     = 4*(HEXDIG)
  pkt-payload = (pkt-len - 4)*(OCTET)
source: git-scm.com/docs/protocol-common
# File 'lib/msf/core/exploit/git/pkt_line.rb', line 27
def self.generate_pkt_line(data, type: 'data-pkt')
case type
when 'data-pkt'
generate_data_pkt(data)
when 'flush-pkt'
FLUSH_PKT
end
end
.get_pkt_line_data(pkt_line) ⇒ String
Reads a single pkt-line and returns the data
Parameters:
• a (String)
single pkt-line
Returns:
• (String)
the pkt-line data
# File 'lib/msf/core/exploit/git/pkt_line.rb', line 59
def self.get_pkt_line_data(pkt_line)
return '' unless pkt_line.kind_of?(String)
line_len = pkt_line.length - 4
pkt_line[4, line_len - 1]
end
.get_pkt_lines(data) ⇒ Array
Retrieves pkt-lines from argument supplied
Parameters:
• data (String)
that possibly contains pkt-lines
Returns:
• (Array)
pkt-lines
# File 'lib/msf/core/exploit/git/pkt_line.rb', line 71
def self.get_pkt_lines(data)
return [] if data.empty?
pkt_lines = data.split("\n")
pkt_lines.each { |line| line.gsub!(FLUSH_PKT, '') }
pkt_lines.delete('')
pkt_lines
end
.has_pkt_line_data?(data) ⇒ Boolean
Determine if data contains any pkt-lines
Parameters:
• the (String)
data to check for pkt-lines
Returns:
• (Boolean)
# File 'lib/msf/core/exploit/git/pkt_line.rb', line 86
def self.has_pkt_line_data?(data)
return false unless data.kind_of?(String)
return false if data.empty?
get_pkt_lines(data).empty? ? false : true
end
.request_ends ⇒ Object
# File 'lib/msf/core/exploit/git/pkt_line.rb', line 50
def self.request_ends
[ "#{FLUSH_PKT}0009done", "#{FLUSH_PKT}0009#{FLUSH_PKT}" ]
end
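To make the length rule concrete: pkt-len is four hex digits that count themselves, the payload bytes and the trailing LF. Here is a minimal sketch of the same data-pkt encoding in PHP (the helper name is my own; it is not part of this module):

<?php
function pkt_line($data) {
    // strlen() is fine here because pkt-len is a byte count.
    // 4 bytes for the hex length field itself + 1 for the trailing LF.
    $len = strlen($data) + 4 + 1;
    return sprintf('%04x', $len) . $data . "\n";
}

echo pkt_line('want refs'); // prints "000ewant refs" followed by a newline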
Bro and NetBIOS
Hi,
i’m trying to use BRO to analyze data based on NetBIOS protocol: i’m using BRO 2.3.1.
I’ve made a small script with these lines:
…snippet…
const NetBIOSports = { 138/udp, 139/tcp, 445/tcp};
event bro_init() &priority=5
{
Analyzer::register_for_ports(Analyzer::ANALYZER_NETBIOSSSN,NetBIOSports);
}
Below you can see my stderr.log:
Internal error: unknown analyzer name NETBIOS; mismatch with tag analyzer::Component?
What does it mean?
Another question: SMB2.0 protocol is supported by BRO or not?
Thanks,
Vito
i'm trying to use BRO to analyze data based on NetBIOS protocol: i'm using BRO 2.3.1.
SMB support in 2.3.1 (and earlier) is broken and wildly incomplete.
Another question: SMB2.0 protocol is supported by BRO or not?
It should be moderately supported in 2.4. Broala has contributed an SMB (1+2) analyzer and it should be in 2.4.
.Seth
Hi Seth,
thanks for your support: about NetBIOS, do you have any suggestion? What’s wrong?
Vito
I don't know about the problem you're encountering, but I believe that even if you got the analyzer attached it wouldn't do much for you.
.Seth
Sorry Seth,
but I don't understand your answer: what do you mean by "... I believe that even if you got the analyzer attached it wouldn't do much for you"?
I want to analyze and decode all NetBIOS traffic. With the help of Google and your useful mailing list I've written a test script like this:
…snippet…
const NetBIOSports = { 138/udp, 139/tcp, 445/tcp};
event bro_init() &priority=5
{
Analyzer::register_for_ports(Analyzer::ANALYZER_NETBIOSSSN,NetBIOSports);
}
event netbios_session_message(c: connection, is_orig: bool, msg_type:count, data_len: count) &priority=5
{
print "netbios_session_message";
}
But Bro gives me this error:
Internal error: unknown analyzer name NETBIOS; mismatch with tag analyzer::Component?
Am I using Bro in the wrong way?
Analyzers
Community support for Analyzers (Intel VTune™ Profiler, Intel Advisor, Intel Inspector)
How to inspect a DLL?
maitrebart
Beginner
My code runs in a proprietary framework as a DLL.
How can I run Inspector so that it can analyze the DLL's memory behavior at run-time?
Rob5
New Contributor II
Thank you for the Forum post. I am currently researching possibilities.
Rob5
New Contributor II
Can you provide more information regarding how you're exercising the functions in your dll? Do you have an executable which is calling your dll, or are you invoking them directly from a batch? Any additional information you can provide will help determine the best course of action.
Thanks
Rob
tiwcpe8
Beginner
Hi Rob,
My project uses Python to call functions in a DLL. Is there any way to use Inspector to collect information on the DLL via Python?
Regards,
Panitee
mamey4
Beginner
Hi,
I'm facing a similar task right now: I have a VB.NET application that calls functions from a Fortran DLL (I have the source code for both). Is it possible to analyze the memory and threading behaviour of the functions inside the Fortran DLL with Inspector XE?
Best regards,
mamey
Rob5
New Contributor II
Intel Inspector XE is typically used for memory error and thread checker analysis with C, C++, C# .NET, and Fortran applications on Windows*- and Linux*-based platforms. It may be possible for Intel Inspector XE to target, for example, a shell or Perl script which calls an executable binary. You may need to specify the -executable-of-interest collection action modifier. As a simple example:
Command line example:
inspxe-cl -collect mi2 -executable-of-interest=notepad.exe -- perl test.pl
Where the test.pl simply contains:
system ("notepad.exe");
I believe the situations noted in earlier posts on this thread relating to dlls would need to be attempted / explored to determine the outcome. How is the dll being exercised? Have you attempted an analysis from the command line with the proposed target? If so, what was the outcome? If not, can you attempt an analysis or provide a simple example of your implementation using a common DLL so others can work with and replicate?
Rob
mamey4
Beginner
The functions inside the dll can of course be called from code written in any language; however, they require a large and very specific set of input parameters, which are all calculated and put together in the VB.NET application, according to the user's interaction with the software. So it's not easy to write a script that calls the dll functions with realistic input values.
SergeyKostrov
Valued Contributor II
In case of a general inspection, these two software products are very helpful:
- the Depends.exe utility from the MS Platform SDK - allows you to see all dependent DLLs;
- MS Visual Studio - allows you to see and edit resources.
In case of a memory usage analysis:
- Pview.exe and Pstat.exe utilities from MS Platform SDK and, of course, any debugger.
Best regards
Radius with LDAP only without EAP
• Hello,
I have a pfsense 2.3.2 machine with the radius package. Login and group membership works, but only without EAP. I have to set the option:
LDAP Authentication Support: Enable LDAP For Authentication: on
Description: "check plain-text password against the ldap database"
Without EAP, many WLAN devices have problems. The LDAP server is "paedml based on UCS@school". When I browse the LDAP with an LDAP browser, I don't see password hashes in the LDAP tree. Maybe this is the cause?
My test results:
Login with plaintext password works:
Command:
radtest LdapTestUser test12345 10.0.0.2 1812 RadiusSecret
Output:
Sending Access-Request of id 227 to 10.0.0.2 port 1812
User-Name = "LdapTestUser"
User-Password = "test12345"
NAS-IP-Address = 10.0.0.2
NAS-Port = 1812
Message-Authenticator = 0x00000000000000000000000000000000
rad_recv: Access-Accept packet from host 10.0.0.2 port 1812, id=227, length=20
Command:
tail -n 1 /var/log/radius.log
Output:
Thu Jan 26 08:55:52 2017 : Auth: Login OK: [LdapTestUser/test12345] (from client localhost port 1812)
EAP login with plaintext password fails:
Command:
radtest -t eap-md5 LdapTestUser test12345 10.0.0.2 1812 RadiusSecret
Output:
Sending Access-Request packet to host 10.0.0.2 port 1812, id=5, length=0
User-Name = "LdapTestUser"
User-Password = "test12345"
NAS-IP-Address = 10.0.0.2
NAS-Port = 1812
Message-Authenticator = 0x00
EAP-Code = Response
EAP-Type-Identity = 0x70657465722e6d65796572
EAP-Message = 0x020400100170657465722e6d65796572
Received Access-Challenge packet from host 10.0.0.2 port 1812, id=5, length=80
EAP-Message = 0x010500160410d0ddf264a93eb1f59fb26602a67bfc76
Message-Authenticator = 0xd0620682737ba789ea94a10675900ebc
State = 0xc88ef904c88bfd9a410dd006961d5165
EAP-Id = 5
EAP-Code = Request
EAP-Type-MD5-Challenge = 0x10d0ddf264a93eb1f59fb26602a67bfc76
Sending Access-Request packet to host 10.0.0.2 port 1812, id=6, length=99
User-Name = "LdapTestUser"
User-Password = "test12345"
NAS-IP-Address = 10.0.0.2
NAS-Port = 1812
Message-Authenticator = 0x00000000000000000000000000000000
EAP-Code = Response
EAP-Type-MD5-Challenge = 0x10b7622aa9c3063a14109fe31c485e9086
EAP-Id = 5
State = 0xc88ef904c88bfd9a410dd006961d5165
EAP-Message = 0x020500160410b7622aa9c3063a14109fe31c485e9086
Received Access-Reject packet from host 10.0.0.2 port 1812, id=6, length=44
EAP-Message = 0x04050004
Message-Authenticator = 0x1a336028fa1abe78c868e2b1318f5a08
EAP-Id = 5
EAP-Code = Failure
Login without plaintext password fails:
Command:
radtest LdapTestUser test12345 10.0.0.2 1812 RadiusSecret
Output:
Sending Access-Request of id 210 to 10.0.0.2 port 1812
User-Name = "LdapTestUser"
User-Password = "test12345"
NAS-IP-Address = 10.0.0.2
NAS-Port = 1812
Message-Authenticator = 0x00000000000000000000000000000000
rad_recv: Access-Reject packet from host 10.0.0.2 port 1812, id=210, length=20
Command:
tail -n 1 /var/log/radius.log
Output:
Thu Jan 26 08:58:15 2017 : Auth: Login incorrect: [LdapTestUser/test12345] (from client localhost port 1812)
EAP login without plaintext password fails:
Command:
radtest -t eap-md5 LdapTestUser test12345 10.0.0.2 1812 RadiusSecret
Output:
Sending Access-Request packet to host 10.0.0.2 port 1812, id=41, length=0
User-Name = "LdapTestUser"
User-Password = "test12345"
NAS-IP-Address = 10.0.0.2
NAS-Port = 1812
Message-Authenticator = 0x00
EAP-Code = Response
EAP-Type-Identity = 0x70657465722e6d65796572
EAP-Message = 0x022800100170657465722e6d65796572
Received Access-Challenge packet from host 10.0.0.2 port 1812, id=41, length=80
EAP-Message = 0x01290016041092e69b70515b4c41fd4f5f79deca9900
Message-Authenticator = 0x47a65388c92c7225c73a07b842f97838
State = 0xba499b57ba609fa1dee1ebdccb69e99b
EAP-Id = 41
EAP-Code = Request
EAP-Type-MD5-Challenge = 0x1092e69b70515b4c41fd4f5f79deca9900
Sending Access-Request packet to host 10.0.0.2 port 1812, id=42, length=99
User-Name = "LdapTestUser"
User-Password = "test12345"
NAS-IP-Address = 10.0.0.2
NAS-Port = 1812
Message-Authenticator = 0x00000000000000000000000000000000
EAP-Code = Response
EAP-Type-MD5-Challenge = 0x101f3425edf2ded1c8af487ed2bc4fd05a
EAP-Id = 41
State = 0xba499b57ba609fa1dee1ebdccb69e99b
EAP-Message = 0x0229001604101f3425edf2ded1c8af487ed2bc4fd05a
Received Access-Reject packet from host 10.0.0.2 port 1812, id=42, length=44
EAP-Message = 0x04290004
Message-Authenticator = 0xbd863e4fb16e2495681ca562d65fff14
EAP-Id = 41
EAP-Code = Failure
How can I get authentication with EAP running?
Thank you.
Samuel Schmidt
Don’t use strlen()
Each time I see someone use strlen() I cringe. It will break.
Despite its name, strlen() doesn’t count characters. It counts bytes. In UTF-8 a character may be up to four bytes long.
So what happens if we use strlen() and its companion substr() to shorten the title of post?
<?php # -*- coding: utf-8 -*-
declare( encoding = 'UTF-8' );
header('Content-Type: text/plain;charset=utf-8');
$string = 'Doppelgänger';
print 'strlen(): ' . strlen( $string ) . "\n";
print 'mb_strlen(): ' . mb_strlen( $string, 'utf8' ) . "\n\n";
print 'substr(): ' . substr( $string, 0, 8 ) . "\n";
print 'mb_substr(): ' . mb_substr( $string, 0, 8, 'utf8' );
Output:
I have to use an image here. If I had used the plain text output our newsfeed would break. And that’s what happens each time you use strlen() and substr() on strings encoded in UTF-8: You end up with partial characters and invalid UTF-8.
Alternatives for mb_strlen()
You can use different methods to get the real string length.
$length = preg_match_all( '(.)su', $string, $matches );
See also Hakre: PHP UTF-8 string Length.
Or just use …
$length = strlen( utf8_decode( $string ) );
There is also a nice php-utf8 library on GitHub from Frank Smit.
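If all you need is to shorten titles safely, a small helper built on mb_substr() does the job. A minimal sketch (the function name is my own):

<?php
function utf8_truncate( $string, $length, $suffix = '…' ) {
    if ( mb_strlen( $string, 'UTF-8' ) <= $length ) {
        return $string;
    }
    // Cut on a character boundary, never inside a multi-byte sequence.
    return mb_substr( $string, 0, $length, 'UTF-8' ) . $suffix;
}

print utf8_truncate( 'Doppelgänger', 8 ); // Doppelgä…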
WP_List_Table – a step by step guide
Throughout WordPress the class WP_List_Table is used to display data, e.g. users, plugins, comments, or posts. The class contains almost all necessary methods for displaying, sorting, paginating, and searching data and and what is more obvious than to use it for your own plugins?
This article tries to give you a comprehensive walk-through all necessary steps to create a table tailored to your needs.
1. Preliminary work
2. Basics
3. Sorting
4. Actions
5. Bulk actions
6. Pagination
7. Searching
8. Screen options
9. Styling the table
10. Other customizations
11. Weblinks
Preliminary work
For testing purposes we create a small plugin which adds a menu item:
<?php
/*
Plugin Name: My List Table Example
*/
?>
<div class="wrap">
<div id="icon-users" class="icon32"></div>
<h2>My List Table Test</h2>
</div>
<?php
Basics
For a start we're creating a list table with only the basic functionality. First we have to make sure that the necessary class is available since the WP_List_Table isn't loaded automatically:
if( ! class_exists( 'WP_List_Table' ) ) {
require_once( ABSPATH . 'wp-admin/includes/class-wp-list-table.php' );
}
To create a table to your needs you have to derive a class from WP_List_Table:
class My_List_Table extends WP_List_Table {
}
$myListTable = new My_List_Table();
For demonstration purposes we create some sample data. Usually this data would be read from the database:
var $example_data = array(
array('ID' => 1,'booktitle' => 'Quarter Share', 'author' => 'Nathan Lowell',
'isbn' => '978-0982514542'),
array('ID' => 2, 'booktitle' => '7th Son: Descent','author' => 'J. C. Hutchins',
'isbn' => '0312384378'),
array('ID' => 3, 'booktitle' => 'Shadowmagic', 'author' => 'John Lenahan',
'isbn' => '978-1905548927'),
array('ID' => 4, 'booktitle' => 'The Crown Conspiracy', 'author' => 'Michael J. Sullivan',
'isbn' => '978-0979621130'),
array('ID' => 5, 'booktitle' => 'Max Quick: The Pocket and the Pendant', 'author' => 'Mark Jeffrey',
'isbn' => '978-0061988929'),
array('ID' => 6, 'booktitle' => 'Jack Wakes Up: A Novel', 'author' => 'Seth Harwood',
'isbn' => '978-0307454355')
);
Before we can display the data in the table we have to define some methods and variables:
function get_columns(){
$columns = array(
'booktitle' => 'Title',
'author' => 'Author',
'isbn' => 'ISBN'
);
return $columns;
}
function prepare_items() {
$columns = $this->get_columns();
$hidden = array();
$sortable = array();
$this->_column_headers = array($columns, $hidden, $sortable);
$this->items = $this->example_data;
}
The method get_columns() is needed to label the columns on the top and bottom of the table. The keys in the array have to be the same as in the data array otherwise the respective columns aren't displayed.
prepare_items defines two arrays controlling the behaviour of the table:
• $hidden defines the hidden columns (see Screen Options),
• $sortable defines if the table can be sorted by this column.
Finally the method assigns the example data to the class' data representation variable items.
Before actually displaying each column WordPress looks for methods called column_{key_name}, e.g. function column_booktitle. There has to be such a method for every defined column. To avoid the need to create a method for each column there is column_default that will process any column for which no special method is defined:
function column_default( $item, $column_name ) {
switch( $column_name ) {
case 'booktitle':
case 'author':
case 'isbn':
return $item[ $column_name ];
default:
return print_r( $item, true ) ; //Show the whole array for troubleshooting purposes
}
}
In our example the method will return the title for every column and if the column is not found it displays the content of the $item array for debugging purposes.
These are the essential ingredients to define a custom list table class. All you have to do now is to add an admin page to the backend, create an instance of our class, prepare the items and call display() to actually display the table:
function my_add_menu_items(){
add_menu_page( 'My Plugin List Table', 'My List Table Example', 'activate_plugins', 'my_list_test', 'my_render_list_page' );
}
add_action( 'admin_menu', 'my_add_menu_items' );
function my_render_list_page(){
$myListTable = new My_Example_List_Table();
echo '<div class="wrap"><h2>My List Table Test</h2>';
$myListTable->prepare_items();
$myListTable->display();
echo '</div>';
}
This is the minimal version of a WP_List_Table possible:
Download minimal WP_List_Table example (gist)
Sorting
At the moment the items appear in the order they are defined in the code since the WP_List_Table class does not contain any code for sorting. What it does contain is some code to mark certain columns as sortable. In section "Basics" there already was a line $sortable = array(); which now will be changed to:
$sortable = $this->get_sortable_columns();
Additionally we need the method:
function get_sortable_columns() {
$sortable_columns = array(
'booktitle' => array('booktitle',false),
'author' => array('author',false),
'isbn' => array('isbn',false)
);
return $sortable_columns;
}
This way the above mentioned column headers are changed to links and display small triangles if the mouse hovers over them. The second parameter in the value array of $sortable_columns takes care of a possible pre-ordered column. If the value is true the column is assumed to be ordered ascending, if the value is false the column is assumed descending or unordered. This is needed for the small triangle beside the column name indicating the sort order to show in the correct direction:
If you click on the column header the page is reloaded and $_GET contains something like this:
array
'page' => string 'my_list_test' (length=12)
'orderby' => string 'booktitle' (length=5)
'order' => string 'asc' (length=3)
With this information you can write a method for sorting our example data:
function usort_reorder( $a, $b ) {
// If no sort, default to title
$orderby = ( ! empty( $_GET['orderby'] ) ) ? $_GET['orderby'] : 'booktitle';
// If no order, default to asc
$order = ( ! empty($_GET['order'] ) ) ? $_GET['order'] : 'asc';
// Determine sort order
$result = strcmp( $a[$orderby], $b[$orderby] );
// Send final sort direction to usort
return ( $order === 'asc' ) ? $result : -$result;
}
To actually sort the data we have to extend prepare_items():
function prepare_items() {
[..]
usort( $this->example_data, array( &$this, 'usort_reorder' ) );
$this->items = $this->example_data;
}
If you're retrieving the data from the database (which is most likely) it's of course best to use SQL's ORDER BY directly.
Actions
If you not only want to display the items but also want to manipulate them you have to define some actions:
function column_booktitle($item) {
$actions = array(
'edit' => sprintf('<a href="?page=%s&action=%s&book=%s">Edit</a>',$_REQUEST['page'],'edit',$item['ID']),
'delete' => sprintf('<a href="?page=%s&action=%s&book=%s">Delete</a>',$_REQUEST['page'],'delete',$item['ID']),
);
return sprintf('%1$s %2$s', $item['booktitle'], $this->row_actions($actions) );
}
These actions will appear if the user hovers the mouse cursor over the table:
If you click on one of the action links the form will return for example the following data in $_GET:
array
'page' => string 'my_list_test' (length=12)
'action' => string 'delete' (length=6)
'book' => string '2' (length=1)
Bulk actions
Bulk action are implemented by overwriting the method get_bulk_actions() and returning an associated array:
function get_bulk_actions() {
$actions = array(
'delete' => 'Delete'
);
return $actions;
}
This only puts the dropdown menu and the apply button above and below the table:
The checkboxes for the rows have to be defined separately. As mentioned above there is a method column_{column} for rendering a column. The cb-column is a special case:
function column_cb($item) {
return sprintf(
'<input type="checkbox" name="book[]" value="%s" />', $item['ID']
);
}
This method currently will not be processed because we have to tell the class about the new column by extending the method get_columns():
function get_columns() {
$columns = array(
'cb' => '<input type="checkbox" />',
[..]
}
This will also put the "select all" checkbox in the title bar:
If you don't want to display the checkbox in the title you simply set the value to an empty string. Nevertheless you still have to define the key/value pair otherwise no checkboxes are shown at all:
If "Apply" is pressed the form will return various variables: action and action2 contain the selected action or -1 if the user chose no action, and if any checkbox was selected the marked rows, in our case books, for example:
'action' => string 'delete' (length=6)
'book' =>
array
0 => string '2' (length=1)
1 => string '6' (length=1)
'action2' => string '-1' (length=2)
action contains the selection from the upper select box, action2 the selection from the lower select box, and book the id of the selected rows, if any. You can use the method current_action() to query action/action2:
$action = $this->current_action();
It will return action if it's set, otherwise action2. If nothing is set the method returns FALSE.
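To actually carry out the bulk delete you could add a small helper and call it at the top of prepare_items() before the data is loaded. A sketch against the sample data (the method name is arbitrary; with real data this would be a DELETE query, and you should also verify a nonce):

function process_bulk_action() {
    if ( 'delete' === $this->current_action() && ! empty( $_REQUEST['book'] ) ) {
        // $_REQUEST['book'] holds the IDs of the checked rows (see column_cb()).
        $ids = array_map( 'absint', (array) $_REQUEST['book'] );
        foreach ( $this->example_data as $key => $book ) {
            if ( in_array( $book['ID'], $ids, true ) ) {
                unset( $this->example_data[ $key ] );
            }
        }
    }
}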
Pagination
First things first: WordPress does not paginate your data in any way. It only contains a method to display a navigation bar on the top and bottom right of the table:
You have to tell the method how many items you have in total, how many items shall be displayed on a page, and most important, the data to be displayed on the page:
function prepare_items() {
[...]
$per_page = 5;
$current_page = $this->get_pagenum();
$total_items = count($this->example_data);
// only necessary because we have sample data
$this->found_data = array_slice($this->example_data,(($current_page-1)*$per_page),$per_page);
$this->set_pagination_args( array(
'total_items' => $total_items, //WE have to calculate the total number of items
'per_page' => $per_page //WE have to determine how many items to show on a page
) );
$this->items = $this->found_data;
}
As pointed out in the comment the array_slice is only necessary because we use sample data. If you're retrieving the data from a database you only need to load the necessary data by using SQL's LIMIT.
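With a real table the sorting and paging would normally be pushed into the query itself, roughly like this (a sketch with a made-up table name; column names cannot go through prepare(), so they are whitelisted first):

global $wpdb;
$orderby = ( ! empty( $_GET['orderby'] ) ) ? $_GET['orderby'] : 'booktitle';
$order   = ( ! empty( $_GET['order'] ) && 'desc' === strtolower( $_GET['order'] ) ) ? 'DESC' : 'ASC';
// Whitelist the column name - never put user input into SQL directly.
if ( ! in_array( $orderby, array( 'booktitle', 'author', 'isbn' ), true ) ) {
    $orderby = 'booktitle';
}
$total_items = (int) $wpdb->get_var( "SELECT COUNT(*) FROM {$wpdb->prefix}books" );
$this->found_data = $wpdb->get_results(
    $wpdb->prepare(
        "SELECT * FROM {$wpdb->prefix}books ORDER BY $orderby $order LIMIT %d OFFSET %d",
        $per_page,
        ( $current_page - 1 ) * $per_page
    ),
    ARRAY_A
);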
Searching
If you have a huge amount of data a search field will simplify accessing certain items:
$myListTable->search_box('search', 'search_id');
The button text search is defined by the first parameter, the id of the input by the second parameter. The method creates the following output:
<p class="search-box">
<label class="screen-reader-text" for="search_id-search-input">
search:</label>
<input id="search_id-search-input" type="text" name="s" value="" />
<input id="search-submit" class="button" type="submit" name="" value="search" />
</p>
The method will place the input field and the search button on the right side and style it correctly. The <form> element is not generated. You have to add it manually, in our case this would be:
<form method="post">
<input type="hidden" name="page" value="my_list_test" />
<?php $this->search_box('search', 'search_id'); ?>
</form>
(The hidden element is needed to load the right page.)
To react to the search command you need to check the content of $_POST['s'] and filter your data accordingly before displaying the table.
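With the sample data that check could look like this inside prepare_items(), before sorting and paging (a sketch; with a database you would add a LIKE clause to the query instead):

$search = ( ! empty( $_POST['s'] ) ) ? trim( wp_unslash( $_POST['s'] ) ) : '';
if ( '' !== $search ) {
    $this->example_data = array_filter( $this->example_data, function( $book ) use ( $search ) {
        // Keep rows whose title or author contains the search term (case-insensitive).
        return false !== stripos( $book['booktitle'] . ' ' . $book['author'], $search );
    } );
}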
Screen options
All core backend pages containing a WP_List_Table provide a "Screen Options" slide-in where the user can adjust the columns to be shown and the number of rows to be displayed.
To add options to your plugin you need to change your current code. First you have to make sure that the screen options are displayed only on the current page:
$hook = add_menu_page('My Plugin List Table', 'My List Table Example', 'activate_plugins', 'my_list_test', 'my_render_list_page');
add_action( "load-$hook", 'add_options' );
function add_options() {
$option = 'per_page';
$args = array(
'label' => 'Books',
'default' => 10,
'option' => 'books_per_page'
);
add_screen_option( $option, $args );
}
This only displays the option field and apply button, saving and loading the data has to be defined separately. WordPress provides a filter called set-screen-option to take care of this:
add_filter('set-screen-option', 'test_table_set_option', 10, 3);
function test_table_set_option($status, $option, $value) {
return $value;
}
The option is stored in the table usermeta in the database so each user has his own setting. To retrieve the option and adjust the table display accordingly the method prepare_items has to be altered (excerpt):
function prepare_items() {
[..]
//paging
$per_page = $this->get_items_per_page('books_per_page', 5);
$current_page = $this->get_pagenum();
[...]
Instead of simply assigning a number the user specified value is loaded. If the user hasn't changed the value there is no such option stored in the database and a default value is taken.
Adding the checkboxes for hiding/showing the columns is done by WordPress automatically. You just have to make sure that your derived class is instantiated before the screen option panel is rendered so that the parent class can retrieve the column names. To accomplish this the corresponding code is moved into the method add_options():
function add_options() {
global $myListTable;
$option = 'per_page';
$args = array(
'label' => 'Books',
'default' => 10,
'option' => 'books_per_page'
);
add_screen_option( $option, $args );
$myListTable = new My_Example_List_Table;
}
The user's selections are automatically saved via Ajax functions. Nevertheless you have to take care yourself that the columns are hidden when the page is loaded initially. The method get_column_info() returns all, the hidden and the sortable columns. In the method prepare_items() instead of
$columns = $this->get_columns();
$hidden = array();
$sortable = $this->get_sortable_columns();
$this->_column_headers = array($columns, $hidden, $sortable);
it's now
$this->_column_headers = $this->get_column_info();
and the columns are set according to the screen options.
Note: you should avoid some strings as key names since they are treated specially by WordPress:
$special = array('_title', 'cb', 'comment', 'media', 'name', 'title', 'username', 'blogname');
Your table would still work, but you won't be able to show/hide the columns.
Styling the table
Currently the table is styled to the WordPress defaults. To change this you have to adapt the CSS classes which are automatically assigned to each column. The class name consists of the string "column-" and the key name of the $columns array, e.g. "column-isbn" or "column-author". As an example the width of the columns will be redefined (for simplicity the style data is written directly into the HTML header):
function __construct() {
[...]
add_action( 'admin_head', array( &$this, 'admin_header' ) );
[...]
}
function admin_header() {
$page = ( isset($_GET['page'] ) ) ? esc_attr( $_GET['page'] ) : false;
if( 'my_list_test' != $page )
return;
echo '<style type="text/css">';
echo '.wp-list-table .column-id { width: 5%; }';
echo '.wp-list-table .column-booktitle { width: 40%; }';
echo '.wp-list-table .column-author { width: 35%; }';
echo '.wp-list-table .column-isbn { width: 20%; }';
echo '</style>';
}
Other customizations
If there are no items in the list the standard message is "No items found." is displayed. If you want to change this message you can overwrite the method no_items():
function no_items() {
_e( 'No books found, dude.' );
}
Download complete WP_List_Table example (gist) or see the Gist
We’ve Lost A Very Good Friend, WordPress Enthusiast And An Important Part Of WPEngineer!
Sadness has descended on WPEngineer last week. Michael Preuß, one of our WPEngineer Team, died suddenly after a short illness and after a memorable life.
Frank and I are still in shock and we still can't believe that Michael left us without saying goodbye. He leaves a big gap in our lives. He was not only our teammate at WPEngineer, he was also a very good friend who shared the same love for WordPress and the community as we do.
We didn't meet very often in person, since we live quite far away from each other. But when we did, we always had a blast and fun talking about WordPress and other stuff that was going on around the world. Most of the time, though, we were talking on the phone for hours, literally.
This time our reunion was one of Frank's and my saddest moments. We had to carry Michael to his grave. It was painful, incomprehensible and unbearable at once. Unusually for us, not one word about WordPress; this time we had to mourn the loss of our beloved friend Michael.
Feb 2009 - Matt meets WPEngineer: Frank, Michael, Matt, Alex
Just 4 years ago we got to know each other and became very good friends. Michael, Frank and I had the urge to give the WordPress community something back, a platform for writing tips, tricks and solutions for the WordPress community - WPEngineer was born! Frank and I will continue to publish future posts, despite the loss we have.
We will also help his partner and friend Heiko, to continue Michael's life's work Xtreme Theme.
He left his wonderful and caring wife Carola and his two sons Robert and Alexander behind, and I'm sure, many other WordPress Junkies, like we are, who still can't believe he is gone forever!
You will always be in our Source Code!
In deepest sympathy
Alex & Frank
Stop SOPA!
What is this all about: Help Stop SOPA/PIPA
Turn off your website on January 18th for 12 hours.
A simple and fast way to turn the website off is the plugin WP Maintenance Mode, originally meant for activating a maintenance mode, but it easily covers this requirement. Usefully, the plugin sends a 503 Service Unavailable header.
Configure the following settings in the plugin:
Code for the text field, the video markup:
<iframe src="http://player.vimeo.com/video/31100268?byline=0&portrait=0" width="400" height="225" frameborder="0" webkitAllowFullScreen mozallowfullscreen allowFullScreen></iframe>
<p><a href="http://vimeo.com/31100268">PROTECT IP / SOPA Breaks The Internet</a> from <a href="http://vimeo.com/fightforthefuture">Fight for the Future</a>.</p>
Here is the result for visitors who are not signed in:
What is An SSL Certificate?
SSL is an acronym for Secure Sockets Layer, a standard security technology for establishing an encrypted connection between a web server or web host (website) and a browser. This link ensures that all data passed between the web server and the browser remains private and intact. SSL is an industry standard and is used by millions of websites to protect the online transaction information of their members or customers. Set up SSL on your domain for better security.
An SSL certificate is a small data file that digitally binds a cryptographic key to an organization's details. When installed on a web server, it activates the padlock and the https protocol and allows secure connections from the web server to the browser.
SSL is used to secure credit card transactions, data transfers and logins, and to secure browsing of social media sites.
To use "https", you are required to have an SSL certificate. The certificate is actually special data identifying your server and informing Internet browsers how to encrypt the data that is sent to it. When you use an SSL certificate, you assure your eCommerce customers that they are downloading data from your site only. Your SSL certificate is only valid with your website's domain name.
For more information regarding hosting, visit reseller99.
Chromebook Stolen – How to disable it or locate it?
Many devices have ways to track them down if they are lost or stolen. Unfortunately, Chromebooks are not equipped with a GPS device that could be used for tracking, so there is no location feature for Chromebooks.
If you leave a logged-in Chromebook unattended, whoever picks it up can do all sorts of mischief, including changing your Gmail password and taking over your account. They will have full access to all your saved passwords.
Chromebooks are extremely secure – but only when they are locked or logged out or powered off. Simply pressing the lock key on the keyboard is all it takes. You should change all your passwords immediately, and make sure you have enabled Google Two Step Verification for your account.
To find out your Chromebook’s logon history do the following:
1. Login to the Google Apps Admin Console
2. Click Device Management > Chrome > Devices
3. Locate your Chromebook in the list of enrolled devices. If you have the serial number you can use the search feature.
4. Once you’ve found your Chromebook in the list, click it to view the details of the device:
5. Click Recent Activity to view a list of people who recently logged onto the device and when.
When out of my house, for security I do one of the following:
1. Shut Chromebook down when I am finished using it (Ctrl-Shift-QQ takes ~ 1 second to shut down)
2. Press the Power button briefly to enable screen lock
3. For automatic screen lock, enable this setting.
Disabling a stolen Chromebook
Google has provided some great new tools to deal with stolen or lost Chromebooks. You can disable a Chromebook until it is returned. You can also add a message to the logon screen so that anyone using it is under no illusions about who that Chromebook belongs to.
1. Login to the Google Apps Admin Console
2. Click Device Management > Chrome > Devices
3. Locate your Chromebook in the list of enrolled devices. If you have the serial number you can use the search feature.
4. Once you’ve found your Chromebook in the list, click it to view the details of the device.
5. On the left click the drop down menu and select Disable from the list:
6. Then, in the new window, click Disable to stop the Chromebook being usable.
7. You can also visit the “Disabled device return instructions” page (Device Management > Chrome > Device Settings) page to enter a custom message up to 512 characters giving information about how the device can be returned to the school.
How do I delete a group on facebook Iphone?
If you’re like most people, you probably have a few too many Facebook groups. Here’s how to delete them.
1. Open the Facebook app on your iPhone and tap the Groups tab at the bottom.
2. Tap the group you want to delete.
3. Tap the three dots in the top-right corner of the group.
4. Tap Leave Group.
5. Tap Leave Group again to confirm.
How to Delete a Facebook Group
Assuming you are an admin of the group:
1. Go to the group page
2. Click on “Edit Group Settings”
3. Scroll down to “Delete Group”
4. Click on “Delete Group” and confirm
Assuming you are not an admin of the group:
1. Go to the group page
2. Click on “Leave Group”
3. Click on “Leave Group” again to confirm
Note: Once a group is deleted, it cannot be recovered.
Frequently Asked Questions with answer of How do I delete a group on facebook Iphone?
What happens when you delete a Facebook group?
When you delete a Facebook group, it is permanently removed from the site. All group members are also removed from the group, and the group’s page is no longer accessible.
How do I delete a group on Facebook that I made?
If you’re the creator or admin of a group, you can delete it at any time. To delete a group:
Click Groups in the left side menu of your News Feed.
Click More next to the group you’d like to delete.
Click Delete Group.
Click Delete Group again to confirm.
How do you delete a Facebook group on mobile?
If you’re an admin of a group, you can delete it from your group settings.
To delete a group:
1. Go to your group settings.
2. Tap Members.
3. Scroll to the bottom and tap Leave and Delete.
4. Tap Delete Group.
What happens if I delete a group?
If you delete a group, all members of that group will be removed and will no longer have access to the group’s resources. If you delete a group, you will also lose any permissions you have to that group’s resources.
How do I get rid of groups?
If you’re trying to get rid of a group, the best way to do so is to simply leave the group. You can also delete the group if you’re the owner, but this will remove the group entirely and it can’t be undone. If you just want to get rid of the group without deleting it, you can also archive the group. This will hide the group from your profile and remove it from your list of groups, but it won’t delete the group entirely.
Is it rude to leave a WhatsApp group?
WhatsApp groups can be a great way to stay in touch with friends and family, but they can also be a source of annoyance, especially if you’re constantly being bombarded with messages. If you’re thinking of leaving a WhatsApp group, you may be wondering if it’s considered rude.
The short answer is that it’s not necessarily rude to leave a WhatsApp group, but it depends on the situation. If you’re the one who created the group, it might be seen as rude to leave without giving a reason. Additionally, if you’re the only person in the group who is active, it might be viewed as rude to leave withoutnotice.
Of course, there are always exceptions to the rule. If someone in the group is constantly sending messages that are irrelevant to you, or if the group is becoming a distraction, it might be perfectly acceptable to leave.
In the end, it’s up to you to decide whether or not leaving a WhatsApp group is rude. If you’re not sure, it’s always best to err on the side of caution and leave a polite message before you go.
How do I delete a group from my iPhone?
If you want to delete a group from your iPhone, you can do so by following these simple steps:
1. Open the “Groups” app on your iPhone.
2. Tap on the group that you want to delete.
3. Tap on the “Delete” button.
4. Confirm that you want to delete the group.
That’s all you need to do to delete a group from your iPhone.
Why do people leave group?
There are a number of reasons why people might leave a group. Some common reasons include:
-Dissatisfaction with the group or its purpose
-Dissatisfaction with the other members of the group
-Conflicts with other members
-Feeling like they don’t fit in
-Lack of time
-Geographic location
People leave groups for many different reasons. Some might be unhappy with the group itself or the other members. Others might have conflicting personalities. Some might simply not have the time to dedicate to the group. Others might live too far away from the other members.
How do I leave a group gracefully?
Leaving a group gracefully can be difficult, especially if you have been a part of the group for a long time. It is important to remember that you are not obligated to stay in a group if you are no longer enjoying it or if it is no longer serving your needs. Here are a few tips for leaving a group gracefully:
1. Thank the group for your time.
2. Let the group know that you appreciate their support, but that you are moving on.
3. Avoid being negative about the group or its members.
4. Keep in mind that you may still see members of the group, so try to remain on good terms.
5. If possible, stay in touch with members of the group that you have developed a close relationship with.
Is there a way to delete all participants from a WhatsApp group?
Yes, there is a way to delete all participants from a WhatsApp group. Here’s how:
1. Open the WhatsApp group chat.
2. Tap on the group info icon (the i in a circle).
3. Tap on the group name at the top.
4. Tap on the Participants section.
5. Tap on the red trash can icon next to each participant.
6. Tap on the Delete button when prompted.
How do you edit groups on iPhone?
If you want to edit a group on your iPhone, tap the group name, then tap the group photo. To add or remove people from the group, tap , then do one of the following:
To add people, tap Add Contact, then choose the contacts that you want to add.
To remove people, tap the contact, then tap Remove.
How do I delete a contact group?
If you have a contact group that you no longer need, you can delete it. To do this:
1. Open your Gmail contacts.
2. Hover your mouse over the contact group you want to delete.
3. Click the checkbox that appears.
4. Click the “More” drop-down menu.
5. Select “Delete”.
6. Click “OK” to confirm.
Note that this will only delete the contact group, not the individual contacts themselves.
How do you remove a contact from a group?
To remove a contact from a group, open the group, tap the contact, then tap the delete button.
Conclusion
If you want to delete a group on Facebook that you created, go to the group, click More, and then select Delete Group. Only group admins can delete groups.
Use of oracles and off-chain data in smart contracts
“Empowering Smart Contracts with Real-World Insights: Oracles Bridge the Gap to Off-Chain Data.”
Smart contracts are self-executing contracts with the terms of the agreement directly written into code. They run on blockchain networks and are designed to automatically enforce and execute the terms of a contract when certain conditions are met. However, by their very nature, smart contracts are isolated from the external world and cannot access off-chain data or systems directly. This limitation is where oracles come into play.
Oracles are third-party services that provide smart contracts with external information, acting as a bridge between blockchains and the outside world. They play a crucial role in expanding the functionality of smart contracts by supplying them with the necessary data to execute transactions based on real-world events and information that is not natively available on the blockchain.
The use of oracles and off-chain data enables smart contracts to interact with external APIs, access real-time data feeds (such as price information, weather reports, or stock market data), and integrate with traditional banking systems. This capability is essential for many decentralized applications (dApps) that rely on accurate and timely information from the external world to function correctly, such as those in finance (DeFi), insurance, supply chain management, and various other industries where smart contracts can benefit from real-world data inputs.
Integrating Oracles for Enhanced Smart Contract Functionality
Smart contracts have revolutionized the way we think about executing agreements in a trustless environment. By automating the execution of contracts on the blockchain, they eliminate the need for intermediaries, thereby reducing costs and increasing efficiency. However, the functionality of smart contracts is inherently limited by their inability to access or verify real-world data independently. This is where oracles come into play, serving as a bridge between the blockchain and the outside world, and thereby significantly expanding the potential applications of smart contracts.
Oracles are third-party services that provide smart contracts with external information, which is crucial for many applications that require real-world data to function correctly. For instance, a smart contract for crop insurance would need accurate weather data to determine whether conditions have met the criteria for a payout. Without an oracle, the smart contract would be unable to access this data, rendering it ineffective for its intended purpose.
The integration of oracles into smart contracts introduces a new layer of functionality, allowing them to react to external events and execute accordingly. This capability opens up a myriad of possibilities across various industries, including finance, insurance, supply chain management, and more. Oracles can provide data such as price feeds, temperature readings, flight statuses, and even outcomes of sporting events, which can all be used to trigger specific actions within a smart contract.
However, the use of oracles also introduces a potential point of vulnerability. Since smart contracts are designed to be trustless and decentralized, relying on an external source for information could compromise these properties if the oracle becomes a single point of failure or is manipulated. To mitigate this risk, it is essential to ensure that the oracles used are reliable and secure. This can be achieved through the use of decentralized oracle networks, where multiple independent sources contribute data, and consensus mechanisms are used to validate the information before it is fed to the smart contract.
Moreover, the integration of off-chain data through oracles must be done with precision and care. The data provided by oracles must be timely, accurate, and tamper-proof to maintain the integrity of the smart contract’s execution. Developers must also consider the economic implications of using oracles, as there are often costs associated with accessing high-quality data. These costs must be balanced against the value that the data brings to the smart contract’s functionality.
In addition to providing data, oracles can also enable smart contracts to send messages and initiate transactions off-chain. This two-way interaction further extends the capabilities of smart contracts, allowing them to communicate with external systems and perform actions that were previously out of reach. For example, a smart contract could automatically restock inventory by placing an order with a supplier once a certain threshold is reached, all without human intervention.
The use of oracles and off-chain data is a critical development in the evolution of smart contracts. By incorporating external information, smart contracts can be applied to a broader range of real-world scenarios, making them more useful and relevant. As the technology continues to mature, we can expect to see more sophisticated oracle solutions that enhance the security, reliability, and efficiency of these interactions. The integration of oracles is not just an enhancement; it is a necessary step towards realizing the full potential of smart contracts and their ability to automate and streamline complex processes in a decentralized and trustless manner.
The Role of Off-Chain Data in Expanding Smart Contract Capabilities
Smart contracts have revolutionized the way we think about executing agreements in a trustless environment. By automating the execution of contracts on the blockchain, they eliminate the need for intermediaries, thereby reducing costs and increasing efficiency. However, the functionality of smart contracts is inherently limited by their reliance on the information available on the blockchain. This is where oracles come into play, serving as a bridge between the blockchain and the outside world, and thereby significantly expanding the capabilities of smart contracts.
Oracles are third-party services that provide smart contracts with access to off-chain data, which is data that resides outside the blockchain. This data can include anything from price feeds and weather information to the outcome of a sporting event or the status of a flight. By integrating off-chain data, smart contracts can react to real-world events and execute accordingly, making them more dynamic and applicable to a broader range of use cases.
The use of oracles introduces a new layer of functionality to smart contracts, allowing them to interact with external systems and make decisions based on data that is not inherently part of the blockchain. For instance, in the case of a decentralized insurance platform, a smart contract might need to know if a certain event, such as a natural disaster, has occurred to determine whether to release funds to policyholders. An oracle would be responsible for providing this information, ensuring that the smart contract has the necessary data to execute its terms accurately.
However, the integration of off-chain data via oracles also introduces potential challenges, particularly concerning security and trust. Since oracles are external to the blockchain, they can become points of vulnerability, potentially exposing smart contracts to manipulation or errors in data reporting. To mitigate these risks, it is crucial to implement robust mechanisms for data verification and to use multiple oracles for redundancy, ensuring that the smart contract does not rely on a single source of information.
Moreover, the design of oracles must be carefully considered to maintain the decentralized nature of smart contracts. Decentralized oracles, which aggregate data from various sources and employ consensus mechanisms to validate information, can help preserve the trustless environment by reducing reliance on any single point of failure. This approach aligns with the ethos of blockchain technology, where decentralization is a key tenet.
The integration of off-chain data through oracles is not just a technical enhancement; it has profound implications for the potential applications of smart contracts. With the ability to interact with the external world, smart contracts can be used in complex financial instruments, supply chain management, and even governance systems. They can automate processes that were previously thought to be beyond the reach of blockchain technology, creating opportunities for innovation and disruption across various industries.
In conclusion, the use of oracles and off-chain data is a critical development in the evolution of smart contracts. By enabling these digital agreements to access and respond to real-world information, oracles unlock new possibilities and use cases, extending the utility of blockchain technology far beyond its original scope. As the ecosystem continues to mature, the focus on creating secure, reliable, and decentralized oracle solutions will be paramount in ensuring that smart contracts can safely and effectively harness the power of off-chain data. This ongoing development will undoubtedly play a significant role in shaping the future landscape of decentralized applications and the broader blockchain industry.
Best Practices for Secure Oracle Use in Smart Contract Design
Smart contracts have revolutionized the way we think about executing agreements in a trustless environment. By automating transactions and other contractual obligations based on predefined rules, they eliminate the need for intermediaries. However, smart contracts inherently operate on the blockchain and are limited to the data available on-chain. This is where oracles come into play, serving as a bridge between the blockchain and the outside world. Oracles provide smart contracts with access to off-chain data, which is crucial for many applications, such as those in finance, insurance, and supply chain management. Nevertheless, the integration of oracles introduces new security considerations that must be addressed to maintain the integrity of smart contracts.
To ensure secure oracle use in smart contract design, it is essential to understand the types of oracles and the risks associated with them. Oracles can be software-based, fetching data from online sources, or hardware-based, receiving input from the physical world. Both types can be centralized, relying on a single source for data, or decentralized, aggregating data from multiple sources. Centralized oracles, while simpler and potentially more efficient, pose a significant risk as they introduce a single point of failure. Decentralized oracles, on the other hand, offer more security by distributing trust among various data points, but they can be more complex and costly to implement.
When incorporating oracles into smart contracts, developers must consider the source of the data. It is crucial to use reputable and reliable data providers with a proven track record of accuracy and uptime. The data source should be resistant to manipulation to prevent any potential tampering that could trigger incorrect contract execution. Additionally, the method of data transmission must be secure to prevent interception or alteration of the data before it reaches the smart contract.
Another best practice is to implement fail-safes and fallback mechanisms within the smart contract. These can include conditions that freeze contract execution if the data received is outside of certain parameters, indicating a possible compromise. Moreover, contracts can be designed to require multiple confirmations of data before taking action, adding an extra layer of security.
The use of consensus mechanisms among oracles can further enhance security. By requiring a majority of oracles to agree on the data before it is accepted by the smart contract, the risk of incorrect or malicious data affecting the contract’s outcome is reduced. This approach is particularly effective in decentralized oracle networks.
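To make the quorum idea concrete, here is a minimal sketch in Python (all names, thresholds and values are hypothetical, not taken from any particular oracle network) of an aggregator that accepts a reported value only when enough independent oracles agree within a tolerance:

# Minimal sketch of quorum-based oracle aggregation (hypothetical names and values).
def aggregate_oracle_reports(reports, quorum=3, tolerance=0.01):
    """Return the median report if a quorum of oracles agree, else None (fail-safe)."""
    if len(reports) < quorum:
        return None  # not enough independent sources; do not act
    reports = sorted(reports)
    median = reports[len(reports) // 2]
    # Keep only reports within the allowed relative deviation from the median.
    agreeing = [r for r in reports if abs(r - median) <= tolerance * abs(median)]
    if len(agreeing) >= quorum:
        return median  # value the smart contract may safely act on
    return None  # disagreement: trigger a fail-safe such as pausing payouts

# Example: three of four oracles agree on roughly 2500, one is an outlier.
print(aggregate_oracle_reports([2499.5, 2500.0, 2500.4, 3100.0]))

The same pattern, implemented on-chain or in an oracle network's aggregation layer, is what reduces the impact of any single faulty or malicious data source.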
Transparency is also key in the use of oracles. Smart contract developers should ensure that the process of data retrieval, transmission, and processing is transparent and verifiable by all parties involved. This transparency helps build trust in the oracle system and allows for easier identification and resolution of any issues that may arise.
Finally, regular audits and monitoring of both the smart contract and the oracle system are essential. Audits by third-party security firms can uncover potential vulnerabilities, while continuous monitoring can detect anomalies in real-time, allowing for swift response to any threats.
In conclusion, while oracles are indispensable for bringing external data into smart contracts, their integration must be handled with utmost care to maintain security. By carefully selecting data sources, implementing robust fail-safes, utilizing consensus mechanisms, ensuring transparency, and conducting regular audits and monitoring, developers can mitigate the risks associated with oracles. These best practices are not just recommendations but necessities for anyone looking to leverage the full potential of smart contracts in a secure and reliable manner.
Conclusion
The use of oracles and off-chain data in smart contracts is a critical innovation that enables blockchain-based applications to interact with external information, thereby expanding their functionality beyond the deterministic and closed environment of a blockchain. Oracles act as bridges between blockchains and the outside world, allowing smart contracts to execute based on real-world events and data. This integration is essential for many decentralized applications (dApps) that require accurate and timely information from the external world to operate effectively, such as in the cases of decentralized finance (DeFi), insurance, supply chain management, and prediction markets.
However, the reliance on oracles also introduces potential points of vulnerability and centralization, as the trustworthiness and security of the smart contract now depend on the oracle’s ability to provide accurate and tamper-proof data. Therefore, the development of decentralized oracle networks and trust-minimized mechanisms for off-chain data retrieval is crucial to ensure the reliability, security, and overall success of smart contracts that depend on external data.
Template for elastic indexing
Hello
I have created a template to improve indexing performance (and save space).
Where can I set string-type fields to not be analyzed? (text & keyword since ES 5.x). I think the best would be to not analyze all text fields.
Also, maybe I don't need meta fields such as source, score, the Beats info? ... Where can I disable some of them?
{
"order": 0,
"version": 50001,
"template": "ta-test-edr",
"settings": {
"index": {
"number_of_replicas": 0,
"number_of_shards" : 1,
"refresh_interval": "-1"
}
},
"mappings": {
"_default_": {
"dynamic_templates": [
{
"string_fields": {
"mapping": {
"norms": false,
"type": "text",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"match_mapping_type": "string",
"match": "*"
}
}
],
"_all": {
"norms": false,
"enabled": true
},
"properties": {
"@timestamp": {
"include_in_all": false,
"type": "date"
},
"geoip": {
"dynamic": true,
"properties": {
"ip": {
"type": "ip"
},
"latitude": {
"type": "half_float"
},
"location": {
"type": "geo_point"
},
"longitude": {
"type": "half_float"
}
}
},
"@version": {
"include_in_all": false,
"type": "keyword"
}
}
}
},
"aliases": {}
}
I believe disabling the fields I don't use would look like this:
"properties": {
"source" : { "enabled" : false },
"beat": { "enabled" : false },
"@version": { "enabled" : false },
"name": { "enabled" : false },
"version": { "enabled" : false },
"host": { "enabled" : false },
"input_type": { "enabled" : false },
"tags": { "enabled" : false },
"type": { "enabled" : false } }
OK, I believe it works fine with this template:
PUT _template/template_edr
{
"order": 0,
"version": 50001,
"template": "edr-*",
"settings": {
"index": {
"number_of_replicas": 0,
"number_of_shards" : 1,
"refresh_interval": "-1"
}
},
"mappings": {
"_default_": {
"dynamic_templates": [
{
"string_fields": {
"mapping": {
"norms": false,
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"index": "not_analyzed"
}
}
},
"match_mapping_type": "string",
"match": "*"
}
}
],
"_all": {
"norms": false,
"enabled": true
},
"properties": {
"edr_Granctets": {
"type": "long"
},
"edr_EI": {
"type": "long"
},
"edr_Janomer": {
"type": "byte"
},
"edr_MC": {
"type": "integer"
},
"edr_DN": {
"type": "long"
},
"edr_MasDN": {
"type": "long"
},
"edr_ModonFlag": {
"type": "byte"
},
"edr_ParlFlag": {
"type": "byte"
},
"edr_SurId": {
"type": "long"
},
"edr_Usimit": {
"type": "long"
},
"edr_Usetets": {
"type": "long"
},
"@timestamp": {
"include_in_all": false,
"type": "date"
},
"geoip": {
"enabled": false
},
"source" : {
"enabled" : false
},
"beat": {
"enabled" : false
},
"@version": {
"include_in_all": false,
"type": "keyword"
},
"name": {
"enabled" : false
},
"host": {
"enabled" : false
},
"input_type": {
"enabled" : false
},
"tags": {
"enabled" : false
},
"type": {
"enabled" : false
}
}
}
},
"aliases": {}
}
How can I know whether my settings are correctly understood? Thank you.
Hmmm, I have some errors in the Elasticsearch log:
[2017-04-07T17:28:45,155][WARN ][o.e.d.i.m.StringFieldMapper$TypeParser] The [string] field is deprecated, please use [text] or [keyword] instead on [uiStateJSON]
[2017-04-07T17:28:45,155][WARN ][o.e.d.i.m.StringFieldMapper$TypeParser] The [string] field is deprecated, please use [text] or [keyword] instead on [description]
[2017-04-07T17:28:45,156][WARN ][o.e.d.i.m.StringFieldMapper$TypeParser] The [string] field is deprecated, please use [text] or [keyword] instead on [savedSearchId]
[2017-04-07T17:28:45,156][WARN ][o.e.d.i.m.StringFieldMapper$TypeParser] The [string] field is deprecated, please use [text] or [keyword] instead on [searchSourceJSON]
[2017-04-07T17:28:45,156][WARN ][o.e.d.i.m.StringFieldMapper$TypeParser] The [string] field is deprecated, please use [text] or [keyword] instead on [title]
[2017-04-07T17:28:45,156][WARN ][o.e.d.i.m.StringFieldMapper$TypeParser] The [string] field is deprecated, please use [text] or [keyword] instead on [visState]
[2017-04-07T17:28:45,157][WARN ][o.e.d.i.m.StringFieldMapper$TypeParser] The [string] field is deprecated, please use [text] or [keyword] instead on [uiStateJSON]
[2017-04-07T17:28:45,157][WARN ][o.e.d.i.m.StringFieldMapper$TypeParser] The [string] field is deprecated, please use [text] or [keyword] instead on [description]
[2017-04-07T17:28:45,157][WARN ][o.e.d.i.m.StringFieldMapper$TypeParser] The [string] field is deprecated, please use [text] or [keyword] instead on [savedSearchId]
[2017-04-07T17:28:45,157][WARN ][o.e.d.i.m.StringFieldMapper$TypeParser] The [string] field is deprecated, please use [text] or [keyword] instead on [searchSourceJSON]
So I do use text in my template, no? :confused:
That is what you have here:
"string_fields": {
"mapping": {
"norms": false,
"type": "text",
"fields": {
"keyword": {
"type": "keyword"
}
}
}
Not sending them at all is the easiest way :slight_smile: But you can disable source (as it is an ES native field) if you really want.
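For what it's worth, a minimal sketch of disabling the _source meta field in a 5.x index template mapping (assuming that is the field meant here; note that documents without _source cannot be reindexed or updated from the index itself):

"mappings": {
  "_default_": {
    "_source": {
      "enabled": false
    }
  }
}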
@warkolm
"source" : {
"enabled" : false
},
Is that it? Or do you mean removing the source field directly in Logstash with remove_field?
So, I want to remove fields like input_type, beat.*, _score, _id??? (I have disabled them in the template but I continue to see them in Discover.)
About the deprecation errors, why? I do use text and keyword, no? @Christian_Dahlqvist
That.
You cannot remove _id, if that is what you mean.
Please don't ping people like that.
Have a look at https://www.elastic.co/guide/en/elasticsearch/reference/5.3/breaking_50_mapping_changes.html#_literal_string_literal_fields_replaced_by_literal_text_literal_literal_keyword_literal_fields
I understand @warkolm
If I do this:
mutate {
remove_field => [ "message", "beat", "input_type", "type", "tags", "host" ]
}
Is that also good?
So, I have been getting this error for a little while:
It's because of the template (see above); when I delete my template I don't get the error :confused:
Shouldn't the default be norms: false on all keyword fields?
Yes, I copied it from the default Logstash template (GET /_template).
Why??
So I see all fields (numeric and text) on the index pattern page, but I still always see this error message in Discover.
If I want a few numeric data fields to not be analyzed, do I just put:
"numeric_field": {
  "type": "long",
  "index": "not_analyzed"
}
?
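On that last point, a sketch of the 5.x way of doing it: numeric types such as long are never analyzed, so not_analyzed does not apply to them. To stop indexing (searching on) a numeric field altogether, the mapping would look roughly like this:

"numeric_field": {
  "type": "long",
  "index": false
}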
This topic was automatically closed 28 days after the last reply. New replies are no longer allowed.
A first look at Playbook scripts
Contents
1. A first look at Playbook scripts
1. What is a playbook? "Playbook" literally translates as a "play script". A playbook is built from the following pieces:
play: defines the hosts to act on and the role/tasks applied to them
task: defines one specific action, implemented by calling a module
playbook: consists of one or more plays, and each play can contain multiple tasks
Simple understanding: use different modules to accomplish one thing
2. Advantages of Playbook
1. More functionality than ad-hoc commands
2. Better control over execution order and dependencies between tasks
3. More intuitive syntax
4. Ad-hoc commands cannot be persisted, while a playbook can be saved and reused
3. Playbook configuration syntax is written in YAML, and the file extension is .yml (or .yaml)
• Indentation
• YAML uses a fixed indentation style to represent hierarchy; each level is indented with two spaces, and tabs cannot be used
• Colon
• Every colon must be followed by a space, except when the colon ends the line
• Dash
• A dash marks a list item; the dash must be followed by a space
• Items at the same indentation level belong to the same list
#playbook example
[root@manager ~]# cat f1.yml
---
- hosts: all
remote_user: root
vars:
file_name: xuliangwei
tasks:
- name: Create New File
file: name=/tmp/{{ file_name }} state=touch
#check the syntax
ansible-playbook --syntax-check f1.yml
#playbook execution
[root@manager ~]# ansible-playbook f1.yml
PLAY [all] ********************************************************************************************************************************
TASK [Gathering Facts] ********************************************************************************************************************
ok: [10.0.0.30]
TASK [Create New File] *******************************************************************************************************************
changed: [10.0.0.30]
PLAY RECAP ********************************************************************************************************************************
10.0.0.30 : ok=2 changed=1 unreachable=0 failed=0
Playbook execution results are color-coded:
Red: Information indicating a task execution failure or reminder
Yellow: indicates that the remote host state has been executed and changed
Green: Successful execution
2.Playbook variable use
Playbook defines variables in three ways
1) Define variable assignment in playbook's yaml file
2) --extra-vars execution parameters assigned to variables
3) Define variables in the file
1. Define variable assignment in playbook's yaml file
#Definition in playbook
[root@manager ~]# cat f2.yml
- hosts: all
vars: #Define Variables
file_name: bgx_yaml_vars
tasks:
- name: Create New File # {{ file_name }} refers to the variable defined above
file: path=/tmp/{{ file_name }} state=touch
#Running the playbook creates the bgx_yaml_vars file in the /tmp directory
[root@manager ~]# ansible-playbook f2.yml
2. --extra-vars execution parameters assign to variables
#Reference variable in playbook
[root@manager ~]# cat f3.yml
- hosts: all
tasks:
- name: Create New File
file: path=/tmp/{{ file_name }} state=touch
#Run the playbook passing the file_name variable as a parameter; this creates the bgx_extra-vars file in the /tmp directory
[root@manager ~]# ansible-playbook f3.yml --extra-vars "file_name=bgx_extra-vars"
3) Define variables in a file: you can define them for a host group in /etc/ansible/hosts and then reference them from the playbook
#Define variables in files
[root@manager ~]# cat /etc/ansible/hosts
[nfs]
10.0.0.20
[nfs:vars]
file_name=bgx_filename
#Call the variable in Playbook
[root@manager ~]# cat f4.yml
---
- hosts: all
tasks:
- name: Create New File
file: path=/tmp/{{ file_name }} state=touch
#Running the playbook creates the bgx_filename file in the /tmp directory
If the variables defined are duplicated and cause conflicts, the priority is as follows:
1. Arguments passed externally with --extra-vars have the highest priority [effective for all target hosts] (see the sketch below)
2. Variables defined in the yml file come next [effective for all target hosts]
3. Variables defined in the hosts file have the lowest priority [effective only for the host group where they are defined]
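A quick sketch of that precedence, reusing the f4.yml example above (the value from_cli is just a made-up illustration): the -e/--extra-vars value given on the command line wins over the variable defined in /etc/ansible/hosts:

#file_name=bgx_filename is set in /etc/ansible/hosts, but the command-line value wins
[root@manager ~]# ansible-playbook f4.yml -e "file_name=from_cli"
#The task then creates /tmp/from_cli instead of /tmp/bgx_filename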
3.Playbook variable registration
1) Register variables: The register keyword stores the output of the specified command into a custom variable
[root@manager ~]# cat f5.yml
---
- hosts: all
tasks:
- name:
shell: netstat -lntp
register: System_Status
- name: Get System Status
debug: msg={{System_Status.stdout_lines}}
#playbook execution results
[root@manager ~]# ansible-playbook f5.yml
PLAY [all] ********************************************************************************************************************************
TASK [Gathering Facts] ********************************************************************************************************************
ok: [10.0.0.30]
TASK [shell] ******************************************************************************************************************************
changed: [10.0.0.30]
TASK [Get System Status] ******************************************************************************************************************
ok: [10.0.0.30] => {
"msg": [
"tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 925/sshd ",
"tcp6 0 0 :::22 :::* LISTEN 925/sshd "
]
}
PLAY RECAP ********************************************************************************************************************************
10.0.0.30 : ok=3 changed=1 unreachable=0 failed=0
4.Playbook conditional statements
Conditional statements in a playbook use the when keyword
[root@manager ~]# cat f6.yml
- hosts: all
remote_user: root
tasks:
- name: Create File
file: path=/tmp/this_is_{{ ansible_hostname }}_file state=touch
when: (ansible_hostname == "nfs") or (ansible_hostname == "backup")
#Only when the system is centos's host will it execute
- name: Centos Install httpd
yum: name=httpd state=present
when: (ansible_distribution == "CentOS")
#Only if the system is a ubuntu host will it execute
- name: Ubuntu Install httpd
yum: name=httpd2 state=present
when: (ansible_distribution == "Ubuntu")
#playbook execution results:
[root@manager ~]# vim f6.yml
[root@manager ~]# ansible-playbook f6.yml
PLAY [all] ********************************************************************************************************************************
TASK [Gathering Facts] ********************************************************************************************************************
ok: [10.0.0.30]
TASK [Create File] ************************************************************************************************************************
skipping: [10.0.0.30] #Host name mismatch is skipped and matching creates a file
PLAY RECAP ********************************************************************************************************************************
10.0.0.30 : ok=1 changed=0 unreachable=0 failed=0
5.Playbook Loop Statement
1. Standard Reuse Scenario - Bulk Installation Software
[root@manager ~]# cat f7.yml
---
- hosts: all
remote_user: root
tasks:
- name: Installed Pkg
yum: name={{ item }} state=present
with_items:
- wget
- tree
- lrzsz
#palybook execution results
[root@manager ~]# ansible-playbook f7.yml
PLAY [all] ********************************************************************************************************************************
TASK [Gathering Facts] ********************************************************************************************************************
ok: [10.0.0.30]
TASK [Installed Pkg] **********************************************************************************************************************
ok: [10.0.0.30] => (item=[u'wget', u'tree', u'lrzsz'])
PLAY RECAP ********************************************************************************************************************************
10.0.0.30 : ok=2 changed=0 unreachable=0 failed=0
2. Standard reuse scenarios - creating users in bulk
[root@manager ~]# cat f7.yml
- hosts: all
remote_user: root
tasks:
- name: Add Users
user: name={{ item.name }} groups={{ item.groups }} state=present
with_items:
- { name: 'testuser1', groups: 'bin' }
- { name: 'testuser2', groups: 'root' }
#palybook execution results
[root@manager ~]# ansible-playbook f7.yml
PLAY [all] ********************************************************************************************************************************
TASK [Gathering Facts] ********************************************************************************************************************
ok: [10.0.0.30]
TASK [Add Users] **************************************************************************************************************************
changed: [10.0.0.30] => (item={u'name': u'testuser1', u'groups': u'bin'})
changed: [10.0.0.30] => (item={u'name': u'testuser2', u'groups': u'root'})
PLAY RECAP ********************************************************************************************************************************
10.0.0.30 : ok=2 changed=1 unreachable=0 failed=0
3. Standard reuse scenarios - Copy multiple directories
[root@manager ~]# cat f7.yml
- hosts: all
remote_user: root
tasks:
- name: Configure Rsync Server
copy: src={{ item.src }} dest=/etc/{{ item.dest }} mode={{ item.mode }}
with_items:
- {src: "rsyncd.conf", dest: "rsyncd.conf", mode: "0644"}
- {src: "rsync.passwd", dest: "rsync.passwd", mode: "0600"}
6.Playbook exception handling
By default, a playbook checks the return status of each command and module and aborts execution if an error occurs. Adding the parameter ignore_errors: yes makes the playbook ignore the error and continue.
[root@manager ~]# cat f9.yml
---
- hosts: all
remote_user: root
tasks:
- name: Ignore False
command: /bin/false
ignore_errors: yes
- name: touch new file
file: path=/tmp/bgx_ignore state=touch
Errors are skipped while the playbook runs:
[root@manager ~]# ansible-playbook f9.yml
PLAY [all] ********************************************************************************************************************************
TASK [Gathering Facts] ********************************************************************************************************************
ok: [10.0.0.30]
TASK [Ignore False] ***********************************************************************************************************************
fatal: [10.0.0.30]: FAILED! => {"changed": true, "cmd": ["/bin/false"], "delta": "0:00:00.002819", "end": "2018-11-13 07:22:47.301758", "msg": "non-zero return code", "rc": 1, "start": "2018-11-13 07:22:47.298939", "stderr": "", "stderr_lines": [], "stdout": "", "stdout_lines": []}
...ignoring
TASK [touch new file] *********************************************************************************************************************
changed: [10.0.0.30]
PLAY RECAP ********************************************************************************************************************************
10.0.0.30 : ok=3 changed=2 unreachable=0 failed=0
7.Playbook tags
1. Labeling
A tag labels an object: one object can carry many tags, and one tag can be attached to many objects
2. Tag usage: tags are bound to task objects so that only part of the tasks, or specific tasks, are executed
-t: run only the tasks with the specified tags
--skip-tags: run all tasks except those with the specified tags
[root@manager ~]# cat f10.yml
---
- hosts: all
remote_user: root
tasks:
- name: Install Nfs Server
yum: name=nfs-utils state=present
tags:
- install_nfs
- install_nfs-server
- name: Service Nfs Server
service: name=nfs-server state=started enabled=yes
tags: start_nfs-server
#Perform playbook normally
[root@manager ~]# ansible-playbook f10.yml
PLAY [all] ********************************************************************************************************************************
TASK [Gathering Facts] ********************************************************************************************************************
ok: [10.0.0.30]
TASK [Install Nfs Server] *****************************************************************************************************************
ok: [10.0.0.30]
TASK [Service Nfs Server] *****************************************************************************************************************
ok: [10.0.0.30]
PLAY RECAP ********************************************************************************************************************************
10.0.0.30 : ok=3 changed=0 unreachable=0 failed=0
Use -t to specify tags to execute, multiple tags separated by commas
[root@manager ~]# ansible-playbook -t install_nfs-server f10.yml
PLAY [all] ********************************************************************************************************************************
TASK [Gathering Facts] ********************************************************************************************************************
ok: [10.0.0.30]
TASK [Install Nfs Server] *****************************************************************************************************************
ok: [10.0.0.30]
PLAY RECAP ********************************************************************************************************************************
10.0.0.30 : ok=2 changed=0 unreachable=0 failed=0
Use --skip-tags to exclude tags that are not executed
[root@manager ~]# ansible-playbook --skip-tags install_nfs-server f10.yml
PLAY [all] ********************************************************************************************************************************
TASK [Gathering Facts] ********************************************************************************************************************
ok: [10.0.0.30]
TASK [Service Nfs Server] *****************************************************************************************************************
ok: [10.0.0.30]
PLAY RECAP ********************************************************************************************************************************
10.0.0.30 : ok=2 changed=0 unreachable=0 failed=0
8.Playbook Handlers
playbook Install Apache Example
[root@m01 ~]# cat webserver.yml
- hosts: web
remote_user: root
#1. Define variables and call them in the configuration file
vars:
http_port: 8881
#2. Install the httpd service
tasks:
- name: Install Httpd Server
yum: name=httpd state=present
#3. Using the template, reference the variables defined by vars above to the configuration file
- name: Configure Httpd Server
template: src=./httpd.conf dest=/etc/httpd/conf/httpd.conf
notify: Restart Httpd Server
#4. Start the Httpd service
- name: Start Httpd Server
service: name=httpd state=started enabled=yes
#5. Check the current running port status of the Httpd service
- name: Get Httpd Server Port
shell: netstat -lntp|grep httpd
register: Httpd_Port
#6. Output the status of Httpd running to the panel
- name: Out Httpd Server Status
debug: msg={{ Httpd_Port.stdout_lines }}
ignore_errors: yes
#6. If the configuration file changes, the module below the handlers will be called
handlers:
- name: Restart Httpd Server
service: name=httpd state=restarted
9.Playbook Include
Include is used to dynamically include task lists; include_tasks is the newer form and include the older one
Calling tasks with include:
#Main Entry File
[root@mha ~]# cat main.yml
- hosts: all
remote_user: root
tasks:
- include_tasks: f20.yml
- include_tasks: f21.yml
#f20.yml
[root@mha ~]# cat f20.yml
- name: create file1
command: touch file1
#f21.yml
[root@mha ~]# cat f21.yml
- name: create file2
command: touch file2
Interface
GdkDevicePad
Description
interface Gdk.DevicePad : Gdk.Device
GdkDevicePad is an interface implemented by devices of type GDK_SOURCE_TABLET_PAD.
It allows querying the features provided by the pad device.
Tablet pads may contain one or more groups, each containing a subset of the buttons/rings/strips available. gdk_device_pad_get_n_groups() can be used to obtain the number of groups, and gdk_device_pad_get_n_features() and gdk_device_pad_get_feature_group() can be combined to find out how many buttons/rings/strips the device has and how they are grouped.
Each of those groups has different modes, which may be used to map each individual pad feature to multiple actions. Only one mode is effective (current) for each given group; different groups may have different current modes. The number of available modes in a group can be found out through gdk_device_pad_get_group_n_modes(), and the current mode for a given group will be notified through events of type GDK_PAD_GROUP_MODE.
Prerequisite
In order to implement DevicePad, your type must inherit from GdkDevice.
Instance methods
gdk_device_pad_get_feature_group
Returns the group the given feature and idx belong to.
gdk_device_pad_get_group_n_modes
Returns the number of modes that group may have.
gdk_device_pad_get_n_features
Returns the number of features a tablet pad has.
gdk_device_pad_get_n_groups
Returns the number of groups this pad device has.
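A short sketch of how these methods might be combined to enumerate a pad, assuming pad already refers to a device of source GDK_SOURCE_TABLET_PAD:

/* Sketch: list the groups, modes and buttons of a tablet pad. */
#include <gdk/gdk.h>

static void
inspect_pad (GdkDevicePad *pad)
{
  gint n_groups = gdk_device_pad_get_n_groups (pad);
  gint n_buttons = gdk_device_pad_get_n_features (pad, GDK_DEVICE_PAD_FEATURE_BUTTON);
  gint i;

  for (i = 0; i < n_groups; i++)
    g_print ("group %d has %d modes\n", i,
             gdk_device_pad_get_group_n_modes (pad, i));

  for (i = 0; i < n_buttons; i++)
    g_print ("button %d belongs to group %d\n", i,
             gdk_device_pad_get_feature_group (pad, GDK_DEVICE_PAD_FEATURE_BUTTON, i));
}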
Interface structure
struct GdkDevicePadInterface {
/* no available fields */
}
No description available.
ode
ordinary differential equation solver
Calling Sequence
y=ode(y0,t0,t,f)
[y,w,iw]=ode([type,]y0,t0,t [,rtol [,atol]],f [,jac] [,w,iw])
[y,rd,w,iw]=ode("root",y0,t0,t [,rtol [,atol]],f [,jac],ng,g [,w,iw])
y=ode("discrete",y0,k0,kvect,f)
Arguments
y0
real vector or matrix, the initial conditions.
t0
real scalar, the initial time.
t
real vector, the times at which the solution is computed.
f
function, external, string or list, the right hand side of the differential equation.
type
a string, the solver to use. The available solvers are "adams", "stiff", "rk", "rkf", "fix", "discrete" and "roots".
rtol
real constant or real vector of the same size as y, the relative tolerance.
atol
real constant or real vectors of the same size as y, the absolute tolerance.
jac
a function, external, string or list, the Jacobian of the function f.
w,iw
real vectors.
ng
integer.
g
external (function or character string or list).
k0
integer (initial time).
kvect
integer vector.
Description
ode solves explicit Ordinary Differential Equations defined by dy/dt = f(t, y), with initial condition y(t0) = y0.
It is an interface to various solvers, in particular to ODEPACK.
In this help, we only describe the use of ode for standard explicit ODE systems.
The simplest call of ode is: y=ode(y0,t0,t,f) where y0 is the vector of initial conditions, t0 is the initial time, t is the vector of times at which the solution y is computed and y is matrix of solution vectors y=[y(t(1)),y(t(2)),...].
The input argument f defines the right hand side of the first order differential equation. This argument is a function with a specific header.
• If f is a Scilab function, its calling sequence must be
ydot = f(t,y)
where t is a real scalar (the time) and y a real vector (the state) and ydota real vector (the first order derivative dy/dt).
• If f is a string, it is the name of a Fortran subroutine or a C compiled function. For example, if we call ode(y0,t0,t,"fex"), then the subroutine fex is called.
The Fortran routine must have the header:
fex(n,t,y,ydot)
where n an integer, t a double precision scalar, y and ydot double precision vectors.
The C function must have the header:
fex(int *n,double *t,double *y,double *ydot)
where t is the time, y the state and ydot is the state derivative (dy/dt).
This external can be build in a OS independant way using ilib_for_link and dynamically linked to Scilab by the link function.
• It may happen that the simulator f needs extra arguments. In this case, we can use the following feature. The f argument can also be a list lst=list(simuf,u1,u2,...un) where simuf is a Scilab function with syntax: ydot = f(t,y,u1,u2,...,un) and u1, u2, ..., un are extra arguments which are automatically passed to the simulator simuf.
The function f can return a p-by-q matrix instead of a vector. With this matrix notation, we solve the system of n=p*q ODEs dY/dt=F(t,Y) where Y is a p x q matrix. Then initial conditions, Y0, must also be a p x q matrix and the result of ode is the p-by-q(T+1) matrix [Y(t_0),Y(t_1),...,Y(t_T)].
The tolerances rtol and atol are thresholds for relative and absolute estimated errors. The estimated error on y(i) is: rtol(i)*abs(y(i))+atol(i) and integration is carried out as far as this error is small for all components of the state. If rtol and/or atol is a constant, rtol(i) and/or atol(i) are set to this constant value. Default values for rtol and atol are respectively rtol=1.d-5 and atol=1.d-7 for most solvers and rtol=1.d-3 and atol=1.d-4 for "rkf" and "fix".
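For example, with y0, t0, t and f defined as in the examples below, explicit tolerances are passed just before f (a sketch):

y = ode(y0, t0, t, 1d-8, 1d-10, f); // rtol=1d-8, atol=1d-10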
For stiff problems, it is better to give the Jacobian of the RHS function as the optional argument jac. The Jacobian is an external i.e. a function with specified syntax, or the name of a Fortran subroutine or a C function (character string) with specified calling sequence or a list.
• If jac is a function, the syntax should be J=jac(t,y) where t is a real scalar (time) and y a real vector (state). The result matrix J must evaluate to df/dx, i.e. J(k,i) = df_k/dx_i where f_k is the kth component of f.
• If jac is a character string it refers to the name of a Fortran subroutine or a C function.
The Fortran routine must have the header:
subroutine fex(n,t,y,ml,mu,J,nrpd)
integer n,ml,mu,nrpd
double precision t,y(*),J(*)
The C function must have the header:
void fex(int *n,double *t,double *y,int *ml,int *mu,double *J,int *nrpd,)
In most cases you do not need to refer to ml, mu and nrpd.
• If jac is a list the same conventions as for f apply.
Optional arguments w and iw are vectors for storing information returned by the integration routine (see ode_optional_output for details). When these vectors are provided in RHS of ode the integration re-starts with the same parameters as in its previous stop.
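A sketch of this restart mechanism (the split of the time vector into t1 and t2 is arbitrary here):

[y1, w, iw] = ode(y0, t0, t1, f); // first part of the integration, keep w and iw
[y2, w, iw] = ode(y1(:,$), t1($), t2, f, w, iw); // continue with the solver's saved state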
More options can be given to ODEPACK solvers by using %ODEOPTIONS variable. See odeoptions.
The solvers
The type of problem solved and the method used depend on the value of the first optional argument type which can be one of the following strings:
<not given>:
lsoda solver of package ODEPACK is called by default. It automatically selects between nonstiff predictor-corrector Adams method and stiff Backward Differentiation Formula (BDF) method. It uses nonstiff method initially and dynamically monitors data in order to decide which method to use.
"adams":
This is for nonstiff problems. lsode solver of package ODEPACK is called and it uses the Adams method.
"stiff":
This is for stiff problems. lsode solver of package ODEPACK is called and it uses the BDF method.
"rk":
Adaptive Runge-Kutta of order 4 (RK4) method.
"rkf":
The Shampine and Watts program based on Fehlberg's Runge-Kutta pair of order 4 and 5 (RKF45) method is used. This is for non-stiff and mildly stiff problems when derivative evaluations are inexpensive. This method should generally not be used when the user is demanding high accuracy.
"fix":
Same solver as "rkf", but the user interface is very simple, i.e. only rtol and atol parameters can be passed to the solver. This is the simplest method to try.
"root":
ODE solver with rootfinding capabilities. The lsodar solver of package ODEPACK is used. It is a variant of the lsoda solver where it finds the roots of a given vector function. See help on ode_root for more details.
"discrete":
Discrete time simulation. See help on ode_discrete for more details.
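As an illustration of the discrete case (a sketch; see ode_discrete for the exact semantics), the recurrence y(k+1)=f(k,y(k)) starting from y(k0)=y0 can be simulated as follows:

function yn=f(k, y)
    yn = 0.5*y + 1
endfunction
y0 = 0; k0 = 0; kvect = 0:5;
y = ode("discrete", y0, k0, kvect, f) // columns of y are the values y(kvect(i))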
Examples
In the following example, we solve the Ordinary Differential Equation dy/dt=y^2-y sin(t)+cos(t) with the initial condition y(0)=0. We use the default solver.
function ydot=f(t, y)
ydot=y^2-y*sin(t)+cos(t)
endfunction
y0=0;
t0=0;
t=0:0.1:%pi;
y=ode(y0,t0,t,f);
plot(t,y)
In the following example, we solve the equation dy/dt=A*y. The exact solution is y(t)=expm(A*t)*y(0), where expm is the matrix exponential. The unknown is the 2-by-1 matrix y(t).
function ydot=f(t, y)
ydot=A*y
endfunction
function J=Jacobian(t, y)
J=A
endfunction
A=[10,0;0,-1];
y0=[0;1];
t0=0;
t=1;
ode("stiff",y0,t0,t,f,Jacobian)
// Compare with exact solution:
expm(A*t)*y0
In the following example, we solve the ODE dx/dt = A x(t) + B u(t) with u(t)=sin(omega*t). Notice the extra arguments of f: A, u, B, omega are passed to function f in a list.
function xdot=linear(t, x, A, u, B, omega)
xdot=A*x+B*u(t,omega)
endfunction
function ut=u(t, omega)
ut=sin(omega*t)
endfunction
A=[1 1;0 2];
B=[1;1];
omega=5;
y0=[1;0];
t0=0;
t=[0.1,0.2,0.5,1];
ode(y0,t0,t,list(linear,A,u,B,omega))
In the following example, we solve the Riccati differential equation dX/dt=A'*X + X*A - X'*B*X + C where initial condition X(0) is the 2-by-2 identity matrix.
function Xdot=ric(t, X, A, B, C)
Xdot=A'*X+X*A-X'*B*X+C
endfunction
A=[1,1;0,2];
B=[1,0;0,1];
C=[1,0;0,1];
y0=eye(A);
t0=0;
t=0:0.1:%pi;
X=ode(y0,t0,t,list(ric,A,B,C))
In the following example, we solve the differential equation dY/dt=A*Y where the unknown Y(t) is a 2-by-2 matrix. The exact solution is Y(t)=expm(A*t), where expm is the matrix exponential.
function ydot=f(t, y, A)
ydot=A*y;
endfunction
A=[1,1;0,2];
y0=eye(A);
t0=0;
t=1;
ode(y0,t0,t,list(f,A))
// Compare with the exact solution:
expm(A*t)
ode("adams",y0,t0,t,list(f,A))
With a compiler
The following example requires a C compiler.
// ---------- Simple one dimension ODE (C coded external)
ccode=['#include <math.h>'
'void myode(int *n,double *t,double *y,double *ydot)'
'{'
' ydot[0]=y[0]*y[0]-y[0]*sin(*t)+cos(*t);'
'}']
mputl(ccode,TMPDIR+'/myode.c') //create the C file
// Compile
ilib_for_link('myode','myode.c',[],'c',TMPDIR+'/Makefile',TMPDIR+'/loader.sce');
exec(TMPDIR+'/loader.sce') //incremental linking
y0=0;
t0=0;
t=0:0.1:%pi;
y=ode(y0,t0,t,'myode');
See Also
• ode_discrete — ordinary differential equation solver, discrete time simulation
• ode_root — ordinary differential equation solver with root finding
• dassl — differential algebraic equation
• impl — differential algebraic equation
• odedc — discrete/continuous ode solver
• odeoptions — set options for ode solvers
• csim — simulation (time response) of linear system
• ltitr — discrete time response (state space)
• rtitr — discrete time response (transfer matrix)
Authors
• Alan C. Hindmarsh, Mathematics and Statistics Division, L-316, Livermore, CA 94550.
Bibliography
Alan C. Hindmarsh, "lsode and lsodi, two new initial value ordinary differential equation solvers", ACM-Signum newsletter, vol. 15, no. 4 (1980), pp. 10-11.
Used Functions
The associated routines can be found in SCI/modules/differential_equations/src/fortran directory: lsode.f lsoda.f lsodar.f
TI-Basic 84 Programming/Test Conditions and Logical Operators
Nota Bene: The TI 83/84 uses any nonzero number to represent true (most often it is 1) and zero to represent false.
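For instance, the following short sketch displays TRUE, because 2 is nonzero and is therefore treated as true:

PROGRAM:TEMP
:If 2
:Disp "TRUE"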
Test Conditions
Test Conditions are used to compare different values and form the basis of Conditional Functions.
Equal
=, TEST:TEST:1 returns true if two input values are equal and false if not equal.
Syntax
:Value 1 = Value 2
• Where Value 1 and Value 2 are both variables of the same type.
Example
PROGRAM:TEMP
:0→X
:If X=0
:Disp "True"
prgmTEMP
True
Not Equal
≠, TEST:TEST:2 returns true if two input values are not equal and false if they are equal.
Syntax and Example
See Equal
Greater Than
>, TEST:TEST:3 returns true if first input value is larger than second input value and false if it is less than or equal.
Syntax and Example
See Equal
Greater Than or Equal To
≥, TEST:TEST:4 returns true if the first input value is larger than or equal to the second value and false if it is greater than.
Syntax and Example
See Equal
Less Than
<, TEST:TEST:5 returns true if the first input value is smaller than the second and returns false if it is greater than or equal to.
Syntax and Example
See Equal
Less Than or Equal To
≤, TEST:TEST:6 returns true if the first input value is smaller than or equal to the second input value and returns false if it is greater than.
Syntax and Example
See Equal
Logical Operators
Logical Operators help combine multiple boolean valued statements into one.
And
In order for an "and" conditional function evaluate to true, both parts of the statement have to be true or else it returns false.
Syntax
and, TEST:LOGIC:1
:Boolean 1 and Boolean 2
• Where Boolean 1 and Boolean 2 are both expressions that can be evaluated to true or false.
Example
PROGRAM:TEMP
:0→X
:1→Y
:If X=0 and Y=1
:Disp "TRUE"
prgmTEMP
TRUE
Or
In order for an "or" conditional function to evaluate to true, at least one of the parts of the statement have to be true or else it returns false.
Syntax and Example
or, TEST:LOGIC:2
See And
XOr
In order for an "xor" conditional function to evaluate to true, exactly one of the two values has to be true. If both values are true, or both are false, it evaluates to false, as the short sketch after the syntax note below illustrates.
Syntax and Example
xor, TEST:LOGIC:3
See And
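A short illustrative sketch (same pattern as the And example): this displays EXACTLY ONE because only the first condition is true.

PROGRAM:TEMP
:1→X
:0→Y
:If X=1 xor Y=1
:Disp "EXACTLY ONE"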
Not
The "not" operator is a little different from the others: it takes only one value and evaluates to its opposite.
Syntax
not(, TEST:LOGIC:4
:not(valueA)
• valueA can be almost anything. It can be a single number since numbers represent true and false, or it can be a boolean expression that evaluates to true or false.
Example
PROGRAM:TEMP
:0→X
:If not(X≠0)
:Disp "X DOES NOT NOT EQUAL ZERO"
prgmTEMP
X DOES NOT NOT EQUAL ZERO
/* * This file is part of UBIFS. * * Copyright (C) 2006-2008 Nokia Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Authors: Adrian Hunter * Artem Bityutskiy (Битюцкий Артём) */ /* * This file implements the LEB properties tree (LPT) area. The LPT area * contains the LEB properties tree, a table of LPT area eraseblocks (ltab), and * (for the "big" model) a table of saved LEB numbers (lsave). The LPT area sits * between the log and the orphan area. * * The LPT area is like a miniature self-contained file system. It is required * that it never runs out of space, is fast to access and update, and scales * logarithmically. The LEB properties tree is implemented as a wandering tree * much like the TNC, and the LPT area has its own garbage collection. * * The LPT has two slightly different forms called the "small model" and the * "big model". The small model is used when the entire LEB properties table * can be written into a single eraseblock. In that case, garbage collection * consists of just writing the whole table, which therefore makes all other * eraseblocks reusable. In the case of the big model, dirty eraseblocks are * selected for garbage collection, which consists of marking the clean nodes in * that LEB as dirty, and then only the dirty nodes are written out. Also, in * the case of the big model, a table of LEB numbers is saved so that the entire * LPT does not to be scanned looking for empty eraseblocks when UBIFS is first * mounted. */ #include "ubifs.h" #include #include #include /** * do_calc_lpt_geom - calculate sizes for the LPT area. * @c: the UBIFS file-system description object * * Calculate the sizes of LPT bit fields, nodes, and tree, based on the * properties of the flash and whether LPT is "big" (c->big_lpt). */ static void do_calc_lpt_geom(struct ubifs_info *c) { int i, n, bits, per_leb_wastage, max_pnode_cnt; long long sz, tot_wastage; n = c->main_lebs + c->max_leb_cnt - c->leb_cnt; max_pnode_cnt = DIV_ROUND_UP(n, UBIFS_LPT_FANOUT); c->lpt_hght = 1; n = UBIFS_LPT_FANOUT; while (n < max_pnode_cnt) { c->lpt_hght += 1; n <<= UBIFS_LPT_FANOUT_SHIFT; } c->pnode_cnt = DIV_ROUND_UP(c->main_lebs, UBIFS_LPT_FANOUT); n = DIV_ROUND_UP(c->pnode_cnt, UBIFS_LPT_FANOUT); c->nnode_cnt = n; for (i = 1; i < c->lpt_hght; i++) { n = DIV_ROUND_UP(n, UBIFS_LPT_FANOUT); c->nnode_cnt += n; } c->space_bits = fls(c->leb_size) - 3; c->lpt_lnum_bits = fls(c->lpt_lebs); c->lpt_offs_bits = fls(c->leb_size - 1); c->lpt_spc_bits = fls(c->leb_size); n = DIV_ROUND_UP(c->max_leb_cnt, UBIFS_LPT_FANOUT); c->pcnt_bits = fls(n - 1); c->lnum_bits = fls(c->max_leb_cnt - 1); bits = UBIFS_LPT_CRC_BITS + UBIFS_LPT_TYPE_BITS + (c->big_lpt ? c->pcnt_bits : 0) + (c->space_bits * 2 + 1) * UBIFS_LPT_FANOUT; c->pnode_sz = (bits + 7) / 8; bits = UBIFS_LPT_CRC_BITS + UBIFS_LPT_TYPE_BITS + (c->big_lpt ? 
c->pcnt_bits : 0) + (c->lpt_lnum_bits + c->lpt_offs_bits) * UBIFS_LPT_FANOUT; c->nnode_sz = (bits + 7) / 8; bits = UBIFS_LPT_CRC_BITS + UBIFS_LPT_TYPE_BITS + c->lpt_lebs * c->lpt_spc_bits * 2; c->ltab_sz = (bits + 7) / 8; bits = UBIFS_LPT_CRC_BITS + UBIFS_LPT_TYPE_BITS + c->lnum_bits * c->lsave_cnt; c->lsave_sz = (bits + 7) / 8; /* Calculate the minimum LPT size */ c->lpt_sz = (long long)c->pnode_cnt * c->pnode_sz; c->lpt_sz += (long long)c->nnode_cnt * c->nnode_sz; c->lpt_sz += c->ltab_sz; if (c->big_lpt) c->lpt_sz += c->lsave_sz; /* Add wastage */ sz = c->lpt_sz; per_leb_wastage = max_t(int, c->pnode_sz, c->nnode_sz); sz += per_leb_wastage; tot_wastage = per_leb_wastage; while (sz > c->leb_size) { sz += per_leb_wastage; sz -= c->leb_size; tot_wastage += per_leb_wastage; } tot_wastage += ALIGN(sz, c->min_io_size) - sz; c->lpt_sz += tot_wastage; } /** * ubifs_calc_lpt_geom - calculate and check sizes for the LPT area. * @c: the UBIFS file-system description object * * This function returns %0 on success and a negative error code on failure. */ int ubifs_calc_lpt_geom(struct ubifs_info *c) { int lebs_needed; long long sz; do_calc_lpt_geom(c); /* Verify that lpt_lebs is big enough */ sz = c->lpt_sz * 2; /* Must have at least 2 times the size */ lebs_needed = div_u64(sz + c->leb_size - 1, c->leb_size); if (lebs_needed > c->lpt_lebs) { ubifs_err(c, "too few LPT LEBs"); return -EINVAL; } /* Verify that ltab fits in a single LEB (since ltab is a single node */ if (c->ltab_sz > c->leb_size) { ubifs_err(c, "LPT ltab too big"); return -EINVAL; } c->check_lpt_free = c->big_lpt; return 0; } /** * calc_dflt_lpt_geom - calculate default LPT geometry. * @c: the UBIFS file-system description object * @main_lebs: number of main area LEBs is passed and returned here * @big_lpt: whether the LPT area is "big" is returned here * * The size of the LPT area depends on parameters that themselves are dependent * on the size of the LPT area. This function, successively recalculates the LPT * area geometry until the parameters and resultant geometry are consistent. * * This function returns %0 on success and a negative error code on failure. */ static int calc_dflt_lpt_geom(struct ubifs_info *c, int *main_lebs, int *big_lpt) { int i, lebs_needed; long long sz; /* Start by assuming the minimum number of LPT LEBs */ c->lpt_lebs = UBIFS_MIN_LPT_LEBS; c->main_lebs = *main_lebs - c->lpt_lebs; if (c->main_lebs <= 0) return -EINVAL; /* And assume we will use the small LPT model */ c->big_lpt = 0; /* * Calculate the geometry based on assumptions above and then see if it * makes sense */ do_calc_lpt_geom(c); /* Small LPT model must have lpt_sz < leb_size */ if (c->lpt_sz > c->leb_size) { /* Nope, so try again using big LPT model */ c->big_lpt = 1; do_calc_lpt_geom(c); } /* Now check there are enough LPT LEBs */ for (i = 0; i < 64 ; i++) { sz = c->lpt_sz * 4; /* Allow 4 times the size */ lebs_needed = div_u64(sz + c->leb_size - 1, c->leb_size); if (lebs_needed > c->lpt_lebs) { /* Not enough LPT LEBs so try again with more */ c->lpt_lebs = lebs_needed; c->main_lebs = *main_lebs - c->lpt_lebs; if (c->main_lebs <= 0) return -EINVAL; do_calc_lpt_geom(c); continue; } if (c->ltab_sz > c->leb_size) { ubifs_err(c, "LPT ltab too big"); return -EINVAL; } *main_lebs = c->main_lebs; *big_lpt = c->big_lpt; return 0; } return -EINVAL; } /** * pack_bits - pack bit fields end-to-end. 
* @addr: address at which to pack (passed and next address returned) * @pos: bit position at which to pack (passed and next position returned) * @val: value to pack * @nrbits: number of bits of value to pack (1-32) */ static void pack_bits(uint8_t **addr, int *pos, uint32_t val, int nrbits) { uint8_t *p = *addr; int b = *pos; ubifs_assert(nrbits > 0); ubifs_assert(nrbits <= 32); ubifs_assert(*pos >= 0); ubifs_assert(*pos < 8); ubifs_assert((val >> nrbits) == 0 || nrbits == 32); if (b) { *p |= ((uint8_t)val) << b; nrbits += b; if (nrbits > 8) { *++p = (uint8_t)(val >>= (8 - b)); if (nrbits > 16) { *++p = (uint8_t)(val >>= 8); if (nrbits > 24) { *++p = (uint8_t)(val >>= 8); if (nrbits > 32) *++p = (uint8_t)(val >>= 8); } } } } else { *p = (uint8_t)val; if (nrbits > 8) { *++p = (uint8_t)(val >>= 8); if (nrbits > 16) { *++p = (uint8_t)(val >>= 8); if (nrbits > 24) *++p = (uint8_t)(val >>= 8); } } } b = nrbits & 7; if (b == 0) p++; *addr = p; *pos = b; } /** * ubifs_unpack_bits - unpack bit fields. * @addr: address at which to unpack (passed and next address returned) * @pos: bit position at which to unpack (passed and next position returned) * @nrbits: number of bits of value to unpack (1-32) * * This functions returns the value unpacked. */ uint32_t ubifs_unpack_bits(uint8_t **addr, int *pos, int nrbits) { const int k = 32 - nrbits; uint8_t *p = *addr; int b = *pos; uint32_t uninitialized_var(val); const int bytes = (nrbits + b + 7) >> 3; ubifs_assert(nrbits > 0); ubifs_assert(nrbits <= 32); ubifs_assert(*pos >= 0); ubifs_assert(*pos < 8); if (b) { switch (bytes) { case 2: val = p[1]; break; case 3: val = p[1] | ((uint32_t)p[2] << 8); break; case 4: val = p[1] | ((uint32_t)p[2] << 8) | ((uint32_t)p[3] << 16); break; case 5: val = p[1] | ((uint32_t)p[2] << 8) | ((uint32_t)p[3] << 16) | ((uint32_t)p[4] << 24); } val <<= (8 - b); val |= *p >> b; nrbits += b; } else { switch (bytes) { case 1: val = p[0]; break; case 2: val = p[0] | ((uint32_t)p[1] << 8); break; case 3: val = p[0] | ((uint32_t)p[1] << 8) | ((uint32_t)p[2] << 16); break; case 4: val = p[0] | ((uint32_t)p[1] << 8) | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24); break; } } val <<= k; val >>= k; b = nrbits & 7; p += nrbits >> 3; *addr = p; *pos = b; ubifs_assert((val >> nrbits) == 0 || nrbits - b == 32); return val; } /** * ubifs_pack_pnode - pack all the bit fields of a pnode. * @c: UBIFS file-system description object * @buf: buffer into which to pack * @pnode: pnode to pack */ void ubifs_pack_pnode(struct ubifs_info *c, void *buf, struct ubifs_pnode *pnode) { uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; int i, pos = 0; uint16_t crc; pack_bits(&addr, &pos, UBIFS_LPT_PNODE, UBIFS_LPT_TYPE_BITS); if (c->big_lpt) pack_bits(&addr, &pos, pnode->num, c->pcnt_bits); for (i = 0; i < UBIFS_LPT_FANOUT; i++) { pack_bits(&addr, &pos, pnode->lprops[i].free >> 3, c->space_bits); pack_bits(&addr, &pos, pnode->lprops[i].dirty >> 3, c->space_bits); if (pnode->lprops[i].flags & LPROPS_INDEX) pack_bits(&addr, &pos, 1, 1); else pack_bits(&addr, &pos, 0, 1); } crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES, c->pnode_sz - UBIFS_LPT_CRC_BYTES); addr = buf; pos = 0; pack_bits(&addr, &pos, crc, UBIFS_LPT_CRC_BITS); } /** * ubifs_pack_nnode - pack all the bit fields of a nnode. 
* @c: UBIFS file-system description object * @buf: buffer into which to pack * @nnode: nnode to pack */ void ubifs_pack_nnode(struct ubifs_info *c, void *buf, struct ubifs_nnode *nnode) { uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; int i, pos = 0; uint16_t crc; pack_bits(&addr, &pos, UBIFS_LPT_NNODE, UBIFS_LPT_TYPE_BITS); if (c->big_lpt) pack_bits(&addr, &pos, nnode->num, c->pcnt_bits); for (i = 0; i < UBIFS_LPT_FANOUT; i++) { int lnum = nnode->nbranch[i].lnum; if (lnum == 0) lnum = c->lpt_last + 1; pack_bits(&addr, &pos, lnum - c->lpt_first, c->lpt_lnum_bits); pack_bits(&addr, &pos, nnode->nbranch[i].offs, c->lpt_offs_bits); } crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES, c->nnode_sz - UBIFS_LPT_CRC_BYTES); addr = buf; pos = 0; pack_bits(&addr, &pos, crc, UBIFS_LPT_CRC_BITS); } /** * ubifs_pack_ltab - pack the LPT's own lprops table. * @c: UBIFS file-system description object * @buf: buffer into which to pack * @ltab: LPT's own lprops table to pack */ void ubifs_pack_ltab(struct ubifs_info *c, void *buf, struct ubifs_lpt_lprops *ltab) { uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; int i, pos = 0; uint16_t crc; pack_bits(&addr, &pos, UBIFS_LPT_LTAB, UBIFS_LPT_TYPE_BITS); for (i = 0; i < c->lpt_lebs; i++) { pack_bits(&addr, &pos, ltab[i].free, c->lpt_spc_bits); pack_bits(&addr, &pos, ltab[i].dirty, c->lpt_spc_bits); } crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES, c->ltab_sz - UBIFS_LPT_CRC_BYTES); addr = buf; pos = 0; pack_bits(&addr, &pos, crc, UBIFS_LPT_CRC_BITS); } /** * ubifs_pack_lsave - pack the LPT's save table. * @c: UBIFS file-system description object * @buf: buffer into which to pack * @lsave: LPT's save table to pack */ void ubifs_pack_lsave(struct ubifs_info *c, void *buf, int *lsave) { uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; int i, pos = 0; uint16_t crc; pack_bits(&addr, &pos, UBIFS_LPT_LSAVE, UBIFS_LPT_TYPE_BITS); for (i = 0; i < c->lsave_cnt; i++) pack_bits(&addr, &pos, lsave[i], c->lnum_bits); crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES, c->lsave_sz - UBIFS_LPT_CRC_BYTES); addr = buf; pos = 0; pack_bits(&addr, &pos, crc, UBIFS_LPT_CRC_BITS); } /** * ubifs_add_lpt_dirt - add dirty space to LPT LEB properties. * @c: UBIFS file-system description object * @lnum: LEB number to which to add dirty space * @dirty: amount of dirty space to add */ void ubifs_add_lpt_dirt(struct ubifs_info *c, int lnum, int dirty) { if (!dirty || !lnum) return; dbg_lp("LEB %d add %d to %d", lnum, dirty, c->ltab[lnum - c->lpt_first].dirty); ubifs_assert(lnum >= c->lpt_first && lnum <= c->lpt_last); c->ltab[lnum - c->lpt_first].dirty += dirty; } /** * set_ltab - set LPT LEB properties. * @c: UBIFS file-system description object * @lnum: LEB number * @free: amount of free space * @dirty: amount of dirty space */ static void set_ltab(struct ubifs_info *c, int lnum, int free, int dirty) { dbg_lp("LEB %d free %d dirty %d to %d %d", lnum, c->ltab[lnum - c->lpt_first].free, c->ltab[lnum - c->lpt_first].dirty, free, dirty); ubifs_assert(lnum >= c->lpt_first && lnum <= c->lpt_last); c->ltab[lnum - c->lpt_first].free = free; c->ltab[lnum - c->lpt_first].dirty = dirty; } /** * ubifs_add_nnode_dirt - add dirty space to LPT LEB properties. 
* @c: UBIFS file-system description object * @nnode: nnode for which to add dirt */ void ubifs_add_nnode_dirt(struct ubifs_info *c, struct ubifs_nnode *nnode) { struct ubifs_nnode *np = nnode->parent; if (np) ubifs_add_lpt_dirt(c, np->nbranch[nnode->iip].lnum, c->nnode_sz); else { ubifs_add_lpt_dirt(c, c->lpt_lnum, c->nnode_sz); if (!(c->lpt_drty_flgs & LTAB_DIRTY)) { c->lpt_drty_flgs |= LTAB_DIRTY; ubifs_add_lpt_dirt(c, c->ltab_lnum, c->ltab_sz); } } } /** * add_pnode_dirt - add dirty space to LPT LEB properties. * @c: UBIFS file-system description object * @pnode: pnode for which to add dirt */ static void add_pnode_dirt(struct ubifs_info *c, struct ubifs_pnode *pnode) { ubifs_add_lpt_dirt(c, pnode->parent->nbranch[pnode->iip].lnum, c->pnode_sz); } /** * calc_nnode_num - calculate nnode number. * @row: the row in the tree (root is zero) * @col: the column in the row (leftmost is zero) * * The nnode number is a number that uniquely identifies a nnode and can be used * easily to traverse the tree from the root to that nnode. * * This function calculates and returns the nnode number for the nnode at @row * and @col. */ static int calc_nnode_num(int row, int col) { int num, bits; num = 1; while (row--) { bits = (col & (UBIFS_LPT_FANOUT - 1)); col >>= UBIFS_LPT_FANOUT_SHIFT; num <<= UBIFS_LPT_FANOUT_SHIFT; num |= bits; } return num; } /** * calc_nnode_num_from_parent - calculate nnode number. * @c: UBIFS file-system description object * @parent: parent nnode * @iip: index in parent * * The nnode number is a number that uniquely identifies a nnode and can be used * easily to traverse the tree from the root to that nnode. * * This function calculates and returns the nnode number based on the parent's * nnode number and the index in parent. */ static int calc_nnode_num_from_parent(const struct ubifs_info *c, struct ubifs_nnode *parent, int iip) { int num, shft; if (!parent) return 1; shft = (c->lpt_hght - parent->level) * UBIFS_LPT_FANOUT_SHIFT; num = parent->num ^ (1 << shft); num |= (UBIFS_LPT_FANOUT + iip) << shft; return num; } /** * calc_pnode_num_from_parent - calculate pnode number. * @c: UBIFS file-system description object * @parent: parent nnode * @iip: index in parent * * The pnode number is a number that uniquely identifies a pnode and can be used * easily to traverse the tree from the root to that pnode. * * This function calculates and returns the pnode number based on the parent's * nnode number and the index in parent. */ static int calc_pnode_num_from_parent(const struct ubifs_info *c, struct ubifs_nnode *parent, int iip) { int i, n = c->lpt_hght - 1, pnum = parent->num, num = 0; for (i = 0; i < n; i++) { num <<= UBIFS_LPT_FANOUT_SHIFT; num |= pnum & (UBIFS_LPT_FANOUT - 1); pnum >>= UBIFS_LPT_FANOUT_SHIFT; } num <<= UBIFS_LPT_FANOUT_SHIFT; num |= iip; return num; } /** * ubifs_create_dflt_lpt - create default LPT. * @c: UBIFS file-system description object * @main_lebs: number of main area LEBs is passed and returned here * @lpt_first: LEB number of first LPT LEB * @lpt_lebs: number of LEBs for LPT is passed and returned here * @big_lpt: use big LPT model is passed and returned here * * This function returns %0 on success and a negative error code on failure. 
*/ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first, int *lpt_lebs, int *big_lpt) { int lnum, err = 0, node_sz, iopos, i, j, cnt, len, alen, row; int blnum, boffs, bsz, bcnt; struct ubifs_pnode *pnode = NULL; struct ubifs_nnode *nnode = NULL; void *buf = NULL, *p; struct ubifs_lpt_lprops *ltab = NULL; int *lsave = NULL; err = calc_dflt_lpt_geom(c, main_lebs, big_lpt); if (err) return err; *lpt_lebs = c->lpt_lebs; /* Needed by 'ubifs_pack_nnode()' and 'set_ltab()' */ c->lpt_first = lpt_first; /* Needed by 'set_ltab()' */ c->lpt_last = lpt_first + c->lpt_lebs - 1; /* Needed by 'ubifs_pack_lsave()' */ c->main_first = c->leb_cnt - *main_lebs; lsave = kmalloc(sizeof(int) * c->lsave_cnt, GFP_KERNEL); pnode = kzalloc(sizeof(struct ubifs_pnode), GFP_KERNEL); nnode = kzalloc(sizeof(struct ubifs_nnode), GFP_KERNEL); buf = vmalloc(c->leb_size); ltab = vmalloc(sizeof(struct ubifs_lpt_lprops) * c->lpt_lebs); if (!pnode || !nnode || !buf || !ltab || !lsave) { err = -ENOMEM; goto out; } ubifs_assert(!c->ltab); c->ltab = ltab; /* Needed by set_ltab */ /* Initialize LPT's own lprops */ for (i = 0; i < c->lpt_lebs; i++) { ltab[i].free = c->leb_size; ltab[i].dirty = 0; ltab[i].tgc = 0; ltab[i].cmt = 0; } lnum = lpt_first; p = buf; /* Number of leaf nodes (pnodes) */ cnt = c->pnode_cnt; /* * The first pnode contains the LEB properties for the LEBs that contain * the root inode node and the root index node of the index tree. */ node_sz = ALIGN(ubifs_idx_node_sz(c, 1), 8); iopos = ALIGN(node_sz, c->min_io_size); pnode->lprops[0].free = c->leb_size - iopos; pnode->lprops[0].dirty = iopos - node_sz; pnode->lprops[0].flags = LPROPS_INDEX; node_sz = UBIFS_INO_NODE_SZ; iopos = ALIGN(node_sz, c->min_io_size); pnode->lprops[1].free = c->leb_size - iopos; pnode->lprops[1].dirty = iopos - node_sz; for (i = 2; i < UBIFS_LPT_FANOUT; i++) pnode->lprops[i].free = c->leb_size; /* Add first pnode */ ubifs_pack_pnode(c, p, pnode); p += c->pnode_sz; len = c->pnode_sz; pnode->num += 1; /* Reset pnode values for remaining pnodes */ pnode->lprops[0].free = c->leb_size; pnode->lprops[0].dirty = 0; pnode->lprops[0].flags = 0; pnode->lprops[1].free = c->leb_size; pnode->lprops[1].dirty = 0; /* * To calculate the internal node branches, we keep information about * the level below. */ blnum = lnum; /* LEB number of level below */ boffs = 0; /* Offset of level below */ bcnt = cnt; /* Number of nodes in level below */ bsz = c->pnode_sz; /* Size of nodes in level below */ /* Add all remaining pnodes */ for (i = 1; i < cnt; i++) { if (len + c->pnode_sz > c->leb_size) { alen = ALIGN(len, c->min_io_size); set_ltab(c, lnum, c->leb_size - alen, alen - len); memset(p, 0xff, alen - len); err = ubifs_leb_change(c, lnum++, buf, alen); if (err) goto out; p = buf; len = 0; } ubifs_pack_pnode(c, p, pnode); p += c->pnode_sz; len += c->pnode_sz; /* * pnodes are simply numbered left to right starting at zero, * which means the pnode number can be used easily to traverse * down the tree to the corresponding pnode. 
*/ pnode->num += 1; } row = 0; for (i = UBIFS_LPT_FANOUT; cnt > i; i <<= UBIFS_LPT_FANOUT_SHIFT) row += 1; /* Add all nnodes, one level at a time */ while (1) { /* Number of internal nodes (nnodes) at next level */ cnt = DIV_ROUND_UP(cnt, UBIFS_LPT_FANOUT); for (i = 0; i < cnt; i++) { if (len + c->nnode_sz > c->leb_size) { alen = ALIGN(len, c->min_io_size); set_ltab(c, lnum, c->leb_size - alen, alen - len); memset(p, 0xff, alen - len); err = ubifs_leb_change(c, lnum++, buf, alen); if (err) goto out; p = buf; len = 0; } /* Only 1 nnode at this level, so it is the root */ if (cnt == 1) { c->lpt_lnum = lnum; c->lpt_offs = len; } /* Set branches to the level below */ for (j = 0; j < UBIFS_LPT_FANOUT; j++) { if (bcnt) { if (boffs + bsz > c->leb_size) { blnum += 1; boffs = 0; } nnode->nbranch[j].lnum = blnum; nnode->nbranch[j].offs = boffs; boffs += bsz; bcnt--; } else { nnode->nbranch[j].lnum = 0; nnode->nbranch[j].offs = 0; } } nnode->num = calc_nnode_num(row, i); ubifs_pack_nnode(c, p, nnode); p += c->nnode_sz; len += c->nnode_sz; } /* Only 1 nnode at this level, so it is the root */ if (cnt == 1) break; /* Update the information about the level below */ bcnt = cnt; bsz = c->nnode_sz; row -= 1; } if (*big_lpt) { /* Need to add LPT's save table */ if (len + c->lsave_sz > c->leb_size) { alen = ALIGN(len, c->min_io_size); set_ltab(c, lnum, c->leb_size - alen, alen - len); memset(p, 0xff, alen - len); err = ubifs_leb_change(c, lnum++, buf, alen); if (err) goto out; p = buf; len = 0; } c->lsave_lnum = lnum; c->lsave_offs = len; for (i = 0; i < c->lsave_cnt && i < *main_lebs; i++) lsave[i] = c->main_first + i; for (; i < c->lsave_cnt; i++) lsave[i] = c->main_first; ubifs_pack_lsave(c, p, lsave); p += c->lsave_sz; len += c->lsave_sz; } /* Need to add LPT's own LEB properties table */ if (len + c->ltab_sz > c->leb_size) { alen = ALIGN(len, c->min_io_size); set_ltab(c, lnum, c->leb_size - alen, alen - len); memset(p, 0xff, alen - len); err = ubifs_leb_change(c, lnum++, buf, alen); if (err) goto out; p = buf; len = 0; } c->ltab_lnum = lnum; c->ltab_offs = len; /* Update ltab before packing it */ len += c->ltab_sz; alen = ALIGN(len, c->min_io_size); set_ltab(c, lnum, c->leb_size - alen, alen - len); ubifs_pack_ltab(c, p, ltab); p += c->ltab_sz; /* Write remaining buffer */ memset(p, 0xff, alen - len); err = ubifs_leb_change(c, lnum, buf, alen); if (err) goto out; c->nhead_lnum = lnum; c->nhead_offs = ALIGN(len, c->min_io_size); dbg_lp("space_bits %d", c->space_bits); dbg_lp("lpt_lnum_bits %d", c->lpt_lnum_bits); dbg_lp("lpt_offs_bits %d", c->lpt_offs_bits); dbg_lp("lpt_spc_bits %d", c->lpt_spc_bits); dbg_lp("pcnt_bits %d", c->pcnt_bits); dbg_lp("lnum_bits %d", c->lnum_bits); dbg_lp("pnode_sz %d", c->pnode_sz); dbg_lp("nnode_sz %d", c->nnode_sz); dbg_lp("ltab_sz %d", c->ltab_sz); dbg_lp("lsave_sz %d", c->lsave_sz); dbg_lp("lsave_cnt %d", c->lsave_cnt); dbg_lp("lpt_hght %d", c->lpt_hght); dbg_lp("big_lpt %d", c->big_lpt); dbg_lp("LPT root is at %d:%d", c->lpt_lnum, c->lpt_offs); dbg_lp("LPT head is at %d:%d", c->nhead_lnum, c->nhead_offs); dbg_lp("LPT ltab is at %d:%d", c->ltab_lnum, c->ltab_offs); if (c->big_lpt) dbg_lp("LPT lsave is at %d:%d", c->lsave_lnum, c->lsave_offs); out: c->ltab = NULL; kfree(lsave); vfree(ltab); vfree(buf); kfree(nnode); kfree(pnode); return err; } /** * update_cats - add LEB properties of a pnode to LEB category lists and heaps. 
* @c: UBIFS file-system description object * @pnode: pnode * * When a pnode is loaded into memory, the LEB properties it contains are added, * by this function, to the LEB category lists and heaps. */ static void update_cats(struct ubifs_info *c, struct ubifs_pnode *pnode) { int i; for (i = 0; i < UBIFS_LPT_FANOUT; i++) { int cat = pnode->lprops[i].flags & LPROPS_CAT_MASK; int lnum = pnode->lprops[i].lnum; if (!lnum) return; ubifs_add_to_cat(c, &pnode->lprops[i], cat); } } /** * replace_cats - add LEB properties of a pnode to LEB category lists and heaps. * @c: UBIFS file-system description object * @old_pnode: pnode copied * @new_pnode: pnode copy * * During commit it is sometimes necessary to copy a pnode * (see dirty_cow_pnode). When that happens, references in * category lists and heaps must be replaced. This function does that. */ static void replace_cats(struct ubifs_info *c, struct ubifs_pnode *old_pnode, struct ubifs_pnode *new_pnode) { int i; for (i = 0; i < UBIFS_LPT_FANOUT; i++) { if (!new_pnode->lprops[i].lnum) return; ubifs_replace_cat(c, &old_pnode->lprops[i], &new_pnode->lprops[i]); } } /** * check_lpt_crc - check LPT node crc is correct. * @c: UBIFS file-system description object * @buf: buffer containing node * @len: length of node * * This function returns %0 on success and a negative error code on failure. */ static int check_lpt_crc(const struct ubifs_info *c, void *buf, int len) { int pos = 0; uint8_t *addr = buf; uint16_t crc, calc_crc; crc = ubifs_unpack_bits(&addr, &pos, UBIFS_LPT_CRC_BITS); calc_crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES, len - UBIFS_LPT_CRC_BYTES); if (crc != calc_crc) { ubifs_err(c, "invalid crc in LPT node: crc %hx calc %hx", crc, calc_crc); dump_stack(); return -EINVAL; } return 0; } /** * check_lpt_type - check LPT node type is correct. * @c: UBIFS file-system description object * @addr: address of type bit field is passed and returned updated here * @pos: position of type bit field is passed and returned updated here * @type: expected type * * This function returns %0 on success and a negative error code on failure. */ static int check_lpt_type(const struct ubifs_info *c, uint8_t **addr, int *pos, int type) { int node_type; node_type = ubifs_unpack_bits(addr, pos, UBIFS_LPT_TYPE_BITS); if (node_type != type) { ubifs_err(c, "invalid type (%d) in LPT node type %d", node_type, type); dump_stack(); return -EINVAL; } return 0; } /** * unpack_pnode - unpack a pnode. * @c: UBIFS file-system description object * @buf: buffer containing packed pnode to unpack * @pnode: pnode structure to fill * * This function returns %0 on success and a negative error code on failure. */ static int unpack_pnode(const struct ubifs_info *c, void *buf, struct ubifs_pnode *pnode) { uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; int i, pos = 0, err; err = check_lpt_type(c, &addr, &pos, UBIFS_LPT_PNODE); if (err) return err; if (c->big_lpt) pnode->num = ubifs_unpack_bits(&addr, &pos, c->pcnt_bits); for (i = 0; i < UBIFS_LPT_FANOUT; i++) { struct ubifs_lprops * const lprops = &pnode->lprops[i]; lprops->free = ubifs_unpack_bits(&addr, &pos, c->space_bits); lprops->free <<= 3; lprops->dirty = ubifs_unpack_bits(&addr, &pos, c->space_bits); lprops->dirty <<= 3; if (ubifs_unpack_bits(&addr, &pos, 1)) lprops->flags = LPROPS_INDEX; else lprops->flags = 0; lprops->flags |= ubifs_categorize_lprops(c, lprops); } err = check_lpt_crc(c, buf, c->pnode_sz); return err; } /** * ubifs_unpack_nnode - unpack a nnode. 
* @c: UBIFS file-system description object * @buf: buffer containing packed nnode to unpack * @nnode: nnode structure to fill * * This function returns %0 on success and a negative error code on failure. */ int ubifs_unpack_nnode(const struct ubifs_info *c, void *buf, struct ubifs_nnode *nnode) { uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; int i, pos = 0, err; err = check_lpt_type(c, &addr, &pos, UBIFS_LPT_NNODE); if (err) return err; if (c->big_lpt) nnode->num = ubifs_unpack_bits(&addr, &pos, c->pcnt_bits); for (i = 0; i < UBIFS_LPT_FANOUT; i++) { int lnum; lnum = ubifs_unpack_bits(&addr, &pos, c->lpt_lnum_bits) + c->lpt_first; if (lnum == c->lpt_last + 1) lnum = 0; nnode->nbranch[i].lnum = lnum; nnode->nbranch[i].offs = ubifs_unpack_bits(&addr, &pos, c->lpt_offs_bits); } err = check_lpt_crc(c, buf, c->nnode_sz); return err; } /** * unpack_ltab - unpack the LPT's own lprops table. * @c: UBIFS file-system description object * @buf: buffer from which to unpack * * This function returns %0 on success and a negative error code on failure. */ static int unpack_ltab(const struct ubifs_info *c, void *buf) { uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; int i, pos = 0, err; err = check_lpt_type(c, &addr, &pos, UBIFS_LPT_LTAB); if (err) return err; for (i = 0; i < c->lpt_lebs; i++) { int free = ubifs_unpack_bits(&addr, &pos, c->lpt_spc_bits); int dirty = ubifs_unpack_bits(&addr, &pos, c->lpt_spc_bits); if (free < 0 || free > c->leb_size || dirty < 0 || dirty > c->leb_size || free + dirty > c->leb_size) return -EINVAL; c->ltab[i].free = free; c->ltab[i].dirty = dirty; c->ltab[i].tgc = 0; c->ltab[i].cmt = 0; } err = check_lpt_crc(c, buf, c->ltab_sz); return err; } /** * unpack_lsave - unpack the LPT's save table. * @c: UBIFS file-system description object * @buf: buffer from which to unpack * * This function returns %0 on success and a negative error code on failure. */ static int unpack_lsave(const struct ubifs_info *c, void *buf) { uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; int i, pos = 0, err; err = check_lpt_type(c, &addr, &pos, UBIFS_LPT_LSAVE); if (err) return err; for (i = 0; i < c->lsave_cnt; i++) { int lnum = ubifs_unpack_bits(&addr, &pos, c->lnum_bits); if (lnum < c->main_first || lnum >= c->leb_cnt) return -EINVAL; c->lsave[i] = lnum; } err = check_lpt_crc(c, buf, c->lsave_sz); return err; } /** * validate_nnode - validate a nnode. * @c: UBIFS file-system description object * @nnode: nnode to validate * @parent: parent nnode (or NULL for the root nnode) * @iip: index in parent * * This function returns %0 on success and a negative error code on failure. */ static int validate_nnode(const struct ubifs_info *c, struct ubifs_nnode *nnode, struct ubifs_nnode *parent, int iip) { int i, lvl, max_offs; if (c->big_lpt) { int num = calc_nnode_num_from_parent(c, parent, iip); if (nnode->num != num) return -EINVAL; } lvl = parent ? parent->level - 1 : c->lpt_hght; if (lvl < 1) return -EINVAL; if (lvl == 1) max_offs = c->leb_size - c->pnode_sz; else max_offs = c->leb_size - c->nnode_sz; for (i = 0; i < UBIFS_LPT_FANOUT; i++) { int lnum = nnode->nbranch[i].lnum; int offs = nnode->nbranch[i].offs; if (lnum == 0) { if (offs != 0) return -EINVAL; continue; } if (lnum < c->lpt_first || lnum > c->lpt_last) return -EINVAL; if (offs < 0 || offs > max_offs) return -EINVAL; } return 0; } /** * validate_pnode - validate a pnode. 
* @c: UBIFS file-system description object * @pnode: pnode to validate * @parent: parent nnode * @iip: index in parent * * This function returns %0 on success and a negative error code on failure. */ static int validate_pnode(const struct ubifs_info *c, struct ubifs_pnode *pnode, struct ubifs_nnode *parent, int iip) { int i; if (c->big_lpt) { int num = calc_pnode_num_from_parent(c, parent, iip); if (pnode->num != num) return -EINVAL; } for (i = 0; i < UBIFS_LPT_FANOUT; i++) { int free = pnode->lprops[i].free; int dirty = pnode->lprops[i].dirty; if (free < 0 || free > c->leb_size || free % c->min_io_size || (free & 7)) return -EINVAL; if (dirty < 0 || dirty > c->leb_size || (dirty & 7)) return -EINVAL; if (dirty + free > c->leb_size) return -EINVAL; } return 0; } /** * set_pnode_lnum - set LEB numbers on a pnode. * @c: UBIFS file-system description object * @pnode: pnode to update * * This function calculates the LEB numbers for the LEB properties it contains * based on the pnode number. */ static void set_pnode_lnum(const struct ubifs_info *c, struct ubifs_pnode *pnode) { int i, lnum; lnum = (pnode->num << UBIFS_LPT_FANOUT_SHIFT) + c->main_first; for (i = 0; i < UBIFS_LPT_FANOUT; i++) { if (lnum >= c->leb_cnt) return; pnode->lprops[i].lnum = lnum++; } } /** * ubifs_read_nnode - read a nnode from flash and link it to the tree in memory. * @c: UBIFS file-system description object * @parent: parent nnode (or NULL for the root) * @iip: index in parent * * This function returns %0 on success and a negative error code on failure. */ int ubifs_read_nnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip) { struct ubifs_nbranch *branch = NULL; struct ubifs_nnode *nnode = NULL; void *buf = c->lpt_nod_buf; int err, lnum, offs; if (parent) { branch = &parent->nbranch[iip]; lnum = branch->lnum; offs = branch->offs; } else { lnum = c->lpt_lnum; offs = c->lpt_offs; } nnode = kzalloc(sizeof(struct ubifs_nnode), GFP_NOFS); if (!nnode) { err = -ENOMEM; goto out; } if (lnum == 0) { /* * This nnode was not written which just means that the LEB * properties in the subtree below it describe empty LEBs. We * make the nnode as though we had read it, which in fact means * doing almost nothing. */ if (c->big_lpt) nnode->num = calc_nnode_num_from_parent(c, parent, iip); } else { err = ubifs_leb_read(c, lnum, buf, offs, c->nnode_sz, 1); if (err) goto out; err = ubifs_unpack_nnode(c, buf, nnode); if (err) goto out; } err = validate_nnode(c, nnode, parent, iip); if (err) goto out; if (!c->big_lpt) nnode->num = calc_nnode_num_from_parent(c, parent, iip); if (parent) { branch->nnode = nnode; nnode->level = parent->level - 1; } else { c->nroot = nnode; nnode->level = c->lpt_hght; } nnode->parent = parent; nnode->iip = iip; return 0; out: ubifs_err(c, "error %d reading nnode at %d:%d", err, lnum, offs); dump_stack(); kfree(nnode); return err; } /** * read_pnode - read a pnode from flash and link it to the tree in memory. * @c: UBIFS file-system description object * @parent: parent nnode * @iip: index in parent * * This function returns %0 on success and a negative error code on failure. 
*/ static int read_pnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip) { struct ubifs_nbranch *branch; struct ubifs_pnode *pnode = NULL; void *buf = c->lpt_nod_buf; int err, lnum, offs; branch = &parent->nbranch[iip]; lnum = branch->lnum; offs = branch->offs; pnode = kzalloc(sizeof(struct ubifs_pnode), GFP_NOFS); if (!pnode) return -ENOMEM; if (lnum == 0) { /* * This pnode was not written which just means that the LEB * properties in it describe empty LEBs. We make the pnode as * though we had read it. */ int i; if (c->big_lpt) pnode->num = calc_pnode_num_from_parent(c, parent, iip); for (i = 0; i < UBIFS_LPT_FANOUT; i++) { struct ubifs_lprops * const lprops = &pnode->lprops[i]; lprops->free = c->leb_size; lprops->flags = ubifs_categorize_lprops(c, lprops); } } else { err = ubifs_leb_read(c, lnum, buf, offs, c->pnode_sz, 1); if (err) goto out; err = unpack_pnode(c, buf, pnode); if (err) goto out; } err = validate_pnode(c, pnode, parent, iip); if (err) goto out; if (!c->big_lpt) pnode->num = calc_pnode_num_from_parent(c, parent, iip); branch->pnode = pnode; pnode->parent = parent; pnode->iip = iip; set_pnode_lnum(c, pnode); c->pnodes_have += 1; return 0; out: ubifs_err(c, "error %d reading pnode at %d:%d", err, lnum, offs); ubifs_dump_pnode(c, pnode, parent, iip); dump_stack(); ubifs_err(c, "calc num: %d", calc_pnode_num_from_parent(c, parent, iip)); kfree(pnode); return err; } /** * read_ltab - read LPT's own lprops table. * @c: UBIFS file-system description object * * This function returns %0 on success and a negative error code on failure. */ static int read_ltab(struct ubifs_info *c) { int err; void *buf; buf = vmalloc(c->ltab_sz); if (!buf) return -ENOMEM; err = ubifs_leb_read(c, c->ltab_lnum, buf, c->ltab_offs, c->ltab_sz, 1); if (err) goto out; err = unpack_ltab(c, buf); out: vfree(buf); return err; } /** * read_lsave - read LPT's save table. * @c: UBIFS file-system description object * * This function returns %0 on success and a negative error code on failure. */ static int read_lsave(struct ubifs_info *c) { int err, i; void *buf; buf = vmalloc(c->lsave_sz); if (!buf) return -ENOMEM; err = ubifs_leb_read(c, c->lsave_lnum, buf, c->lsave_offs, c->lsave_sz, 1); if (err) goto out; err = unpack_lsave(c, buf); if (err) goto out; for (i = 0; i < c->lsave_cnt; i++) { int lnum = c->lsave[i]; struct ubifs_lprops *lprops; /* * Due to automatic resizing, the values in the lsave table * could be beyond the volume size - just ignore them. */ if (lnum >= c->leb_cnt) continue; lprops = ubifs_lpt_lookup(c, lnum); if (IS_ERR(lprops)) { err = PTR_ERR(lprops); goto out; } } out: vfree(buf); return err; } /** * ubifs_get_nnode - get a nnode. * @c: UBIFS file-system description object * @parent: parent nnode (or NULL for the root) * @iip: index in parent * * This function returns a pointer to the nnode on success or a negative error * code on failure. */ struct ubifs_nnode *ubifs_get_nnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip) { struct ubifs_nbranch *branch; struct ubifs_nnode *nnode; int err; branch = &parent->nbranch[iip]; nnode = branch->nnode; if (nnode) return nnode; err = ubifs_read_nnode(c, parent, iip); if (err) return ERR_PTR(err); return branch->nnode; } /** * ubifs_get_pnode - get a pnode. * @c: UBIFS file-system description object * @parent: parent nnode * @iip: index in parent * * This function returns a pointer to the pnode on success or a negative error * code on failure. 
*/ struct ubifs_pnode *ubifs_get_pnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip) { struct ubifs_nbranch *branch; struct ubifs_pnode *pnode; int err; branch = &parent->nbranch[iip]; pnode = branch->pnode; if (pnode) return pnode; err = read_pnode(c, parent, iip); if (err) return ERR_PTR(err); update_cats(c, branch->pnode); return branch->pnode; } /** * ubifs_lpt_lookup - lookup LEB properties in the LPT. * @c: UBIFS file-system description object * @lnum: LEB number to lookup * * This function returns a pointer to the LEB properties on success or a * negative error code on failure. */ struct ubifs_lprops *ubifs_lpt_lookup(struct ubifs_info *c, int lnum) { int err, i, h, iip, shft; struct ubifs_nnode *nnode; struct ubifs_pnode *pnode; if (!c->nroot) { err = ubifs_read_nnode(c, NULL, 0); if (err) return ERR_PTR(err); } nnode = c->nroot; i = lnum - c->main_first; shft = c->lpt_hght * UBIFS_LPT_FANOUT_SHIFT; for (h = 1; h < c->lpt_hght; h++) { iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1)); shft -= UBIFS_LPT_FANOUT_SHIFT; nnode = ubifs_get_nnode(c, nnode, iip); if (IS_ERR(nnode)) return ERR_CAST(nnode); } iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1)); pnode = ubifs_get_pnode(c, nnode, iip); if (IS_ERR(pnode)) return ERR_CAST(pnode); iip = (i & (UBIFS_LPT_FANOUT - 1)); dbg_lp("LEB %d, free %d, dirty %d, flags %d", lnum, pnode->lprops[iip].free, pnode->lprops[iip].dirty, pnode->lprops[iip].flags); return &pnode->lprops[iip]; } /** * dirty_cow_nnode - ensure a nnode is not being committed. * @c: UBIFS file-system description object * @nnode: nnode to check * * Returns dirtied nnode on success or negative error code on failure. */ static struct ubifs_nnode *dirty_cow_nnode(struct ubifs_info *c, struct ubifs_nnode *nnode) { struct ubifs_nnode *n; int i; if (!test_bit(COW_CNODE, &nnode->flags)) { /* nnode is not being committed */ if (!test_and_set_bit(DIRTY_CNODE, &nnode->flags)) { c->dirty_nn_cnt += 1; ubifs_add_nnode_dirt(c, nnode); } return nnode; } /* nnode is being committed, so copy it */ n = kmemdup(nnode, sizeof(struct ubifs_nnode), GFP_NOFS); if (unlikely(!n)) return ERR_PTR(-ENOMEM); n->cnext = NULL; __set_bit(DIRTY_CNODE, &n->flags); __clear_bit(COW_CNODE, &n->flags); /* The children now have new parent */ for (i = 0; i < UBIFS_LPT_FANOUT; i++) { struct ubifs_nbranch *branch = &n->nbranch[i]; if (branch->cnode) branch->cnode->parent = n; } ubifs_assert(!test_bit(OBSOLETE_CNODE, &nnode->flags)); __set_bit(OBSOLETE_CNODE, &nnode->flags); c->dirty_nn_cnt += 1; ubifs_add_nnode_dirt(c, nnode); if (nnode->parent) nnode->parent->nbranch[n->iip].nnode = n; else c->nroot = n; return n; } /** * dirty_cow_pnode - ensure a pnode is not being committed. * @c: UBIFS file-system description object * @pnode: pnode to check * * Returns dirtied pnode on success or negative error code on failure. 
*/ static struct ubifs_pnode *dirty_cow_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode) { struct ubifs_pnode *p; if (!test_bit(COW_CNODE, &pnode->flags)) { /* pnode is not being committed */ if (!test_and_set_bit(DIRTY_CNODE, &pnode->flags)) { c->dirty_pn_cnt += 1; add_pnode_dirt(c, pnode); } return pnode; } /* pnode is being committed, so copy it */ p = kmemdup(pnode, sizeof(struct ubifs_pnode), GFP_NOFS); if (unlikely(!p)) return ERR_PTR(-ENOMEM); p->cnext = NULL; __set_bit(DIRTY_CNODE, &p->flags); __clear_bit(COW_CNODE, &p->flags); replace_cats(c, pnode, p); ubifs_assert(!test_bit(OBSOLETE_CNODE, &pnode->flags)); __set_bit(OBSOLETE_CNODE, &pnode->flags); c->dirty_pn_cnt += 1; add_pnode_dirt(c, pnode); pnode->parent->nbranch[p->iip].pnode = p; return p; } /** * ubifs_lpt_lookup_dirty - lookup LEB properties in the LPT. * @c: UBIFS file-system description object * @lnum: LEB number to lookup * * This function returns a pointer to the LEB properties on success or a * negative error code on failure. */ struct ubifs_lprops *ubifs_lpt_lookup_dirty(struct ubifs_info *c, int lnum) { int err, i, h, iip, shft; struct ubifs_nnode *nnode; struct ubifs_pnode *pnode; if (!c->nroot) { err = ubifs_read_nnode(c, NULL, 0); if (err) return ERR_PTR(err); } nnode = c->nroot; nnode = dirty_cow_nnode(c, nnode); if (IS_ERR(nnode)) return ERR_CAST(nnode); i = lnum - c->main_first; shft = c->lpt_hght * UBIFS_LPT_FANOUT_SHIFT; for (h = 1; h < c->lpt_hght; h++) { iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1)); shft -= UBIFS_LPT_FANOUT_SHIFT; nnode = ubifs_get_nnode(c, nnode, iip); if (IS_ERR(nnode)) return ERR_CAST(nnode); nnode = dirty_cow_nnode(c, nnode); if (IS_ERR(nnode)) return ERR_CAST(nnode); } iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1)); pnode = ubifs_get_pnode(c, nnode, iip); if (IS_ERR(pnode)) return ERR_CAST(pnode); pnode = dirty_cow_pnode(c, pnode); if (IS_ERR(pnode)) return ERR_CAST(pnode); iip = (i & (UBIFS_LPT_FANOUT - 1)); dbg_lp("LEB %d, free %d, dirty %d, flags %d", lnum, pnode->lprops[iip].free, pnode->lprops[iip].dirty, pnode->lprops[iip].flags); ubifs_assert(test_bit(DIRTY_CNODE, &pnode->flags)); return &pnode->lprops[iip]; } /** * lpt_init_rd - initialize the LPT for reading. * @c: UBIFS file-system description object * * This function returns %0 on success and a negative error code on failure. 
*/ static int lpt_init_rd(struct ubifs_info *c) { int err, i; c->ltab = vmalloc(sizeof(struct ubifs_lpt_lprops) * c->lpt_lebs); if (!c->ltab) return -ENOMEM; i = max_t(int, c->nnode_sz, c->pnode_sz); c->lpt_nod_buf = kmalloc(i, GFP_KERNEL); if (!c->lpt_nod_buf) return -ENOMEM; for (i = 0; i < LPROPS_HEAP_CNT; i++) { c->lpt_heap[i].arr = kmalloc(sizeof(void *) * LPT_HEAP_SZ, GFP_KERNEL); if (!c->lpt_heap[i].arr) return -ENOMEM; c->lpt_heap[i].cnt = 0; c->lpt_heap[i].max_cnt = LPT_HEAP_SZ; } c->dirty_idx.arr = kmalloc(sizeof(void *) * LPT_HEAP_SZ, GFP_KERNEL); if (!c->dirty_idx.arr) return -ENOMEM; c->dirty_idx.cnt = 0; c->dirty_idx.max_cnt = LPT_HEAP_SZ; err = read_ltab(c); if (err) return err; dbg_lp("space_bits %d", c->space_bits); dbg_lp("lpt_lnum_bits %d", c->lpt_lnum_bits); dbg_lp("lpt_offs_bits %d", c->lpt_offs_bits); dbg_lp("lpt_spc_bits %d", c->lpt_spc_bits); dbg_lp("pcnt_bits %d", c->pcnt_bits); dbg_lp("lnum_bits %d", c->lnum_bits); dbg_lp("pnode_sz %d", c->pnode_sz); dbg_lp("nnode_sz %d", c->nnode_sz); dbg_lp("ltab_sz %d", c->ltab_sz); dbg_lp("lsave_sz %d", c->lsave_sz); dbg_lp("lsave_cnt %d", c->lsave_cnt); dbg_lp("lpt_hght %d", c->lpt_hght); dbg_lp("big_lpt %d", c->big_lpt); dbg_lp("LPT root is at %d:%d", c->lpt_lnum, c->lpt_offs); dbg_lp("LPT head is at %d:%d", c->nhead_lnum, c->nhead_offs); dbg_lp("LPT ltab is at %d:%d", c->ltab_lnum, c->ltab_offs); if (c->big_lpt) dbg_lp("LPT lsave is at %d:%d", c->lsave_lnum, c->lsave_offs); return 0; } /** * lpt_init_wr - initialize the LPT for writing. * @c: UBIFS file-system description object * * 'lpt_init_rd()' must have been called already. * * This function returns %0 on success and a negative error code on failure. */ static int lpt_init_wr(struct ubifs_info *c) { int err, i; c->ltab_cmt = vmalloc(sizeof(struct ubifs_lpt_lprops) * c->lpt_lebs); if (!c->ltab_cmt) return -ENOMEM; c->lpt_buf = vmalloc(c->leb_size); if (!c->lpt_buf) return -ENOMEM; if (c->big_lpt) { c->lsave = kmalloc(sizeof(int) * c->lsave_cnt, GFP_NOFS); if (!c->lsave) return -ENOMEM; err = read_lsave(c); if (err) return err; } for (i = 0; i < c->lpt_lebs; i++) if (c->ltab[i].free == c->leb_size) { err = ubifs_leb_unmap(c, i + c->lpt_first); if (err) return err; } return 0; } /** * ubifs_lpt_init - initialize the LPT. * @c: UBIFS file-system description object * @rd: whether to initialize lpt for reading * @wr: whether to initialize lpt for writing * * For mounting 'rw', @rd and @wr are both true. For mounting 'ro', @rd is true * and @wr is false. For mounting from 'ro' to 'rw', @rd is false and @wr is * true. * * This function returns %0 on success and a negative error code on failure. */ int ubifs_lpt_init(struct ubifs_info *c, int rd, int wr) { int err; if (rd) { err = lpt_init_rd(c); if (err) goto out_err; } if (wr) { err = lpt_init_wr(c); if (err) goto out_err; } return 0; out_err: if (wr) ubifs_lpt_free(c, 1); if (rd) ubifs_lpt_free(c, 0); return err; } /** * struct lpt_scan_node - somewhere to put nodes while we scan LPT. 
* @nnode: where to keep a nnode * @pnode: where to keep a pnode * @cnode: where to keep a cnode * @in_tree: is the node in the tree in memory * @ptr.nnode: pointer to the nnode (if it is an nnode) which may be here or in * the tree * @ptr.pnode: ditto for pnode * @ptr.cnode: ditto for cnode */ struct lpt_scan_node { union { struct ubifs_nnode nnode; struct ubifs_pnode pnode; struct ubifs_cnode cnode; }; int in_tree; union { struct ubifs_nnode *nnode; struct ubifs_pnode *pnode; struct ubifs_cnode *cnode; } ptr; }; /** * scan_get_nnode - for the scan, get a nnode from either the tree or flash. * @c: the UBIFS file-system description object * @path: where to put the nnode * @parent: parent of the nnode * @iip: index in parent of the nnode * * This function returns a pointer to the nnode on success or a negative error * code on failure. */ static struct ubifs_nnode *scan_get_nnode(struct ubifs_info *c, struct lpt_scan_node *path, struct ubifs_nnode *parent, int iip) { struct ubifs_nbranch *branch; struct ubifs_nnode *nnode; void *buf = c->lpt_nod_buf; int err; branch = &parent->nbranch[iip]; nnode = branch->nnode; if (nnode) { path->in_tree = 1; path->ptr.nnode = nnode; return nnode; } nnode = &path->nnode; path->in_tree = 0; path->ptr.nnode = nnode; memset(nnode, 0, sizeof(struct ubifs_nnode)); if (branch->lnum == 0) { /* * This nnode was not written which just means that the LEB * properties in the subtree below it describe empty LEBs. We * make the nnode as though we had read it, which in fact means * doing almost nothing. */ if (c->big_lpt) nnode->num = calc_nnode_num_from_parent(c, parent, iip); } else { err = ubifs_leb_read(c, branch->lnum, buf, branch->offs, c->nnode_sz, 1); if (err) return ERR_PTR(err); err = ubifs_unpack_nnode(c, buf, nnode); if (err) return ERR_PTR(err); } err = validate_nnode(c, nnode, parent, iip); if (err) return ERR_PTR(err); if (!c->big_lpt) nnode->num = calc_nnode_num_from_parent(c, parent, iip); nnode->level = parent->level - 1; nnode->parent = parent; nnode->iip = iip; return nnode; } /** * scan_get_pnode - for the scan, get a pnode from either the tree or flash. * @c: the UBIFS file-system description object * @path: where to put the pnode * @parent: parent of the pnode * @iip: index in parent of the pnode * * This function returns a pointer to the pnode on success or a negative error * code on failure. */ static struct ubifs_pnode *scan_get_pnode(struct ubifs_info *c, struct lpt_scan_node *path, struct ubifs_nnode *parent, int iip) { struct ubifs_nbranch *branch; struct ubifs_pnode *pnode; void *buf = c->lpt_nod_buf; int err; branch = &parent->nbranch[iip]; pnode = branch->pnode; if (pnode) { path->in_tree = 1; path->ptr.pnode = pnode; return pnode; } pnode = &path->pnode; path->in_tree = 0; path->ptr.pnode = pnode; memset(pnode, 0, sizeof(struct ubifs_pnode)); if (branch->lnum == 0) { /* * This pnode was not written which just means that the LEB * properties in it describe empty LEBs. We make the pnode as * though we had read it. 
*/ int i; if (c->big_lpt) pnode->num = calc_pnode_num_from_parent(c, parent, iip); for (i = 0; i < UBIFS_LPT_FANOUT; i++) { struct ubifs_lprops * const lprops = &pnode->lprops[i]; lprops->free = c->leb_size; lprops->flags = ubifs_categorize_lprops(c, lprops); } } else { ubifs_assert(branch->lnum >= c->lpt_first && branch->lnum <= c->lpt_last); ubifs_assert(branch->offs >= 0 && branch->offs < c->leb_size); err = ubifs_leb_read(c, branch->lnum, buf, branch->offs, c->pnode_sz, 1); if (err) return ERR_PTR(err); err = unpack_pnode(c, buf, pnode); if (err) return ERR_PTR(err); } err = validate_pnode(c, pnode, parent, iip); if (err) return ERR_PTR(err); if (!c->big_lpt) pnode->num = calc_pnode_num_from_parent(c, parent, iip); pnode->parent = parent; pnode->iip = iip; set_pnode_lnum(c, pnode); return pnode; } /** * ubifs_lpt_scan_nolock - scan the LPT. * @c: the UBIFS file-system description object * @start_lnum: LEB number from which to start scanning * @end_lnum: LEB number at which to stop scanning * @scan_cb: callback function called for each lprops * @data: data to be passed to the callback function * * This function returns %0 on success and a negative error code on failure. */ int ubifs_lpt_scan_nolock(struct ubifs_info *c, int start_lnum, int end_lnum, ubifs_lpt_scan_callback scan_cb, void *data) { int err = 0, i, h, iip, shft; struct ubifs_nnode *nnode; struct ubifs_pnode *pnode; struct lpt_scan_node *path; if (start_lnum == -1) { start_lnum = end_lnum + 1; if (start_lnum >= c->leb_cnt) start_lnum = c->main_first; } ubifs_assert(start_lnum >= c->main_first && start_lnum < c->leb_cnt); ubifs_assert(end_lnum >= c->main_first && end_lnum < c->leb_cnt); if (!c->nroot) { err = ubifs_read_nnode(c, NULL, 0); if (err) return err; } path = kmalloc(sizeof(struct lpt_scan_node) * (c->lpt_hght + 1), GFP_NOFS); if (!path) return -ENOMEM; path[0].ptr.nnode = c->nroot; path[0].in_tree = 1; again: /* Descend to the pnode containing start_lnum */ nnode = c->nroot; i = start_lnum - c->main_first; shft = c->lpt_hght * UBIFS_LPT_FANOUT_SHIFT; for (h = 1; h < c->lpt_hght; h++) { iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1)); shft -= UBIFS_LPT_FANOUT_SHIFT; nnode = scan_get_nnode(c, path + h, nnode, iip); if (IS_ERR(nnode)) { err = PTR_ERR(nnode); goto out; } } iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1)); pnode = scan_get_pnode(c, path + h, nnode, iip); if (IS_ERR(pnode)) { err = PTR_ERR(pnode); goto out; } iip = (i & (UBIFS_LPT_FANOUT - 1)); /* Loop for each lprops */ while (1) { struct ubifs_lprops *lprops = &pnode->lprops[iip]; int ret, lnum = lprops->lnum; ret = scan_cb(c, lprops, path[h].in_tree, data); if (ret < 0) { err = ret; goto out; } if (ret & LPT_SCAN_ADD) { /* Add all the nodes in path to the tree in memory */ for (h = 1; h < c->lpt_hght; h++) { const size_t sz = sizeof(struct ubifs_nnode); struct ubifs_nnode *parent; if (path[h].in_tree) continue; nnode = kmemdup(&path[h].nnode, sz, GFP_NOFS); if (!nnode) { err = -ENOMEM; goto out; } parent = nnode->parent; parent->nbranch[nnode->iip].nnode = nnode; path[h].ptr.nnode = nnode; path[h].in_tree = 1; path[h + 1].cnode.parent = nnode; } if (path[h].in_tree) ubifs_ensure_cat(c, lprops); else { const size_t sz = sizeof(struct ubifs_pnode); struct ubifs_nnode *parent; pnode = kmemdup(&path[h].pnode, sz, GFP_NOFS); if (!pnode) { err = -ENOMEM; goto out; } parent = pnode->parent; parent->nbranch[pnode->iip].pnode = pnode; path[h].ptr.pnode = pnode; path[h].in_tree = 1; update_cats(c, pnode); c->pnodes_have += 1; } err = dbg_check_lpt_nodes(c, (struct 
ubifs_cnode *) c->nroot, 0, 0); if (err) goto out; err = dbg_check_cats(c); if (err) goto out; } if (ret & LPT_SCAN_STOP) { err = 0; break; } /* Get the next lprops */ if (lnum == end_lnum) { /* * We got to the end without finding what we were * looking for */ err = -ENOSPC; goto out; } if (lnum + 1 >= c->leb_cnt) { /* Wrap-around to the beginning */ start_lnum = c->main_first; goto again; } if (iip + 1 < UBIFS_LPT_FANOUT) { /* Next lprops is in the same pnode */ iip += 1; continue; } /* We need to get the next pnode. Go up until we can go right */ iip = pnode->iip; while (1) { h -= 1; ubifs_assert(h >= 0); nnode = path[h].ptr.nnode; if (iip + 1 < UBIFS_LPT_FANOUT) break; iip = nnode->iip; } /* Go right */ iip += 1; /* Descend to the pnode */ h += 1; for (; h < c->lpt_hght; h++) { nnode = scan_get_nnode(c, path + h, nnode, iip); if (IS_ERR(nnode)) { err = PTR_ERR(nnode); goto out; } iip = 0; } pnode = scan_get_pnode(c, path + h, nnode, iip); if (IS_ERR(pnode)) { err = PTR_ERR(pnode); goto out; } iip = 0; } out: kfree(path); return err; } /** * dbg_chk_pnode - check a pnode. * @c: the UBIFS file-system description object * @pnode: pnode to check * @col: pnode column * * This function returns %0 on success and a negative error code on failure. */ static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, int col) { int i; if (pnode->num != col) { ubifs_err(c, "pnode num %d expected %d parent num %d iip %d", pnode->num, col, pnode->parent->num, pnode->iip); return -EINVAL; } for (i = 0; i < UBIFS_LPT_FANOUT; i++) { struct ubifs_lprops *lp, *lprops = &pnode->lprops[i]; int lnum = (pnode->num << UBIFS_LPT_FANOUT_SHIFT) + i + c->main_first; int found, cat = lprops->flags & LPROPS_CAT_MASK; struct ubifs_lpt_heap *heap; struct list_head *list = NULL; if (lnum >= c->leb_cnt) continue; if (lprops->lnum != lnum) { ubifs_err(c, "bad LEB number %d expected %d", lprops->lnum, lnum); return -EINVAL; } if (lprops->flags & LPROPS_TAKEN) { if (cat != LPROPS_UNCAT) { ubifs_err(c, "LEB %d taken but not uncat %d", lprops->lnum, cat); return -EINVAL; } continue; } if (lprops->flags & LPROPS_INDEX) { switch (cat) { case LPROPS_UNCAT: case LPROPS_DIRTY_IDX: case LPROPS_FRDI_IDX: break; default: ubifs_err(c, "LEB %d index but cat %d", lprops->lnum, cat); return -EINVAL; } } else { switch (cat) { case LPROPS_UNCAT: case LPROPS_DIRTY: case LPROPS_FREE: case LPROPS_EMPTY: case LPROPS_FREEABLE: break; default: ubifs_err(c, "LEB %d not index but cat %d", lprops->lnum, cat); return -EINVAL; } } switch (cat) { case LPROPS_UNCAT: list = &c->uncat_list; break; case LPROPS_EMPTY: list = &c->empty_list; break; case LPROPS_FREEABLE: list = &c->freeable_list; break; case LPROPS_FRDI_IDX: list = &c->frdi_idx_list; break; } found = 0; switch (cat) { case LPROPS_DIRTY: case LPROPS_DIRTY_IDX: case LPROPS_FREE: heap = &c->lpt_heap[cat - 1]; if (lprops->hpos < heap->cnt && heap->arr[lprops->hpos] == lprops) found = 1; break; case LPROPS_UNCAT: case LPROPS_EMPTY: case LPROPS_FREEABLE: case LPROPS_FRDI_IDX: list_for_each_entry(lp, list, list) if (lprops == lp) { found = 1; break; } break; } if (!found) { ubifs_err(c, "LEB %d cat %d not found in cat heap/list", lprops->lnum, cat); return -EINVAL; } switch (cat) { case LPROPS_EMPTY: if (lprops->free != c->leb_size) { ubifs_err(c, "LEB %d cat %d free %d dirty %d", lprops->lnum, cat, lprops->free, lprops->dirty); return -EINVAL; } break; case LPROPS_FREEABLE: case LPROPS_FRDI_IDX: if (lprops->free + lprops->dirty != c->leb_size) { ubifs_err(c, "LEB %d cat %d free %d dirty 
%d", lprops->lnum, cat, lprops->free, lprops->dirty); return -EINVAL; } break; } } return 0; } /** * dbg_check_lpt_nodes - check nnodes and pnodes. * @c: the UBIFS file-system description object * @cnode: next cnode (nnode or pnode) to check * @row: row of cnode (root is zero) * @col: column of cnode (leftmost is zero) * * This function returns %0 on success and a negative error code on failure. */ int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode, int row, int col) { struct ubifs_nnode *nnode, *nn; struct ubifs_cnode *cn; int num, iip = 0, err; if (!dbg_is_chk_lprops(c)) return 0; while (cnode) { ubifs_assert(row >= 0); nnode = cnode->parent; if (cnode->level) { /* cnode is a nnode */ num = calc_nnode_num(row, col); if (cnode->num != num) { ubifs_err(c, "nnode num %d expected %d parent num %d iip %d", cnode->num, num, (nnode ? nnode->num : 0), cnode->iip); return -EINVAL; } nn = (struct ubifs_nnode *)cnode; while (iip < UBIFS_LPT_FANOUT) { cn = nn->nbranch[iip].cnode; if (cn) { /* Go down */ row += 1; col <<= UBIFS_LPT_FANOUT_SHIFT; col += iip; iip = 0; cnode = cn; break; } /* Go right */ iip += 1; } if (iip < UBIFS_LPT_FANOUT) continue; } else { struct ubifs_pnode *pnode; /* cnode is a pnode */ pnode = (struct ubifs_pnode *)cnode; err = dbg_chk_pnode(c, pnode, col); if (err) return err; } /* Go up and to the right */ row -= 1; col >>= UBIFS_LPT_FANOUT_SHIFT; iip = cnode->iip + 1; cnode = (struct ubifs_cnode *)nnode; } return 0; }
Strip Caller ID for a specific incoming route
Is there a possibility that you can strip the incoming caller ID on just one incoming route and prepend something like “Private”? If so, can someone point me in the right direction for doing this?
Thank You
There is the Set CallerID module; you can route calls from that route through this module to alter the CID. The original CID will still show up in CDRs, but it will not show up on phones as they ring.
https://wiki.freepbx.org/display/FPG/Set+CallerID+Module
Thank you for the reply. I did look at that module, but I did not see where I could tag the caller ID for a specific incoming route. How do I make this module work only for a specific incoming route?
You use the destination of the inbound route to send the caller to the Set CID:
Inbound Route → Set CallerID → etc
I finally understood. Under inbound routes, instead of extensions / EXT#, I chose Set CallerID and I set the ext underneath the SetCallerID module. It worked like a charm. Thank you for your help!
One of the greatest ironies in the computer world is that laptops should not be used on laps. The notebook, the laptop's smaller cousin, has a name that makes a little more sense: you can use a notebook on a notebook, but you should not use a notebook on a lap, and you can use a laptop on a notebook. Got it?
The problem comes from the way laptops and notebooks are designed to ventilate air; most machines have inflow vents on the bottom, truly a genius move. Laptop airflow works great only on a flat surface due to four little feet, each a few millimeters in height, that provide a gap of space between the bottom of the machine and the flat surface.
If the machine sits on an uneven fabric covered surface, like a lap, then the vent will become covered, proper airflow will cease, and overheating will occur. This is the reason you might have complained about a laptop becoming too hot after propping it up with a pillow. With the air vent covered, the machine has to work twice as hard, and if the vent stays covered for an extended period of time, it will harm your laptop.
Of course every laptop and notebook is designed differently, some models have the outflow vents on the bottom and the inflow vents on the sides, while others may have both vents on the sides. No matter the make or model, if any of the vents become covered, then this can spell bad news for your machine. When using a laptop you should also be aware of your surroundings, if you are using it on a floor or on a piece of furniture, then the intake vent will suck in debris like dust and hair, this can also contribute to overheating.
The solution is to always use your laptop on a hard surface, like a desk or table. A more convenient solution is a laptop pillow desk. This is basically a pillow with a hard surface attached to the top that allows you to safely use your laptop on your lap, your bed, or any plushy surface. Laptop pillow desks can be found at major retailers, and they range in price from $10-$30; the pricier ones even have bonus features like a pencil tray and cup holders.
Another solution is to trade out your laptop for a tablet. Tablets are even better for mobility because they don’t heat up as much, so they do not require air vents. The power of tablet computers is improving with each generation, but tablets still fall short of doing everything a laptop can do.
With proper laptop and notebook usage, you can get maximum performance from your computer and it will run smoothly for years. Think your laptop needs a tune up or a good cleaning? Contact The Server Group Integrated Technology at 1-877-777-6514 and keep your hardware properly maintained.
Multithreading
Thread State
Throughout its life, a Java thread is in one of several states. A thread's state indicates what the thread is doing and what it is capable of doing at that point in its life: is it running? Is it sleeping? Is it dead?
The sections below walk through the various states that a Java thread can be in during its life, and show which method calls cause a transition from one state to another.
New Thread
The following statement creates a new thread but does not start it, thereby leaving the thread in the "New Thread" state.
Thread myThread = new MyThreadClass();
When a thread is in the "New Thread" state, it is merely an empty Thread object. No system resources have been allocated for it yet. Thus when a thread is in this state, we can only start the thread or stop it. Calling any method besides start() or stop() when a thread is in this state makes no sense and causes an IllegalThreadStateException.
Runnable
Now consider these two lines of code:
Thread myThread = new MyThreadClass();
myThread.start();
The start() method creates the system resources necessary to run the thread, schedules the thread to run, and calls the thread's run() method. At this point the thread is in the "Runnable" state. This state is called "Runnable" rather than "Running" because the thread might not actually be running when it is in this state. Many computers have a single processor, making it impossible to run all "Runnable" threads at the same time. So, the Java runtime system must implement a scheduling scheme that shares the processor between all "Runnable" threads.
Not Runnable
A thread enters the "Not Runnable" state when one of these four events occurs:
1. Someone invokes its sleep() method.
2. Someone invokes its suspend() method.
3. The thread uses its wait() method to wait on a condition variable.
4. The thread is blocking on I/O.
For example,
The sleep() call in the following code snippet puts the current thread to sleep for 10 seconds (10,000 milliseconds):
try
{
    Thread.sleep(10000);
} catch (InterruptedException e)
{
}
During the 10 seconds that myThread is asleep; even if the processor becomes available myThread does not run. After the 10 seconds are up, myThread becomes "Runnable" again and, if the processor becomes available, runs.
If a thread has been put to sleep, then the specified number of milliseconds must elapse before the thread becomes "Runnable" again. Calling resume() on a sleeping thread has no effect.
The following indicates the escape route for every entrance into the "Not Runnable" state.
1. If a thread has been put to sleep, then the specified number of milliseconds must elapse.
2. If a thread has been suspended, then someone must call its resume() method.
3. If a thread is waiting on a condition variable, whatever object owns the variable must relinquish it by calling either notify() or notifyAll().
4. If a thread is blocked on I/O, then the I/O must complete.
Dead
A thread can die in two ways: either from natural causes, or by being killed (stopped). A thread dies naturally when its run() method exits normally.
For example,
The while loop in this method is a finite loop--it will iterate 100 times and then exit.
public void run()
{
    int i = 0;
    while (i < 100)
    {
        i++;
        System.out.println("i = " + i);
    }
}
A thread with this run() method will die naturally after the loop and the run() method completes.
We can also kill a thread at any time by calling its stop() method.
The following code snippet creates and starts myThread, then puts the current thread to sleep for 10 seconds. When the current thread wakes up, the final call to stop() in the code segment kills myThread.
Thread myThread = new MyThreadClass();
myThread.start();
try
{
    Thread.sleep(10000);
} catch (InterruptedException e)
{
}
myThread.stop();
The stop() method throws a ThreadDeath object at the thread to kill it. Thus when a thread is killed in this manner it dies asynchronously. The thread will die when it actually receives the ThreadDeath exception.
IllegalThreadStateException
The runtime system throws an IllegalThreadStateException when we call a method on a thread and that thread's state does not allow for that method call.
For example,
IllegalThreadStateException is thrown when we invoke suspend() on a thread that is not "Runnable".
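Another common way to trigger this exception (a minimal sketch, reusing the hypothetical MyThreadClass from the earlier examples) is to call start() on a thread that has already been started, because such a thread is no longer a "New Thread":
Thread myThread = new MyThreadClass();
myThread.start();
myThread.start();   // throws IllegalThreadStateException -- the thread is not a "New Thread" any more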
The isAlive() Method
A final word about thread state: the programming interface for the Thread class includes a method called isAlive().
The isAlive() method returns true if the thread has been started and not stopped.
Thus, if the isAlive() method returns false we know that the thread is either a "New Thread" or "Dead".
If the isAlive() method returns true, we know that the thread is either "Runnable" or "Not Runnable".
We cannot differentiate between a "New Thread" and a "Dead" thread; nor can we differentiate between a "Runnable" thread and a "Not Runnable" thread.
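As a small illustration (not part of the original text, again assuming the MyThreadClass used earlier), isAlive() only tells us which pair of states the thread could be in:
Thread myThread = new MyThreadClass();
System.out.println(myThread.isAlive());   // false: "New Thread" now, or "Dead" once it has finished or been stopped

myThread.start();
System.out.println(myThread.isAlive());   // true: "Runnable" or "Not Runnable"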
Thread Priority
A thread's priority tells the Java thread scheduler when this thread should run in relation to other threads.
Some points
1. Most computers have only one CPU, thus threads must share the CPU with other threads. The execution of multiple threads on a single CPU, in some order, is called scheduling. The Java runtime supports a very simple, deterministic scheduling algorithm known as fixed priority scheduling.
2. Each Java thread is given a numeric priority between MIN_PRIORITY and MAX_PRIORITY (constants defined in class Thread). At any given time, when multiple threads are ready to be executed, the thread with the highest priority will be chosen for execution. Only when that thread stops, or is suspended for some reason, will a lower priority thread start executing.
3. Scheduling of the CPU is fully preemptive. If a thread with a higher priority than the currently executing thread needs to execute, the higher priority thread is immediately scheduled.
4. The Java runtime will not preempt the currently running thread for another thread of the same priority. In other words, the Java runtime does not time-slice. However, the system implementation of threads underlying the Java Thread class may support time-slicing. Do not write code that relies on time-slicing.
5. In addition, a given thread may, at any time, give up its right to execute by calling the yield() method. Threads can only yield the CPU to other threads of the same
priority--attempts to yield to a lower priority thread are ignored.
6. When all the "Runnable" threads in the system have the same priority, the scheduler chooses the next thread to run in a simple, non-preemptive, round-robin scheduling order.
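The sketch below (an illustration only, assuming MyThreadClass extends Thread as in the earlier examples) shows how priorities are assigned with setPriority() and how a thread can voluntarily give up the processor with yield():
Thread low = new MyThreadClass();
Thread high = new MyThreadClass();

low.setPriority(Thread.MIN_PRIORITY);    // priority 1
high.setPriority(Thread.MAX_PRIORITY);   // priority 10

low.start();
high.start();   // with fixed priority scheduling, "high" is chosen whenever both are "Runnable"

// Inside a run() method, a thread can give up its turn to other threads of the same priority:
// Thread.yield();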
Daemon Threads
Daemon threads are those that provide a service for other threads in the system. Any Java thread can be a daemon thread.
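For example (a minimal sketch, not from the original text), a thread is marked as a daemon by calling setDaemon(true) before it is started:
Thread service = new MyThreadClass();
service.setDaemon(true);    // must be called before start(), otherwise IllegalThreadStateException
service.start();

System.out.println(service.isDaemon());   // prints true
// The Java runtime exits when the only threads still running are daemon threads.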
I would expect the following three associative arrays to be identical:
arr1 = { "dynamic":"foo", "bar":"baz" };
key = "dynamic";
arr2 = { key:"foo", "bar":"baz" };
arr3 = {};
arr3[key] = "foo";
arr3["bar"] = "baz";
In the above examples, arr1 and arr3 are the same, but arr2 is different.
Is it possible to use dynamic keys in the declaration of a javascript associative array?
No you cannot use variables as keys in an object literal declaration. – Musa Jul 24 '13 at 17:09
No – Bergi Jul 24 '13 at 17:30
duplicate of Using a variable for a Javascript object key (and many others) – Bergi Jul 24 '13 at 17:38
Only the [] syntax works for dynamic keys. You cannot use them in a literal. So your answer is no, it's not possible.
But you can use a literal to create all the static keys and then add the dynamic ones using the [] syntax. That's usually prettier than using the . or [] notation for all elements.
Since you asked for a one liner, try this:
var key = 'dynamic', obj = (function(o) { o[key]='foo'; return o;})({bar: 'baz'});
This will make obj equal to {bar: "baz", dynamic: "foo"}
I found a solution for this.
Do the following:
var field = 'name';
var ourVar = {};
ourVar[field] = 'Something';
Source: Javascript: variable as array key
/* * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. * */ #ifndef SHARE_VM_GC_SHARED_SPACE_HPP #define SHARE_VM_GC_SHARED_SPACE_HPP #include "gc/shared/blockOffsetTable.hpp" #include "gc/shared/cardTable.hpp" #include "gc/shared/workgroup.hpp" #include "memory/allocation.hpp" #include "memory/iterator.hpp" #include "memory/memRegion.hpp" #include "oops/markOop.hpp" #include "runtime/mutexLocker.hpp" #include "utilities/align.hpp" #include "utilities/macros.hpp" // A space is an abstraction for the "storage units" backing // up the generation abstraction. It includes specific // implementations for keeping track of free and used space, // for iterating over objects and free blocks, etc. // Forward decls. class Space; class BlockOffsetArray; class BlockOffsetArrayContigSpace; class Generation; class CompactibleSpace; class BlockOffsetTable; class CardTableRS; class DirtyCardToOopClosure; // A Space describes a heap area. Class Space is an abstract // base class. // // Space supports allocation, size computation and GC support is provided. // // Invariant: bottom() and end() are on page_size boundaries and // bottom() <= top() <= end() // top() is inclusive and end() is exclusive. class Space: public CHeapObj { friend class VMStructs; protected: HeapWord* _bottom; HeapWord* _end; // Used in support of save_marks() HeapWord* _saved_mark_word; // A sequential tasks done structure. This supports // parallel GC, where we have threads dynamically // claiming sub-tasks from a larger parallel task. SequentialSubTasksDone _par_seq_tasks; Space(): _bottom(NULL), _end(NULL) { } public: // Accessors HeapWord* bottom() const { return _bottom; } HeapWord* end() const { return _end; } virtual void set_bottom(HeapWord* value) { _bottom = value; } virtual void set_end(HeapWord* value) { _end = value; } virtual HeapWord* saved_mark_word() const { return _saved_mark_word; } void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; } // Returns true if this object has been allocated since a // generation's "save_marks" call. virtual bool obj_allocated_since_save_marks(const oop obj) const { return (HeapWord*)obj >= saved_mark_word(); } virtual MemRegionClosure* preconsumptionDirtyCardClosure() const { return NULL; } // Returns a subregion of the space containing only the allocated objects in // the space. virtual MemRegion used_region() const = 0; // Returns a region that is guaranteed to contain (at least) all objects // allocated at the time of the last call to "save_marks". 
If the space // initializes its DirtyCardToOopClosure's specifying the "contig" option // (that is, if the space is contiguous), then this region must contain only // such objects: the memregion will be from the bottom of the region to the // saved mark. Otherwise, the "obj_allocated_since_save_marks" method of // the space must distinguish between objects in the region allocated before // and after the call to save marks. MemRegion used_region_at_save_marks() const { return MemRegion(bottom(), saved_mark_word()); } // Initialization. // "initialize" should be called once on a space, before it is used for // any purpose. The "mr" arguments gives the bounds of the space, and // the "clear_space" argument should be true unless the memory in "mr" is // known to be zeroed. virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space); // The "clear" method must be called on a region that may have // had allocation performed in it, but is now to be considered empty. virtual void clear(bool mangle_space); // For detecting GC bugs. Should only be called at GC boundaries, since // some unused space may be used as scratch space during GC's. // We also call this when expanding a space to satisfy an allocation // request. See bug #4668531 virtual void mangle_unused_area() = 0; virtual void mangle_unused_area_complete() = 0; // Testers bool is_empty() const { return used() == 0; } bool not_empty() const { return used() > 0; } // Returns true iff the given the space contains the // given address as part of an allocated object. For // certain kinds of spaces, this might be a potentially // expensive operation. To prevent performance problems // on account of its inadvertent use in product jvm's, // we restrict its use to assertion checks only. bool is_in(const void* p) const { return used_region().contains(p); } bool is_in(oop obj) const { return is_in((void*)obj); } // Returns true iff the given reserved memory of the space contains the // given address. bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; } // Returns true iff the given block is not allocated. virtual bool is_free_block(const HeapWord* p) const = 0; // Test whether p is double-aligned static bool is_aligned(void* p) { return ::is_aligned(p, sizeof(double)); } // Size computations. Sizes are in bytes. size_t capacity() const { return byte_size(bottom(), end()); } virtual size_t used() const = 0; virtual size_t free() const = 0; // Iterate over all the ref-containing fields of all objects in the // space, calling "cl.do_oop" on each. Fields in objects allocated by // applications of the closure are not included in the iteration. virtual void oop_iterate(OopIterateClosure* cl); // Iterate over all objects in the space, calling "cl.do_object" on // each. Objects allocated by applications of the closure are not // included in the iteration. virtual void object_iterate(ObjectClosure* blk) = 0; // Similar to object_iterate() except only iterates over // objects whose internal references point to objects in the space. virtual void safe_object_iterate(ObjectClosure* blk) = 0; // Create and return a new dirty card to oop closure. Can be // overridden to return the appropriate type of closure // depending on the type of space in which the closure will // operate. ResourceArea allocated. 
virtual DirtyCardToOopClosure* new_dcto_cl(OopIterateClosure* cl, CardTable::PrecisionStyle precision, HeapWord* boundary, bool parallel); // If "p" is in the space, returns the address of the start of the // "block" that contains "p". We say "block" instead of "object" since // some heaps may not pack objects densely; a chunk may either be an // object or a non-object. If "p" is not in the space, return NULL. virtual HeapWord* block_start_const(const void* p) const = 0; // The non-const version may have benevolent side effects on the data // structure supporting these calls, possibly speeding up future calls. // The default implementation, however, is simply to call the const // version. virtual HeapWord* block_start(const void* p); // Requires "addr" to be the start of a chunk, and returns its size. // "addr + size" is required to be the start of a new chunk, or the end // of the active area of the heap. virtual size_t block_size(const HeapWord* addr) const = 0; // Requires "addr" to be the start of a block, and returns "TRUE" iff // the block is an object. virtual bool block_is_obj(const HeapWord* addr) const = 0; // Requires "addr" to be the start of a block, and returns "TRUE" iff // the block is an object and the object is alive. virtual bool obj_is_alive(const HeapWord* addr) const; // Allocation (return NULL if full). Assumes the caller has established // mutually exclusive access to the space. virtual HeapWord* allocate(size_t word_size) = 0; // Allocation (return NULL if full). Enforces mutual exclusion internally. virtual HeapWord* par_allocate(size_t word_size) = 0; #if INCLUDE_SERIALGC // Mark-sweep-compact support: all spaces can update pointers to objects // moving as a part of compaction. virtual void adjust_pointers() = 0; #endif virtual void print() const; virtual void print_on(outputStream* st) const; virtual void print_short() const; virtual void print_short_on(outputStream* st) const; // Accessor for parallel sequential tasks. SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; } // IF "this" is a ContiguousSpace, return it, else return NULL. virtual ContiguousSpace* toContiguousSpace() { return NULL; } // Debugging virtual void verify() const = 0; }; // A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an // OopClosure to (the addresses of) all the ref-containing fields that could // be modified by virtue of the given MemRegion being dirty. (Note that // because of the imprecise nature of the write barrier, this may iterate // over oops beyond the region.) // This base type for dirty card to oop closures handles memory regions // in non-contiguous spaces with no boundaries, and should be sub-classed // to support other space types. See ContiguousDCTOC for a sub-class // that works with ContiguousSpaces. class DirtyCardToOopClosure: public MemRegionClosureRO { protected: OopIterateClosure* _cl; Space* _sp; CardTable::PrecisionStyle _precision; HeapWord* _boundary; // If non-NULL, process only non-NULL oops // pointing below boundary. HeapWord* _min_done; // ObjHeadPreciseArray precision requires // a downwards traversal; this is the // lowest location already done (or, // alternatively, the lowest address that // shouldn't be done again. NULL means infinity.) 
NOT_PRODUCT(HeapWord* _last_bottom;) NOT_PRODUCT(HeapWord* _last_explicit_min_done;) // Get the actual top of the area on which the closure will // operate, given where the top is assumed to be (the end of the // memory region passed to do_MemRegion) and where the object // at the top is assumed to start. For example, an object may // start at the top but actually extend past the assumed top, // in which case the top becomes the end of the object. virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj); // Walk the given memory region from bottom to (actual) top // looking for objects and applying the oop closure (_cl) to // them. The base implementation of this treats the area as // blocks, where a block may or may not be an object. Sub- // classes should override this to provide more accurate // or possibly more efficient walking. virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top); public: DirtyCardToOopClosure(Space* sp, OopIterateClosure* cl, CardTable::PrecisionStyle precision, HeapWord* boundary) : _sp(sp), _cl(cl), _precision(precision), _boundary(boundary), _min_done(NULL) { NOT_PRODUCT(_last_bottom = NULL); NOT_PRODUCT(_last_explicit_min_done = NULL); } void do_MemRegion(MemRegion mr); void set_min_done(HeapWord* min_done) { _min_done = min_done; NOT_PRODUCT(_last_explicit_min_done = _min_done); } #ifndef PRODUCT void set_last_bottom(HeapWord* last_bottom) { _last_bottom = last_bottom; } #endif }; // A structure to represent a point at which objects are being copied // during compaction. class CompactPoint : public StackObj { public: Generation* gen; CompactibleSpace* space; HeapWord* threshold; CompactPoint(Generation* g = NULL) : gen(g), space(NULL), threshold(0) {} }; // A space that supports compaction operations. This is usually, but not // necessarily, a space that is normally contiguous. But, for example, a // free-list-based space whose normal collection is a mark-sweep without // compaction could still support compaction in full GC's. // // The compaction operations are implemented by the // scan_and_{adjust_pointers,compact,forward} function templates. // The following are, non-virtual, auxiliary functions used by these function templates: // - scan_limit() // - scanned_block_is_obj() // - scanned_block_size() // - adjust_obj_size() // - obj_size() // These functions are to be used exclusively by the scan_and_* function templates, // and must be defined for all (non-abstract) subclasses of CompactibleSpace. // // NOTE: Any subclasses to CompactibleSpace wanting to change/define the behavior // in any of the auxiliary functions must also override the corresponding // prepare_for_compaction/adjust_pointers/compact functions using them. // If not, such changes will not be used or have no effect on the compaction operations. // // This translates to the following dependencies: // Overrides/definitions of // - scan_limit // - scanned_block_is_obj // - scanned_block_size // require override/definition of prepare_for_compaction(). // Similar dependencies exist between // - adjust_obj_size and adjust_pointers() // - obj_size and compact(). // // Additionally, this also means that changes to block_size() or block_is_obj() that // should be effective during the compaction operations must provide a corresponding // definition of scanned_block_size/scanned_block_is_obj respectively. 
class CompactibleSpace: public Space { friend class VMStructs; friend class CompactibleFreeListSpace; private: HeapWord* _compaction_top; CompactibleSpace* _next_compaction_space; // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support. inline size_t adjust_obj_size(size_t size) const { return size; } inline size_t obj_size(const HeapWord* addr) const; template static inline void verify_up_to_first_dead(SpaceType* space) NOT_DEBUG_RETURN; template static inline void clear_empty_region(SpaceType* space); public: CompactibleSpace() : _compaction_top(NULL), _next_compaction_space(NULL) {} virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space); virtual void clear(bool mangle_space); // Used temporarily during a compaction phase to hold the value // top should have when compaction is complete. HeapWord* compaction_top() const { return _compaction_top; } void set_compaction_top(HeapWord* value) { assert(value == NULL || (value >= bottom() && value <= end()), "should point inside space"); _compaction_top = value; } // Perform operations on the space needed after a compaction // has been performed. virtual void reset_after_compaction() = 0; // Returns the next space (in the current generation) to be compacted in // the global compaction order. Also is used to select the next // space into which to compact. virtual CompactibleSpace* next_compaction_space() const { return _next_compaction_space; } void set_next_compaction_space(CompactibleSpace* csp) { _next_compaction_space = csp; } #if INCLUDE_SERIALGC // MarkSweep support phase2 // Start the process of compaction of the current space: compute // post-compaction addresses, and insert forwarding pointers. The fields // "cp->gen" and "cp->compaction_space" are the generation and space into // which we are currently compacting. This call updates "cp" as necessary, // and leaves the "compaction_top" of the final value of // "cp->compaction_space" up-to-date. Offset tables may be updated in // this phase as if the final copy had occurred; if so, "cp->threshold" // indicates when the next such action should be taken. virtual void prepare_for_compaction(CompactPoint* cp) = 0; // MarkSweep support phase3 virtual void adjust_pointers(); // MarkSweep support phase4 virtual void compact(); #endif // INCLUDE_SERIALGC // The maximum percentage of objects that can be dead in the compacted // live part of a compacted space ("deadwood" support.) virtual size_t allowed_dead_ratio() const { return 0; }; // Some contiguous spaces may maintain some data structures that should // be updated whenever an allocation crosses a boundary. This function // returns the first such boundary. // (The default implementation returns the end of the space, so the // boundary is never crossed.) virtual HeapWord* initialize_threshold() { return end(); } // "q" is an object of the given "size" that should be forwarded; // "cp" names the generation ("gen") and containing "this" (which must // also equal "cp->space"). "compact_top" is where in "this" the // next object should be forwarded to. If there is room in "this" for // the object, insert an appropriate forwarding pointer in "q". // If not, go to the next compaction space (there must // be one, since compaction must succeed -- we go to the first space of // the previous generation if necessary, updating "cp"), reset compact_top // and then forward. In either case, returns the new value of "compact_top". 
// If the forwarding crosses "cp->threshold", invokes the "cross_threshold" // function of the then-current compaction space, and updates "cp->threshold // accordingly". virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top); // Return a size with adjustments as required of the space. virtual size_t adjust_object_size_v(size_t size) const { return size; } void set_first_dead(HeapWord* value) { _first_dead = value; } void set_end_of_live(HeapWord* value) { _end_of_live = value; } protected: // Used during compaction. HeapWord* _first_dead; HeapWord* _end_of_live; // Minimum size of a free block. virtual size_t minimum_free_block_size() const { return 0; } // This the function is invoked when an allocation of an object covering // "start" to "end occurs crosses the threshold; returns the next // threshold. (The default implementation does nothing.) virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) { return end(); } // Below are template functions for scan_and_* algorithms (avoiding virtual calls). // The space argument should be a subclass of CompactibleSpace, implementing // scan_limit(), scanned_block_is_obj(), and scanned_block_size(), // and possibly also overriding obj_size(), and adjust_obj_size(). // These functions should avoid virtual calls whenever possible. #if INCLUDE_SERIALGC // Frequently calls adjust_obj_size(). template static inline void scan_and_adjust_pointers(SpaceType* space); // Frequently calls obj_size(). template static inline void scan_and_compact(SpaceType* space); // Frequently calls scanned_block_is_obj() and scanned_block_size(). // Requires the scan_limit() function. template static inline void scan_and_forward(SpaceType* space, CompactPoint* cp); #endif }; class GenSpaceMangler; // A space in which the free area is contiguous. It therefore supports // faster allocation, and compaction. class ContiguousSpace: public CompactibleSpace { friend class VMStructs; #if INCLUDE_SERIALGC // Allow scan_and_forward function to call (private) overrides for auxiliary functions on this class template friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp); #endif private: // Auxiliary functions for scan_and_forward support. // See comments for CompactibleSpace for more information. inline HeapWord* scan_limit() const { return top(); } inline bool scanned_block_is_obj(const HeapWord* addr) const { return true; // Always true, since scan_limit is top } inline size_t scanned_block_size(const HeapWord* addr) const; protected: HeapWord* _top; HeapWord* _concurrent_iteration_safe_limit; // A helper for mangling the unused area of the space in debug builds. GenSpaceMangler* _mangler; GenSpaceMangler* mangler() { return _mangler; } // Allocation helpers (return NULL if full). inline HeapWord* allocate_impl(size_t word_size); inline HeapWord* par_allocate_impl(size_t word_size); public: ContiguousSpace(); ~ContiguousSpace(); virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space); virtual void clear(bool mangle_space); // Accessors HeapWord* top() const { return _top; } void set_top(HeapWord* value) { _top = value; } void set_saved_mark() { _saved_mark_word = top(); } void reset_saved_mark() { _saved_mark_word = bottom(); } bool saved_mark_at_top() const { return saved_mark_word() == top(); } // In debug mode mangle (write it with a particular bit // pattern) the unused part of a space. // Used to save the an address in a space for later use during mangling. 
void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN; // Used to save the space's current top for later use during mangling. void set_top_for_allocations() PRODUCT_RETURN; // Mangle regions in the space from the current top up to the // previously mangled part of the space. void mangle_unused_area() PRODUCT_RETURN; // Mangle [top, end) void mangle_unused_area_complete() PRODUCT_RETURN; // Do some sparse checking on the area that should have been mangled. void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN; // Check the complete area that should have been mangled. // This code may be NULL depending on the macro DEBUG_MANGLING. void check_mangled_unused_area_complete() PRODUCT_RETURN; // Size computations: sizes in bytes. size_t capacity() const { return byte_size(bottom(), end()); } size_t used() const { return byte_size(bottom(), top()); } size_t free() const { return byte_size(top(), end()); } virtual bool is_free_block(const HeapWord* p) const; // In a contiguous space we have a more obvious bound on what parts // contain objects. MemRegion used_region() const { return MemRegion(bottom(), top()); } // Allocation (return NULL if full) virtual HeapWord* allocate(size_t word_size); virtual HeapWord* par_allocate(size_t word_size); HeapWord* allocate_aligned(size_t word_size); // Iteration void oop_iterate(OopIterateClosure* cl); void object_iterate(ObjectClosure* blk); // For contiguous spaces this method will iterate safely over objects // in the space (i.e., between bottom and top) when at a safepoint. void safe_object_iterate(ObjectClosure* blk); // Iterate over as many initialized objects in the space as possible, // calling "cl.do_object_careful" on each. Return NULL if all objects // in the space (at the start of the iteration) were iterated over. // Return an address indicating the extent of the iteration in the // event that the iteration had to return because of finding an // uninitialized object in the space, or if the closure "cl" // signaled early termination. HeapWord* object_iterate_careful(ObjectClosureCareful* cl); HeapWord* concurrent_iteration_safe_limit() { assert(_concurrent_iteration_safe_limit <= top(), "_concurrent_iteration_safe_limit update missed"); return _concurrent_iteration_safe_limit; } // changes the safe limit, all objects from bottom() to the new // limit should be properly initialized void set_concurrent_iteration_safe_limit(HeapWord* new_limit) { assert(new_limit <= top(), "uninitialized objects in the safe range"); _concurrent_iteration_safe_limit = new_limit; } // In support of parallel oop_iterate. template void par_oop_iterate(MemRegion mr, OopClosureType* blk); // Compaction support virtual void reset_after_compaction() { assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space"); set_top(compaction_top()); // set new iteration safe limit set_concurrent_iteration_safe_limit(compaction_top()); } // Override. DirtyCardToOopClosure* new_dcto_cl(OopIterateClosure* cl, CardTable::PrecisionStyle precision, HeapWord* boundary, bool parallel); // Apply "blk->do_oop" to the addresses of all reference fields in objects // starting with the _saved_mark_word, which was noted during a generation's // save_marks and is required to denote the head of an object. // Fields in objects allocated by applications of the closure // *are* included in the iteration. // Updates _saved_mark_word to point to just after the last object // iterated over. 
template void oop_since_save_marks_iterate(OopClosureType* blk); // Same as object_iterate, but starting from "mark", which is required // to denote the start of an object. Objects allocated by // applications of the closure *are* included in the iteration. virtual void object_iterate_from(HeapWord* mark, ObjectClosure* blk); // Very inefficient implementation. virtual HeapWord* block_start_const(const void* p) const; size_t block_size(const HeapWord* p) const; // If a block is in the allocated area, it is an object. bool block_is_obj(const HeapWord* p) const { return p < top(); } // Addresses for inlined allocation HeapWord** top_addr() { return &_top; } HeapWord** end_addr() { return &_end; } #if INCLUDE_SERIALGC // Overrides for more efficient compaction support. void prepare_for_compaction(CompactPoint* cp); #endif virtual void print_on(outputStream* st) const; // Checked dynamic downcasts. virtual ContiguousSpace* toContiguousSpace() { return this; } // Debugging virtual void verify() const; // Used to increase collection frequency. "factor" of 0 means entire // space. void allocate_temporary_filler(int factor); }; // A dirty card to oop closure that does filtering. // It knows how to filter out objects that are outside of the _boundary. class FilteringDCTOC : public DirtyCardToOopClosure { protected: // Override. void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top); // Walk the given memory region, from bottom to top, applying // the given oop closure to (possibly) all objects found. The // given oop closure may or may not be the same as the oop // closure with which this closure was created, as it may // be a filtering closure which makes use of the _boundary. // We offer two signatures, so the FilteringClosure static type is // apparent. virtual void walk_mem_region_with_cl(MemRegion mr, HeapWord* bottom, HeapWord* top, OopIterateClosure* cl) = 0; virtual void walk_mem_region_with_cl(MemRegion mr, HeapWord* bottom, HeapWord* top, FilteringClosure* cl) = 0; public: FilteringDCTOC(Space* sp, OopIterateClosure* cl, CardTable::PrecisionStyle precision, HeapWord* boundary) : DirtyCardToOopClosure(sp, cl, precision, boundary) {} }; // A dirty card to oop closure for contiguous spaces // (ContiguousSpace and sub-classes). // It is a FilteringClosure, as defined above, and it knows: // // 1. That the actual top of any area in a memory region // contained by the space is bounded by the end of the contiguous // region of the space. // 2. That the space is really made up of objects and not just // blocks. class ContiguousSpaceDCTOC : public FilteringDCTOC { protected: // Overrides. HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj); virtual void walk_mem_region_with_cl(MemRegion mr, HeapWord* bottom, HeapWord* top, OopIterateClosure* cl); virtual void walk_mem_region_with_cl(MemRegion mr, HeapWord* bottom, HeapWord* top, FilteringClosure* cl); public: ContiguousSpaceDCTOC(ContiguousSpace* sp, OopIterateClosure* cl, CardTable::PrecisionStyle precision, HeapWord* boundary) : FilteringDCTOC(sp, cl, precision, boundary) {} }; // A ContigSpace that Supports an efficient "block_start" operation via // a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with // other spaces.) This is the abstract base class for old generation // (tenured) spaces. 
class OffsetTableContigSpace: public ContiguousSpace { friend class VMStructs; protected: BlockOffsetArrayContigSpace _offsets; Mutex _par_alloc_lock; public: // Constructor OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray, MemRegion mr); void set_bottom(HeapWord* value); void set_end(HeapWord* value); void clear(bool mangle_space); inline HeapWord* block_start_const(const void* p) const; // Add offset table update. virtual inline HeapWord* allocate(size_t word_size); inline HeapWord* par_allocate(size_t word_size); // MarkSweep support phase3 virtual HeapWord* initialize_threshold(); virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end); virtual void print_on(outputStream* st) const; // Debugging void verify() const; }; // Class TenuredSpace is used by TenuredGeneration class TenuredSpace: public OffsetTableContigSpace { friend class VMStructs; protected: // Mark sweep support size_t allowed_dead_ratio() const; public: // Constructor TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray, MemRegion mr) : OffsetTableContigSpace(sharedOffsetArray, mr) {} }; #endif // SHARE_VM_GC_SHARED_SPACE_HPP
Linux: Using TSC in Linux User Space (on the x86 architecture)
TSC
The Time Stamp Counter (TSC) is a 64-bit timer register in Pentium-compatible processors that counts the number of clock cycles consumed by the processor. TSC provides a high-resolution timer and is commonly used for profiling and instrumenting code.
The count of ten-millisecond jiffies is the primary means by which the original timer tracks time. Unfortunately, the assumed jiffy length is rarely correct, because the hardware timer chip has a finite resolution and the smallest value by which it can be incremented usually does not divide evenly into ten milliseconds. The accumulation of error due to the imprecision in the jiffy length causes the software clock to drift over time. However, TSC is more precise and hence leads to less clock drift. At each interrupt, a TSC reading is taken and used to compute the number of cycles elapsed since boot time.
TSC can be accessed using the rdtsc instruction to measure execution time of intervening code with microsecond precision. TSC ticks can be converted to seconds by dividing by the CPU clock speed, which can be read from the kernel variable, cpu_khz.
RDTSC
• Instruction to read the TSC in x86 assembly language.
• The instruction returns a 64-bit value in two 32-bit registers, EDX:EAX.
Functions:
• gettsc()
unsigned long long gettsc(void) { //get TSC value from EDX:EAX
    unsigned long ax, dx;
    asm volatile("rdtsc" : "=a" (ax), "=d" (dx));
    return ((unsigned long long)dx << 32) + ax;
}
• Convert Cycles to Time
unsigned long long gettsctime(unsigned long long tsc, unsigned long long *t) { //tsc increments by one per clock cycle, so we get time by dividing tsc by the clock rate
    float mhz = getmhz();
    *t = (unsigned long long)((double)tsc / (double)mhz * 1000); //cycles / MHz = microseconds; *1000 converts to nanoseconds
    return *t;
}
• getmhz()
In Linux, parse /proc/cpuinfo to get the CPU MHz (a minimal sketch is shown below).
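The body of getmhz() is not shown on this page; the following is only a sketch, assuming the first "cpu MHz" line of /proc/cpuinfo is the value wanted and that returning 0 on failure is acceptable (both are assumptions, not part of the original page).

#include <stdio.h>
#include <string.h>

//sketch of getmhz(): read the first "cpu MHz" entry from /proc/cpuinfo
float getmhz(void)
{
    FILE *fp = fopen("/proc/cpuinfo", "r");
    char line[256];
    float mhz = 0.0f;
    if (fp == NULL)
        return 0.0f; //assumption: 0 signals failure
    while (fgets(line, sizeof(line), fp) != NULL) {
        if (strncmp(line, "cpu MHz", 7) == 0) { //line looks like "cpu MHz : 2394.051"
            char *colon = strchr(line, ':');
            if (colon != NULL)
                sscanf(colon + 1, "%f", &mhz);
            break;
        }
    }
    fclose(fp);
    return mhz;
}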
Example:
• Time Measurement
int main(void) //measure the elapsed time of "sleep(3)"
{
    unsigned long long tsc, start, end;
    tsc = gettsc(); //get tsc value
    gettsctime(tsc, &start); //get time at the start of the measured program interval
    sleep(3); //measured program interval
    tsc = gettsc(); //get tsc value
    gettsctime(tsc, &end); //get time at the end of the measured program interval
    gettscinterval(start, end); //get time interval of the measured program (see note below)
    return 0;
}
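gettscinterval() is called in the example above but is not defined on this page. A minimal hypothetical sketch, assuming it simply prints end minus start in whatever unit gettsctime() produced, could be:

#include <stdio.h>

//hypothetical helper: print the elapsed time between two gettsctime() values
void gettscinterval(unsigned long long start, unsigned long long end)
{
    printf("elapsed: %llu\n", end - start);
}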
Cyl"in*droid (s?l"?n-droid), n. [Gr. cylinder + -oid: cf. F. cylindrode.]
1.
A solid body resembling a right cylinder, but having the bases or ends elliptical.
2. Geom.
A certain surface of the third degree, described by a moving straight line; -- used to illustrate the motions of a rigid body and also the forces acting on the body.
© Webster 1913.
3.370 \(\int \frac{e^{n \coth ^{-1}(a x)}}{(c-a c x)^3} \, dx\)
Optimal. Leaf size=104 \[ \frac{\left (1-\frac{1}{a x}\right )^{-\frac{n}{2}-2} \left (\frac{1}{a x}+1\right )^{\frac{n+2}{2}}}{a c^3 (n+4)}-\frac{(n+3) \left (1-\frac{1}{a x}\right )^{-\frac{n}{2}-1} \left (\frac{1}{a x}+1\right )^{\frac{n+2}{2}}}{a c^3 (n+2) (n+4)} \]
[Out]
((1 - 1/(a*x))^(-2 - n/2)*(1 + 1/(a*x))^((2 + n)/2))/(a*c^3*(4 + n)) - ((3 + n)*(1 - 1/(a*x))^(-1 - n/2)*(1 + 1/(a*x))^((2 + n)/2))/(a*c^3*(2 + n)*(4 + n))
________________________________________________________________________________________
Rubi [A] time = 0.149331, antiderivative size = 104, normalized size of antiderivative = 1., number of steps used = 4, number of rules used = 4, integrand size = 18, \(\frac{\text{number of rules}}{\text{integrand size}}\) = 0.222, Rules used = {6175, 6180, 79, 37} \[ \frac{\left (1-\frac{1}{a x}\right )^{-\frac{n}{2}-2} \left (\frac{1}{a x}+1\right )^{\frac{n+2}{2}}}{a c^3 (n+4)}-\frac{(n+3) \left (1-\frac{1}{a x}\right )^{-\frac{n}{2}-1} \left (\frac{1}{a x}+1\right )^{\frac{n+2}{2}}}{a c^3 (n+2) (n+4)} \]
Antiderivative was successfully verified.
[In]
Int[E^(n*ArcCoth[a*x])/(c - a*c*x)^3,x]
[Out]
((1 - 1/(a*x))^(-2 - n/2)*(1 + 1/(a*x))^((2 + n)/2))/(a*c^3*(4 + n)) - ((3 + n)*(1 - 1/(a*x))^(-1 - n/2)*(1 + 1/(a*x))^((2 + n)/2))/(a*c^3*(2 + n)*(4 + n))
Rule 6175
Int[E^(ArcCoth[(a_.)*(x_)]*(n_.))*(u_.)*((c_) + (d_.)*(x_))^(p_.), x_Symbol] :> Dist[d^p, Int[u*x^p*(1 + c/(d*x))^p*E^(n*ArcCoth[a*x]), x], x] /; FreeQ[{a, c, d, n}, x] && EqQ[a^2*c^2 - d^2, 0] && !IntegerQ[n/2] && IntegerQ[p]
Rule 6180
Int[E^(ArcCoth[(a_.)*(x_)]*(n_.))*((c_) + (d_.)/(x_))^(p_.)*(x_)^(m_.), x_Symbol] :> -Dist[c^p, Subst[Int[((1 + (d*x)/c)^p*(1 + x/a)^(n/2))/(x^(m + 2)*(1 - x/a)^(n/2)), x], x, 1/x], x] /; FreeQ[{a, c, d, n, p}, x] && EqQ[c^2 - a^2*d^2, 0] && !IntegerQ[n/2] && (IntegerQ[p] || GtQ[c, 0]) && IntegerQ[m]
Rule 79
Int[((a_.) + (b_.)*(x_))*((c_.) + (d_.)*(x_))^(n_.)*((e_.) + (f_.)*(x_))^(p_.), x_Symbol] :> -Simp[((b*e - a*f)*(c + d*x)^(n + 1)*(e + f*x)^(p + 1))/(f*(p + 1)*(c*f - d*e)), x] - Dist[(a*d*f*(n + p + 2) - b*(d*e*(n + 1) + c*f*(p + 1)))/(f*(p + 1)*(c*f - d*e)), Int[(c + d*x)^n*(e + f*x)^Simplify[p + 1], x], x] /; FreeQ[{a, b, c, d, e, f, n, p}, x] && !RationalQ[p] && SumSimplerQ[p, 1]
Rule 37
Int[((a_.) + (b_.)*(x_))^(m_.)*((c_.) + (d_.)*(x_))^(n_), x_Symbol] :> Simp[((a + b*x)^(m + 1)*(c + d*x)^(n + 1))/((b*c - a*d)*(m + 1)), x] /; FreeQ[{a, b, c, d, m, n}, x] && NeQ[b*c - a*d, 0] && EqQ[m + n + 2, 0] && NeQ[m, -1]
Rubi steps
\begin{align*} \int \frac{e^{n \coth ^{-1}(a x)}}{(c-a c x)^3} \, dx &=-\frac{\int \frac{e^{n \coth ^{-1}(a x)}}{\left (1-\frac{1}{a x}\right )^3 x^3} \, dx}{a^3 c^3}\\ &=\frac{\operatorname{Subst}\left (\int x \left (1-\frac{x}{a}\right )^{-3-\frac{n}{2}} \left (1+\frac{x}{a}\right )^{n/2} \, dx,x,\frac{1}{x}\right )}{a^3 c^3}\\ &=\frac{\left (1-\frac{1}{a x}\right )^{-2-\frac{n}{2}} \left (1+\frac{1}{a x}\right )^{\frac{2+n}{2}}}{a c^3 (4+n)}-\frac{(3+n) \operatorname{Subst}\left (\int \left (1-\frac{x}{a}\right )^{-2-\frac{n}{2}} \left (1+\frac{x}{a}\right )^{n/2} \, dx,x,\frac{1}{x}\right )}{a^2 c^3 (4+n)}\\ &=\frac{\left (1-\frac{1}{a x}\right )^{-2-\frac{n}{2}} \left (1+\frac{1}{a x}\right )^{\frac{2+n}{2}}}{a c^3 (4+n)}-\frac{(3+n) \left (1-\frac{1}{a x}\right )^{-1-\frac{n}{2}} \left (1+\frac{1}{a x}\right )^{\frac{2+n}{2}}}{a c^3 (2+n) (4+n)}\\ \end{align*}
Mathematica [A] time = 0.216626, size = 64, normalized size = 0.62 \[ \frac{(-a x+n+3) e^{n \coth ^{-1}(a x)} \left (\cosh \left (3 \coth ^{-1}(a x)\right )+\sinh \left (3 \coth ^{-1}(a x)\right )\right )}{a^2 c^3 (n+2) (n+4) x \sqrt{1-\frac{1}{a^2 x^2}}} \]
Antiderivative was successfully verified.
[In]
Integrate[E^(n*ArcCoth[a*x])/(c - a*c*x)^3,x]
[Out]
(E^(n*ArcCoth[a*x])*(3 + n - a*x)*(Cosh[3*ArcCoth[a*x]] + Sinh[3*ArcCoth[a*x]]))/(a^2*c^3*(2 + n)*(4 + n)*Sqrt[1 - 1/(a^2*x^2)]*x)
________________________________________________________________________________________
Maple [A] time = 0.049, size = 46, normalized size = 0.4 \begin{align*} -{\frac{{{\rm e}^{n{\rm arccoth} \left (ax\right )}} \left ( ax-n-3 \right ) \left ( ax+1 \right ) }{ \left ( ax-1 \right ) ^{2}{c}^{3} \left ({n}^{2}+6\,n+8 \right ) a}} \end{align*}
Verification of antiderivative is not currently implemented for this CAS.
[In]
int(exp(n*arccoth(a*x))/(-a*c*x+c)^3,x)
[Out]
-exp(n*arccoth(a*x))*(a*x-n-3)*(a*x+1)/(a*x-1)^2/c^3/(n^2+6*n+8)/a
________________________________________________________________________________________
Maxima [F] time = 0., size = 0, normalized size = 0. \begin{align*} -\int \frac{\left (\frac{a x - 1}{a x + 1}\right )^{\frac{1}{2} \, n}}{{\left (a c x - c\right )}^{3}}\,{d x} \end{align*}
Verification of antiderivative is not currently implemented for this CAS.
[In]
integrate(exp(n*arccoth(a*x))/(-a*c*x+c)^3,x, algorithm="maxima")
[Out]
-integrate(((a*x - 1)/(a*x + 1))^(1/2*n)/(a*c*x - c)^3, x)
________________________________________________________________________________________
Fricas [A] time = 1.62983, size = 259, normalized size = 2.49 \begin{align*} -\frac{{\left (a^{2} x^{2} +{\left (a n - 2 \, a\right )} x + n - 3\right )} \left (\frac{a x - 1}{a x + 1}\right )^{\frac{1}{2} \, n}}{a c^{3} n^{2} - 6 \, a c^{3} n + 8 \, a c^{3} +{\left (a^{3} c^{3} n^{2} - 6 \, a^{3} c^{3} n + 8 \, a^{3} c^{3}\right )} x^{2} - 2 \,{\left (a^{2} c^{3} n^{2} - 6 \, a^{2} c^{3} n + 8 \, a^{2} c^{3}\right )} x} \end{align*}
Verification of antiderivative is not currently implemented for this CAS.
[In]
integrate(exp(n*arccoth(a*x))/(-a*c*x+c)^3,x, algorithm="fricas")
[Out]
-(a^2*x^2 + (a*n - 2*a)*x + n - 3)*((a*x - 1)/(a*x + 1))^(1/2*n)/(a*c^3*n^2 - 6*a*c^3*n + 8*a*c^3 + (a^3*c^3*n^2 - 6*a^3*c^3*n + 8*a^3*c^3)*x^2 - 2*(a^2*c^3*n^2 - 6*a^2*c^3*n + 8*a^2*c^3)*x)
________________________________________________________________________________________
Sympy [F(-1)] time = 0., size = 0, normalized size = 0. \begin{align*} \text{Timed out} \end{align*}
Verification of antiderivative is not currently implemented for this CAS.
[In]
integrate(exp(n*acoth(a*x))/(-a*c*x+c)**3,x)
[Out]
Timed out
________________________________________________________________________________________
Giac [F] time = 0., size = 0, normalized size = 0. \begin{align*} \int -\frac{\left (\frac{a x - 1}{a x + 1}\right )^{\frac{1}{2} \, n}}{{\left (a c x - c\right )}^{3}}\,{d x} \end{align*}
Verification of antiderivative is not currently implemented for this CAS.
[In]
integrate(exp(n*arccoth(a*x))/(-a*c*x+c)^3,x, algorithm="giac")
[Out]
integrate(-((a*x - 1)/(a*x + 1))^(1/2*n)/(a*c*x - c)^3, x)
fetchTimestamp: Update timestamp file if needed
View source: R/fetchTimestamp.R
Description
fetchTimestamp updates a timestamp file if the local copy of a file is out of date relative to the data source. In practice, the desired definition of 'out of date' differs widely among data sources, sizes, and projects, so fetchTimestamp is a generic function with a method specific to each fetcher. Every fetchTimestamp method should write a timestamp file, or not, as required to achieve the desired make behavior. See fetchTimestampMethods and fetchTimestampHelpers for assistance in defining new fetchTimestamp methods. Also consider preferences.yaml and exceededTimeToLive() for additional options for managing data fetches.
'fetchTimestamp.sciencebase' gets the file timestamp from ScienceBase.
'fetchTimestamp.file' creates a timestamp file once, with file metadata and file contents that both match the timestamp of the viz data file. If the timestamp file already exists, it is only modified if the data file is updated.
'fetchTimestamp.url' checks a URL for a timestamp. The URL headers must include a 'last-modified' field; otherwise, this method breaks and you should write your own for the specific URL in question.
fetchTimestamp.fetcher superclass method catches missing implementation
Usage
fetchTimestamp(viz)
## S3 method for class 'sciencebase'
fetchTimestamp(viz)
## S3 method for class 'file'
fetchTimestamp(viz)
## S3 method for class 'url'
fetchTimestamp(viz)
## S3 method for class 'fetcher'
fetchTimestamp(viz)
Arguments
viz
the identifier for a fetch item in viz.yaml
Value
It doesn't matter what a fetchTimestamp method returns. It matters a lot more whether it creates/updates the timestamp file or not.
USGS-VIZLAB/vizlab documentation built on Nov. 17, 2018, 1:31 a.m.
is CM 10.1 better than stock? is it worth the trouble?
Discussion in 'Samsung Galaxy S3' started by jwilly216, Mar 19, 2013.
1. jwilly216 (Member) · Joined: Jun 22, 2012 · Messages: 416 · Likes Received: 3 · Trophy Points: 18 · Ratings: +3
Stock GS3 is pretty damn good, so I'm wondering if it's worth it to root and flash CM. Is it really any better than stock TouchWiz?
2. WillisD (Member) · Joined: Oct 26, 2010 · Messages: 203 · Likes Received: 1 · Trophy Points: 18 · Location: Virginia · Ratings: +1
It's all in what you like, and rooting is painless.
3. DroidBoardr (Member) · Joined: Mar 12, 2012 · Messages: 377 · Likes Received: 1 · Trophy Points: 18 · Ratings: +1
Except that rooting and ROM'ing will cause you to lose any app data that you currently have, which could be a pain for a lot of people. I too have been considering rooting because I just enjoy playing with things, but there is so much on this phone that I don't want to lose.
4. Nemo Aeternamn (Droid X Rescue Squad, Premium Member) · Joined: Sep 10, 2011 · Messages: 793 · Likes Received: 13 · Trophy Points: 18 · Location: Mo-ab · Ratings: +13
You can use apps like Titanium Backup, My Backup Pro, or Carbon backup.. there are some others.. but you can use these apps to back up your data and app data, so when you flash ROMs you can restore it simply enough.
Cellar Door
"Who are you people and where is my horse?"
5. DroidBoardr (Member) · Joined: Mar 12, 2012 · Messages: 377 · Likes Received: 1 · Trophy Points: 18 · Ratings: +1
From my understanding those programs can only be used after I have rooted the first time, which will erase the data. So in the future I won't lose the data when going between ROMs, but I will lose it this time. Am I mistaken in that?
6. WillisD (Member) · Joined: Oct 26, 2010 · Messages: 203 · Likes Received: 1 · Trophy Points: 18 · Location: Virginia · Ratings: +1
Rooting itself does not make you lose your data; flashing a custom ROM does, so you back up apps and data before flashing the custom ROM. Make a backup of your current setup, and you can always go back to just plain rooted.
7. DroidBoardr (Member) · Joined: Mar 12, 2012 · Messages: 377 · Likes Received: 1 · Trophy Points: 18 · Ratings: +1
Well, isn't that something. That has been my hangup. Looks like I will have to reevaluate my decision. Thanks.
8. k1ngr4t (Senior Member) · Joined: May 28, 2011 · Messages: 1,433 · Likes Received: 69 · Trophy Points: 48 · Ratings: +71 · Current Phone Model: Turbo Power Activate
Once everything is downloaded, which shouldn't take more than 5-10 minutes, it'll be less than 5 minutes till you're up and running again. It's a very simple process that we shouldn't have to go through...we should just have that sort of access. But there are tons of apps with "root" settings that make them function so much smoother.
[Free] 2018(June) Dumps4cert Cisco 400-051 Dumps with VCE and PDF Download 191-200
CCIE Collaboration (v1.1)
Question No: 191 – (Topic 1)
The video engineer wants to enable the LATM codec to allow video endpoints to communicate over audio with other IP devices. Which two characteristics should the engineer be aware of before enabling LATM on the Cisco Unified Border Element router? (Choose two.)
1. Dual tone Multifrequency interworking with LATM codec is not supported.
2. Codec transcoding between LATM and other codecs is not supported.
3. SIP UPDATE message outlined in RFC 3311 is not supported.
4. Box-to-Box High availability support feature is not supported.
5. Configure LATM under a voice class or dial peer is not supported.
6. Basic calls using flow-around or flow-through is not supported.
Answer: A,B
Explanation:
http://www.cisco.com/c/en/us/td/docs/ios-xml/ios/voice/cube/configuration/cube-book.pdf
Question No: 193 – (Topic 1)
A Jabber for Windows user is on a call with a Cisco TelePresence EX90 endpoint at the same location. During the call, the video on the Jabber for Windows application was high quality, but the video on the EX90 was choppy and slow. When the administrator checked the service rate on the EX90, it showed 2048 kbps. Which two configuration changes can fix this problem? (Choose two.)
1. Lower the bit rate in the region configuration in communication manager between the endpoints.
2. Increase the location bandwidth for immersive video between the endpoints
3. Enable BFCP in the SIP profile for the jabber client
4. Enable H.263 on the EX90
5. Replace the camera for the jabber user with the precision HD USB camera
6. Increase the bandwidth between the jabber video client and the EX90
Answer: E,F
Question No: 194 – (Topic 1)
Refer to the exhibit.
An engineer is trying to provision a CUCME with three 8841 phones; however, all phones fail to register. Which two changes in the configuration would allow the phones to register? (Choose two.)
A. The registrar server command must be added under the voice register global configuration.
B. The IP address trusted authenticate command must be added under voice service voip.
C. The source-address command must be added under the voice register global configuration.
D. The local SIP proxy address must be configured under the sip-ua configuration.
E. The registrar server command must be added under the sip section of voice service voip.
Answer: C,E
Question No: 195 – (Topic 1)
Which two rules apply to MMOH in SRST? (Choose two.)
1. A maximum of three MOH groups are allowed.
2. Cisco Unified SRST voice gateway allows you to associate phones with different MOH groups on the basis of their IP address to receive different MOH media streams.
3. A maximum of five media streams are allowed.
4. Cisco Unified SRST voice gateway allows you to associate phones with different MOH groups on the basis of their MAC address to receive different MOH media streams.
5. Cisco Unified SRST voice gateway allows you to associate phones with different MOH groups on the basis of their extension numbers to receive different MOH media streams.
Answer: C,E
Question No: 196 – (Topic 1)
Refer to the exhibit.
This output was captured on a Cisco IOS gateway shortly after it became the active Cisco Unified Border Element in a box-to-box redundancy failover.
How many calls are native to this Cisco Unified Border Element?
1. 9
2. 12
3. 19
4. 31
5. 40
Answer: D
Explanation:
To check for native and nonnative (preserved) calls when both are present, the numbers of calls on the system are as follows:
Total number of calls = "Number of calls in HA DB" + "Number of calls in HA sync pending DB".
Total number of preserved (nonnative) calls = "Number of calls in HA preserved session DB".
Total number of native calls (calls set up since the failover and therefore not preserved over the failover) is the difference of the previous two numbers. In this example, it is (28 + 12) – 9 = 31.
Question No: 197 – (Topic 1)
Refer to the exhibit.
What happens to the USB e-token after the administrator fails to enter the correct password at the next attempt?
1. The token is locked for five days, after which the retry counter resets.
2. The token is locked until unlocked by Cisco TAC.
3. The token is locked until Cisco CTL Client is uninstalled and reinstalled on the client PC.
4. The token cannot be used on the same client PC again. It can be used with another Cisco CTL Client on a different PC.
5. The token is locked forever.
Answer: E
Question No: 198 – (Topic 1)
Refer to the exhibit.
Which three events happen when Alice calls [email protected] and the URI lookup policy on the Cisco Unified CM server has been set to case insensitive? (Choose three.)
1. The RTP server routes the call to [email protected] because remote URIs have priority
2. The RTP server looks up to see if [email protected] is associated to a local number
3. The San Jose server calls [email protected] upon receiving the invite request
4. The San Jose server provides carol's directory URI using ILS exchange
5. The RTP server sends the call to [email protected] because it has priority
6. The RTP server drops the call because it has two identical matches
Answer: B,D,E
Question No: 199 – (Topic 1)
Refer to the exhibit.
Which ephone-dn can join the hunt group whenever a wild card slot becomes available?
1. ephone-dn 1
2. ephone-dn 2
3. ephone-dn 3
4. ephone-dn 4
5. ephone-dn 6
Answer: C
Question No: 200 – (Topic 1)
Which two QoS guidelines are recommended for provisioning interactive video traffic? (Choose two.)
1. Latency should be no more than 4-5 seconds.
2. Overprovision interactive video queues by 20% to accommodate bursts.
3. Loss should be no more than 5%.
4. Interactive video should be marked to DSCP CS4.
5. Jitter should be no more than 30 ms.
Answer: B,E
//========================================================================
//
// Stream.cc
//
// Copyright 1996-2003 Glyph & Cog, LLC
//
//========================================================================
//========================================================================
//
// Modified under the Poppler project - http://poppler.freedesktop.org
//
// All changes made under the Poppler project to this file are licensed
// under GPL version 2 or later
//
// Copyright (C) 2005 Jeff Muizelaar <[email protected]>
// Copyright (C) 2006-2010, 2012-2014, 2016-2021 Albert Astals Cid <[email protected]>
// Copyright (C) 2007 Krzysztof Kowalczyk <[email protected]>
// Copyright (C) 2008 Julien Rebetez <[email protected]>
// Copyright (C) 2009 Carlos Garcia Campos <[email protected]>
// Copyright (C) 2009 Glenn Ganz <[email protected]>
// Copyright (C) 2009 Stefan Thomas <[email protected]>
// Copyright (C) 2010 Hib Eris <[email protected]>
// Copyright (C) 2010 Tomas Hoger <[email protected]>
// Copyright (C) 2011, 2012, 2016, 2020 William Bader <[email protected]>
// Copyright (C) 2012, 2013, 2020 Thomas Freitag <[email protected]>
// Copyright (C) 2012, 2021 Oliver Sander <[email protected]>
// Copyright (C) 2012 Fabio D'Urso <[email protected]>
// Copyright (C) 2012 Even Rouault <[email protected]>
// Copyright (C) 2013, 2017, 2018 Adrian Johnson <[email protected]>
// Copyright (C) 2013, 2018 Adam Reichold <[email protected]>
// Copyright (C) 2013 Pino Toscano <[email protected]>
// Copyright (C) 2015 Suzuki Toshiya <[email protected]>
// Copyright (C) 2015 Jason Crain <[email protected]>
// Copyright (C) 2017 Jose Aliste <[email protected]>
// Copyright (C) 2017 Kay Dohmann <[email protected]>
// Copyright (C) 2019 Christian Persch <[email protected]>
// Copyright (C) 2019 LE GARREC Vincent <[email protected]>
// Copyright (C) 2019 Volker Krause <[email protected]>
// Copyright (C) 2019 Alexander Volkov <[email protected]>
// Copyright (C) 2020 Philipp Knechtges <[email protected]>
// Copyright (C) 2021 Hubert Figuiere <[email protected]>
// Copyright (C) 2021 Georgiy Sgibnev <[email protected]>. Work sponsored by lab50.net.
//
// To see a description of the changes please see the Changelog file that
// came with your tarball or type make ChangeLog if you are building from git
//
//========================================================================
#include <config.h>
#include <cstdio>
#include <cstdlib>
#include <cstddef>
#include <climits>
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#include <cstring>
#include <cctype>
#include "goo/gmem.h"
#include "goo/gfile.h"
#include "poppler-config.h"
#include "Error.h"
#include "Object.h"
#include "Lexer.h"
#include "GfxState.h"
#include "Stream.h"
#include "XRef.h"
#include "JBIG2Stream.h"
#include "Stream-CCITT.h"
#include "CachedFile.h"
#include "splash/SplashBitmap.h"
#ifdef ENABLE_LIBJPEG
# include "DCTStream.h"
#endif
#ifdef ENABLE_ZLIB_UNCOMPRESS
# include "FlateStream.h"
#endif
#ifdef ENABLE_LIBOPENJPEG
# include "JPEG2000Stream.h"
#else
# include "JPXStream.h"
#endif
#ifdef __DJGPP__
static bool setDJSYSFLAGS = false;
#endif
//------------------------------------------------------------------------
// Stream (base class)
//------------------------------------------------------------------------
Stream::Stream()
{
ref = 1;
}
Stream::~Stream() = default;
void Stream::close() { }
int Stream::getRawChar()
{
error(errInternal, -1, "Internal: called getRawChar() on non-predictor stream");
return EOF;
}
int Stream::getChars(int nChars, unsigned char *buffer)
{
error(errInternal, -1, "Internal: called getChars() on non-predictor stream");
return 0;
}
void Stream::getRawChars(int nChars, int *buffer)
{
error(errInternal, -1, "Internal: called getRawChars() on non-predictor stream");
}
char *Stream::getLine(char *buf, int size)
{
int i;
int c;
if (lookChar() == EOF || size < 0)
return nullptr;
for (i = 0; i < size - 1; ++i) {
c = getChar();
if (c == EOF || c == '\n')
break;
if (c == '\r') {
if ((c = lookChar()) == '\n')
getChar();
break;
}
buf[i] = c;
}
buf[i] = '\0';
return buf;
}
unsigned int Stream::discardChars(unsigned int n)
{
unsigned char buf[4096];
unsigned int count, i, j;
count = 0;
while (count < n) {
if ((i = n - count) > sizeof(buf)) {
i = (unsigned int)sizeof(buf);
}
j = (unsigned int)doGetChars((int)i, buf);
count += j;
if (j != i) {
break;
}
}
return count;
}
GooString *Stream::getPSFilter(int psLevel, const char *indent)
{
return new GooString();
}
static Stream *wrapEOFStream(Stream *str)
{
if (dynamic_cast<EOFStream *>(str)) {
// str is already a EOFStream, no need to wrap it in another EOFStream
return str;
} else {
return new EOFStream(str);
}
}
Stream *Stream::addFilters(Dict *dict, int recursion)
{
Object obj, obj2;
Object params, params2;
Stream *str;
int i;
str = this;
obj = dict->lookup("Filter", recursion);
if (obj.isNull()) {
obj = dict->lookup("F", recursion);
}
params = dict->lookup("DecodeParms", recursion);
if (params.isNull()) {
params = dict->lookup("DP", recursion);
}
if (obj.isName()) {
str = makeFilter(obj.getName(), str, &params, recursion, dict);
} else if (obj.isArray()) {
for (i = 0; i < obj.arrayGetLength(); ++i) {
obj2 = obj.arrayGet(i, recursion);
if (params.isArray())
params2 = params.arrayGet(i, recursion);
else
params2.setToNull();
if (obj2.isName()) {
str = makeFilter(obj2.getName(), str, &params2, recursion);
} else {
error(errSyntaxError, getPos(), "Bad filter name");
str = wrapEOFStream(str);
}
}
} else if (!obj.isNull()) {
error(errSyntaxError, getPos(), "Bad 'Filter' attribute in stream");
}
return str;
}
bool Stream::isEncrypted() const
{
for (const Stream *str = this; str != nullptr; str = str->getNextStream()) {
if (str->getKind() == strCrypt)
return true;
}
return false;
}
class BaseStreamStream : public Stream
{
public:
explicit BaseStreamStream(Stream *strA) : str(strA) { }
~BaseStreamStream() override;
StreamKind getKind() const override { return str->getBaseStream()->getKind(); }
void reset() override { str->getBaseStream()->reset(); }
int getChar() override { return str->getBaseStream()->getChar(); }
int lookChar() override { return str->getBaseStream()->lookChar(); }
bool isBinary(bool last = true) const override { return str->getBaseStream()->isBinary(); }
int getUnfilteredChar() override { return str->getBaseStream()->getUnfilteredChar(); }
void unfilteredReset() override { str->getBaseStream()->unfilteredReset(); }
Goffset getPos() override { return str->getBaseStream()->getPos(); }
void setPos(Goffset pos, int dir) override { str->getBaseStream()->setPos(pos, dir); }
BaseStream *getBaseStream() override { return str->getBaseStream()->getBaseStream(); }
Stream *getUndecodedStream() override { return str->getBaseStream()->getUndecodedStream(); }
Dict *getDict() override { return str->getBaseStream()->getDict(); }
Object *getDictObject() override { return str->getBaseStream()->getDictObject(); }
private:
std::unique_ptr<Stream> str;
};
BaseStreamStream::~BaseStreamStream() = default;
Stream *Stream::makeFilter(const char *name, Stream *str, Object *params, int recursion, Dict *dict)
{
int pred; // parameters
int colors;
int bits;
int early;
int encoding;
bool endOfLine, byteAlign, endOfBlock, black, damagedRowsBeforeError;
int columns, rows;
Object obj;
if (!strcmp(name, "ASCIIHexDecode") || !strcmp(name, "AHx")) {
str = new ASCIIHexStream(str);
} else if (!strcmp(name, "ASCII85Decode") || !strcmp(name, "A85")) {
str = new ASCII85Stream(str);
} else if (!strcmp(name, "LZWDecode") || !strcmp(name, "LZW")) {
pred = 1;
columns = 1;
colors = 1;
bits = 8;
early = 1;
if (params->isDict()) {
obj = params->dictLookup("Predictor", recursion);
if (obj.isInt())
pred = obj.getInt();
obj = params->dictLookup("Columns", recursion);
if (obj.isInt())
columns = obj.getInt();
obj = params->dictLookup("Colors", recursion);
if (obj.isInt())
colors = obj.getInt();
obj = params->dictLookup("BitsPerComponent", recursion);
if (obj.isInt())
bits = obj.getInt();
obj = params->dictLookup("EarlyChange", recursion);
if (obj.isInt())
early = obj.getInt();
}
str = new LZWStream(str, pred, columns, colors, bits, early);
} else if (!strcmp(name, "RunLengthDecode") || !strcmp(name, "RL")) {
str = new RunLengthStream(str);
} else if (!strcmp(name, "CCITTFaxDecode") || !strcmp(name, "CCF")) {
encoding = 0;
endOfLine = false;
byteAlign = false;
columns = 1728;
rows = 0;
endOfBlock = true;
black = false;
damagedRowsBeforeError = false;
if (params->isDict()) {
obj = params->dictLookup("K", recursion);
if (obj.isInt()) {
encoding = obj.getInt();
}
obj = params->dictLookup("EndOfLine", recursion);
if (obj.isBool()) {
endOfLine = obj.getBool();
}
obj = params->dictLookup("EncodedByteAlign", recursion);
if (obj.isBool()) {
byteAlign = obj.getBool();
}
obj = params->dictLookup("Columns", recursion);
if (obj.isInt()) {
columns = obj.getInt();
}
obj = params->dictLookup("Rows", recursion);
if (obj.isInt()) {
rows = obj.getInt();
}
obj = params->dictLookup("EndOfBlock", recursion);
if (obj.isBool()) {
endOfBlock = obj.getBool();
}
obj = params->dictLookup("BlackIs1", recursion);
if (obj.isBool()) {
black = obj.getBool();
}
obj = params->dictLookup("DamagedRowsBeforeError", recursion);
if (obj.isInt()) {
damagedRowsBeforeError = obj.getInt();
}
}
str = new CCITTFaxStream(str, encoding, endOfLine, byteAlign, columns, rows, endOfBlock, black, damagedRowsBeforeError);
} else if (!strcmp(name, "DCTDecode") || !strcmp(name, "DCT")) {
#ifdef HAVE_DCT_DECODER
int colorXform = -1;
if (params->isDict()) {
obj = params->dictLookup("ColorTransform", recursion);
if (obj.isInt()) {
colorXform = obj.getInt();
}
}
str = new DCTStream(str, colorXform, dict, recursion);
#else
error(errSyntaxError, getPos(), "Unknown filter '{0:s}'", name);
str = wrapEOFStream(str);
#endif
} else if (!strcmp(name, "FlateDecode") || !strcmp(name, "Fl")) {
pred = 1;
columns = 1;
colors = 1;
bits = 8;
if (params->isDict()) {
obj = params->dictLookup("Predictor", recursion);
if (obj.isInt())
pred = obj.getInt();
obj = params->dictLookup("Columns", recursion);
if (obj.isInt())
columns = obj.getInt();
obj = params->dictLookup("Colors", recursion);
if (obj.isInt())
colors = obj.getInt();
obj = params->dictLookup("BitsPerComponent", recursion);
if (obj.isInt())
bits = obj.getInt();
}
str = new FlateStream(str, pred, columns, colors, bits);
} else if (!strcmp(name, "JBIG2Decode")) {
Object globals;
if (params->isDict()) {
XRef *xref = params->getDict()->getXRef();
obj = params->dictLookupNF("JBIG2Globals").copy();
globals = obj.fetch(xref, recursion);
}
str = new JBIG2Stream(str, std::move(globals), &obj);
} else if (!strcmp(name, "JPXDecode")) {
#ifdef HAVE_JPX_DECODER
str = new JPXStream(str);
#else
error(errSyntaxError, getPos(), "Unknown filter '{0:s}'", name);
str = wrapEOFStream(str);
#endif
} else if (!strcmp(name, "Crypt")) {
if (str->getKind() == strCrypt) {
str = new BaseStreamStream(str);
} else {
error(errSyntaxError, getPos(), "Can't revert non decrypt streams");
}
} else {
error(errSyntaxError, getPos(), "Unknown filter '{0:s}'", name);
str = wrapEOFStream(str);
}
return str;
}
//------------------------------------------------------------------------
// OutStream
//------------------------------------------------------------------------
OutStream::OutStream() { }
OutStream::~OutStream() { }
//------------------------------------------------------------------------
// FileOutStream
//------------------------------------------------------------------------
FileOutStream::FileOutStream(FILE *fa, Goffset startA)
{
f = fa;
start = startA;
}
FileOutStream::~FileOutStream()
{
close();
}
void FileOutStream::close() { }
Goffset FileOutStream::getPos()
{
return Gftell(f);
}
void FileOutStream::put(char c)
{
fputc(c, f);
}
void FileOutStream::printf(const char *format, ...)
{
va_list argptr;
va_start(argptr, format);
vfprintf(f, format, argptr);
va_end(argptr);
}
//------------------------------------------------------------------------
// BaseStream
//------------------------------------------------------------------------
BaseStream::BaseStream(Object &&dictA, Goffset lengthA)
{
dict = std::move(dictA);
length = lengthA;
}
BaseStream::~BaseStream() { }
//------------------------------------------------------------------------
// BaseStream
//------------------------------------------------------------------------
BaseSeekInputStream::BaseSeekInputStream(Goffset startA, bool limitedA, Goffset lengthA, Object &&dictA)
: BaseStream(std::move(dictA), lengthA), start(startA), limited(limitedA), bufPtr(buf), bufEnd(buf), bufPos(start), savePos(0), saved(false)
{
}
BaseSeekInputStream::~BaseSeekInputStream() { }
void BaseSeekInputStream::reset()
{
savePos = currentPos();
setCurrentPos(start);
saved = true;
bufPtr = bufEnd = buf;
bufPos = start;
}
void BaseSeekInputStream::close()
{
if (!saved)
return;
setCurrentPos(savePos);
saved = false;
}
void BaseSeekInputStream::setPos(Goffset pos, int dir)
{
if (dir >= 0) {
setCurrentPos(pos);
bufPos = pos;
} else {
if (pos > length)
pos = length;
bufPos = length - pos;
setCurrentPos(bufPos);
}
bufPtr = bufEnd = buf;
}
void BaseSeekInputStream::moveStart(Goffset delta)
{
start += delta;
bufPtr = bufEnd = buf;
bufPos = start;
}
bool BaseSeekInputStream::fillBuf()
{
Goffset n;
bufPos += bufEnd - buf;
bufPtr = bufEnd = buf;
if (limited && bufPos >= start + length) {
return false;
}
if (limited && bufPos + seekInputStreamBufSize > start + length) {
n = start + length - bufPos;
} else {
n = seekInputStreamBufSize - (bufPos % seekInputStreamBufSize);
}
n = read(buf, n);
bufEnd = buf + n;
if (bufPtr >= bufEnd) {
return false;
}
return true;
}
int BaseSeekInputStream::getChars(int nChars, unsigned char *buffer)
{
int n, m;
n = 0;
while (n < nChars) {
if (bufPtr >= bufEnd) {
if (!fillBuf()) {
break;
}
}
m = (int)(bufEnd - bufPtr);
if (m > nChars - n) {
m = nChars - n;
}
memcpy(buffer + n, bufPtr, m);
bufPtr += m;
n += m;
}
return n;
}
//------------------------------------------------------------------------
// FilterStream
//------------------------------------------------------------------------
FilterStream::FilterStream(Stream *strA)
{
str = strA;
}
FilterStream::~FilterStream() { }
void FilterStream::close()
{
str->close();
}
void FilterStream::setPos(Goffset pos, int dir)
{
error(errInternal, -1, "Internal: called setPos() on FilterStream");
}
//------------------------------------------------------------------------
// ImageStream
//------------------------------------------------------------------------
ImageStream::ImageStream(Stream *strA, int widthA, int nCompsA, int nBitsA)
{
int imgLineSize;
str = strA;
width = widthA;
nComps = nCompsA;
nBits = nBitsA;
nVals = width * nComps;
inputLineSize = (nVals * nBits + 7) >> 3;
if (nComps <= 0 || nBits <= 0 || nVals > INT_MAX / nBits - 7 || width > INT_MAX / nComps) {
inputLineSize = -1;
}
inputLine = (unsigned char *)gmallocn_checkoverflow(inputLineSize, sizeof(char));
if (nBits == 8) {
imgLine = (unsigned char *)inputLine;
} else {
if (nBits == 1) {
imgLineSize = (nVals + 7) & ~7;
} else {
imgLineSize = nVals;
}
if (nComps <= 0 || width > INT_MAX / nComps) {
imgLineSize = -1;
}
imgLine = (unsigned char *)gmallocn_checkoverflow(imgLineSize, sizeof(unsigned char));
}
imgIdx = nVals;
}
ImageStream::~ImageStream()
{
if (imgLine != (unsigned char *)inputLine) {
gfree(imgLine);
}
gfree(inputLine);
}
void ImageStream::reset()
{
str->reset();
}
void ImageStream::close()
{
str->close();
}
bool ImageStream::getPixel(unsigned char *pix)
{
int i;
if (imgIdx >= nVals) {
if (!getLine()) {
return false;
}
imgIdx = 0;
}
for (i = 0; i < nComps; ++i) {
pix[i] = imgLine[imgIdx++];
}
return true;
}
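// Unpack one raster line into imgLine, one byte per component: 1-bit samples are
// expanded, 8-bit data is used in place, 16-bit samples keep only their high byte,
// and other depths are unpacked through a bit buffer.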
unsigned char *ImageStream::getLine()
{
if (unlikely(inputLine == nullptr)) {
return nullptr;
}
int readChars = str->doGetChars(inputLineSize, inputLine);
if (unlikely(readChars == -1)) {
readChars = 0;
}
for (; readChars < inputLineSize; readChars++)
inputLine[readChars] = EOF;
if (nBits == 1) {
unsigned char *p = inputLine;
for (int i = 0; i < nVals; i += 8) {
const int c = *p++;
imgLine[i + 0] = (unsigned char)((c >> 7) & 1);
imgLine[i + 1] = (unsigned char)((c >> 6) & 1);
imgLine[i + 2] = (unsigned char)((c >> 5) & 1);
imgLine[i + 3] = (unsigned char)((c >> 4) & 1);
imgLine[i + 4] = (unsigned char)((c >> 3) & 1);
imgLine[i + 5] = (unsigned char)((c >> 2) & 1);
imgLine[i + 6] = (unsigned char)((c >> 1) & 1);
imgLine[i + 7] = (unsigned char)(c & 1);
}
} else if (nBits == 8) {
// special case: imgLine == inputLine
} else if (nBits == 16) {
        // This is a hack to support 16-bit images: everywhere else we assume a
        // component fits in 8 bits, so with this hack we treat 16-bit images as
        // 8-bit ones until it's fixed correctly.
        // The hack has another part in GfxImageColorMap::GfxImageColorMap.
unsigned char *p = inputLine;
for (int i = 0; i < nVals; ++i) {
imgLine[i] = *p++;
p++;
}
} else {
const unsigned long bitMask = (1 << nBits) - 1;
unsigned long buf = 0;
int bits = 0;
unsigned char *p = inputLine;
for (int i = 0; i < nVals; ++i) {
while (bits < nBits) {
buf = (buf << 8) | (*p++ & 0xff);
bits += 8;
}
imgLine[i] = (unsigned char)((buf >> (bits - nBits)) & bitMask);
bits -= nBits;
}
}
return imgLine;
}
void ImageStream::skipLine()
{
str->doGetChars(inputLineSize, inputLine);
}
//------------------------------------------------------------------------
// StreamPredictor
//------------------------------------------------------------------------
StreamPredictor::StreamPredictor(Stream *strA, int predictorA, int widthA, int nCompsA, int nBitsA)
{
str = strA;
predictor = predictorA;
width = widthA;
nComps = nCompsA;
nBits = nBitsA;
predLine = nullptr;
ok = false;
nVals = width * nComps;
if (width <= 0 || nComps <= 0 || nBits <= 0 || nComps > gfxColorMaxComps || nBits > 16 || width >= INT_MAX / nComps || // check for overflow in nVals
nVals >= (INT_MAX - 7) / nBits) { // check for overflow in rowBytes
return;
}
pixBytes = (nComps * nBits + 7) >> 3;
rowBytes = ((nVals * nBits + 7) >> 3) + pixBytes;
predLine = (unsigned char *)gmalloc(rowBytes);
memset(predLine, 0, rowBytes);
predIdx = rowBytes;
ok = true;
}
StreamPredictor::~StreamPredictor()
{
gfree(predLine);
}
int StreamPredictor::lookChar()
{
if (predIdx >= rowBytes) {
if (!getNextLine()) {
return EOF;
}
}
return predLine[predIdx];
}
int StreamPredictor::getChar()
{
if (predIdx >= rowBytes) {
if (!getNextLine()) {
return EOF;
}
}
return predLine[predIdx++];
}
int StreamPredictor::getChars(int nChars, unsigned char *buffer)
{
int n, m;
n = 0;
while (n < nChars) {
if (predIdx >= rowBytes) {
if (!getNextLine()) {
break;
}
}
m = rowBytes - predIdx;
if (m > nChars - n) {
m = nChars - n;
}
memcpy(buffer + n, predLine + predIdx, m);
predIdx += m;
n += m;
}
return n;
}
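// Decode one predicted row into predLine: undo the per-row PNG byte predictor
// (None/Sub/Up/Average/Paeth), then, for Predictor 2, undo the TIFF horizontal
// component predictor.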
bool StreamPredictor::getNextLine()
{
int curPred;
unsigned char upLeftBuf[gfxColorMaxComps * 2 + 1];
int left, up, upLeft, p, pa, pb, pc;
int c;
unsigned long inBuf, outBuf;
int inBits, outBits;
int i, j, k, kk;
// get PNG optimum predictor number
if (predictor >= 10) {
if ((curPred = str->getRawChar()) == EOF) {
return false;
}
curPred += 10;
} else {
curPred = predictor;
}
// read the raw line, apply PNG (byte) predictor
int *rawCharLine = new int[rowBytes - pixBytes];
str->getRawChars(rowBytes - pixBytes, rawCharLine);
memset(upLeftBuf, 0, pixBytes + 1);
for (i = pixBytes; i < rowBytes; ++i) {
for (j = pixBytes; j > 0; --j) {
upLeftBuf[j] = upLeftBuf[j - 1];
}
upLeftBuf[0] = predLine[i];
if ((c = rawCharLine[i - pixBytes]) == EOF) {
if (i > pixBytes) {
// this ought to return false, but some (broken) PDF files
// contain truncated image data, and Adobe apparently reads the
// last partial line
break;
}
delete[] rawCharLine;
return false;
}
switch (curPred) {
case 11: // PNG sub
predLine[i] = predLine[i - pixBytes] + (unsigned char)c;
break;
case 12: // PNG up
predLine[i] = predLine[i] + (unsigned char)c;
break;
case 13: // PNG average
predLine[i] = ((predLine[i - pixBytes] + predLine[i]) >> 1) + (unsigned char)c;
break;
case 14: // PNG Paeth
left = predLine[i - pixBytes];
up = predLine[i];
upLeft = upLeftBuf[pixBytes];
p = left + up - upLeft;
if ((pa = p - left) < 0)
pa = -pa;
if ((pb = p - up) < 0)
pb = -pb;
if ((pc = p - upLeft) < 0)
pc = -pc;
if (pa <= pb && pa <= pc)
predLine[i] = left + (unsigned char)c;
else if (pb <= pc)
predLine[i] = up + (unsigned char)c;
else
predLine[i] = upLeft + (unsigned char)c;
break;
case 10: // PNG none
default: // no predictor or TIFF predictor
predLine[i] = (unsigned char)c;
break;
}
}
delete[] rawCharLine;
// apply TIFF (component) predictor
if (predictor == 2) {
if (nBits == 1 && nComps == 1) {
inBuf = predLine[pixBytes - 1];
for (i = pixBytes; i < rowBytes; ++i) {
c = predLine[i] ^ inBuf;
c ^= c >> 1;
c ^= c >> 2;
c ^= c >> 4;
inBuf = (c & 1) << 7;
predLine[i] = c;
}
} else if (nBits == 8) {
for (i = pixBytes; i < rowBytes; ++i) {
predLine[i] += predLine[i - nComps];
}
} else {
memset(upLeftBuf, 0, nComps + 1);
const unsigned long bitMask = (1 << nBits) - 1;
inBuf = outBuf = 0;
inBits = outBits = 0;
j = k = pixBytes;
for (i = 0; i < width; ++i) {
for (kk = 0; kk < nComps; ++kk) {
while (inBits < nBits) {
inBuf = (inBuf << 8) | (predLine[j++] & 0xff);
inBits += 8;
}
upLeftBuf[kk] = (unsigned char)((upLeftBuf[kk] + (inBuf >> (inBits - nBits))) & bitMask);
inBits -= nBits;
outBuf = (outBuf << nBits) | upLeftBuf[kk];
outBits += nBits;
if (outBits >= 8) {
predLine[k++] = (unsigned char)(outBuf >> (outBits - 8));
outBits -= 8;
}
}
}
if (outBits > 0) {
predLine[k++] = (unsigned char)((outBuf << (8 - outBits)) + (inBuf & ((1 << (8 - outBits)) - 1)));
}
}
}
// reset to start of line
predIdx = pixBytes;
return true;
}
//------------------------------------------------------------------------
// FileStream
//------------------------------------------------------------------------
FileStream::FileStream(GooFile *fileA, Goffset startA, bool limitedA, Goffset lengthA, Object &&dictA) : BaseStream(std::move(dictA), lengthA)
{
file = fileA;
offset = start = startA;
limited = limitedA;
length = lengthA;
bufPtr = bufEnd = buf;
bufPos = start;
savePos = 0;
saved = false;
needsEncryptionOnSave = false;
}
FileStream::~FileStream()
{
close();
}
BaseStream *FileStream::copy()
{
return new FileStream(file, start, limited, length, dict.copy());
}
Stream *FileStream::makeSubStream(Goffset startA, bool limitedA, Goffset lengthA, Object &&dictA)
{
return new FileStream(file, startA, limitedA, lengthA, std::move(dictA));
}
void FileStream::reset()
{
savePos = offset;
offset = start;
saved = true;
bufPtr = bufEnd = buf;
bufPos = start;
}
void FileStream::close()
{
if (saved) {
offset = savePos;
saved = false;
}
}
bool FileStream::fillBuf()
{
int n;
bufPos += bufEnd - buf;
bufPtr = bufEnd = buf;
if (limited && bufPos >= start + length) {
return false;
}
if (limited && bufPos + fileStreamBufSize > start + length) {
n = start + length - bufPos;
} else {
n = fileStreamBufSize;
}
n = file->read(buf, n, offset);
if (n == -1) {
return false;
}
offset += n;
bufEnd = buf + n;
if (bufPtr >= bufEnd) {
return false;
}
return true;
}
void FileStream::setPos(Goffset pos, int dir)
{
Goffset size;
if (dir >= 0) {
offset = bufPos = pos;
} else {
size = file->size();
if (pos > size)
pos = size;
offset = size - pos;
bufPos = offset;
}
bufPtr = bufEnd = buf;
}
void FileStream::moveStart(Goffset delta)
{
start += delta;
bufPtr = bufEnd = buf;
bufPos = start;
}
//------------------------------------------------------------------------
// CachedFileStream
//------------------------------------------------------------------------
CachedFileStream::CachedFileStream(CachedFile *ccA, Goffset startA, bool limitedA, Goffset lengthA, Object &&dictA) : BaseStream(std::move(dictA), lengthA)
{
cc = ccA;
start = startA;
limited = limitedA;
length = lengthA;
bufPtr = bufEnd = buf;
bufPos = start;
savePos = 0;
saved = false;
}
CachedFileStream::~CachedFileStream()
{
close();
cc->decRefCnt();
}
BaseStream *CachedFileStream::copy()
{
cc->incRefCnt();
return new CachedFileStream(cc, start, limited, length, dict.copy());
}
Stream *CachedFileStream::makeSubStream(Goffset startA, bool limitedA, Goffset lengthA, Object &&dictA)
{
cc->incRefCnt();
return new CachedFileStream(cc, startA, limitedA, lengthA, std::move(dictA));
}
void CachedFileStream::reset()
{
savePos = (unsigned int)cc->tell();
cc->seek(start, SEEK_SET);
saved = true;
bufPtr = bufEnd = buf;
bufPos = start;
}
void CachedFileStream::close()
{
if (saved) {
cc->seek(savePos, SEEK_SET);
saved = false;
}
}
bool CachedFileStream::fillBuf()
{
int n;
bufPos += bufEnd - buf;
bufPtr = bufEnd = buf;
if (limited && bufPos >= start + length) {
return false;
}
if (limited && bufPos + cachedStreamBufSize > start + length) {
n = start + length - bufPos;
} else {
n = cachedStreamBufSize - (bufPos % cachedStreamBufSize);
}
n = cc->read(buf, 1, n);
bufEnd = buf + n;
if (bufPtr >= bufEnd) {
return false;
}
return true;
}
void CachedFileStream::setPos(Goffset pos, int dir)
{
unsigned int size;
if (dir >= 0) {
cc->seek(pos, SEEK_SET);
bufPos = pos;
} else {
cc->seek(0, SEEK_END);
size = (unsigned int)cc->tell();
if (pos > size)
pos = (unsigned int)size;
cc->seek(-(int)pos, SEEK_END);
bufPos = (unsigned int)cc->tell();
}
bufPtr = bufEnd = buf;
}
void CachedFileStream::moveStart(Goffset delta)
{
start += delta;
bufPtr = bufEnd = buf;
bufPos = start;
}
MemStream::~MemStream() = default;
AutoFreeMemStream::~AutoFreeMemStream()
{
gfree(buf);
}
bool AutoFreeMemStream::isFilterRemovalForbidden() const
{
return filterRemovalForbidden;
}
void AutoFreeMemStream::setFilterRemovalForbidden(bool forbidden)
{
filterRemovalForbidden = forbidden;
}
//------------------------------------------------------------------------
// EmbedStream
//------------------------------------------------------------------------
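// EmbedStream exposes a stream embedded inside another stream (e.g. an inline image
// in a content stream); when reusable, the bytes read are recorded so the data can be
// replayed after rewind() and until restore().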
EmbedStream::EmbedStream(Stream *strA, Object &&dictA, bool limitedA, Goffset lengthA, bool reusableA) : BaseStream(std::move(dictA), lengthA)
{
str = strA;
limited = limitedA;
length = lengthA;
reusable = reusableA;
record = false;
replay = false;
start = str->getPos();
if (reusable) {
bufData = (unsigned char *)gmalloc(16384);
bufMax = 16384;
bufLen = 0;
record = true;
}
}
EmbedStream::~EmbedStream()
{
if (reusable)
gfree(bufData);
}
void EmbedStream::reset()
{
if (str->getPos() != start) {
str->reset();
// Might be a FilterStream that does not support str->setPos(start)
while (str->getPos() < start) {
if (str->getChar() == EOF) {
break;
}
}
if (str->getPos() != start) {
error(errInternal, -1, "Failed to reset EmbedStream");
}
}
record = false;
replay = false;
bufPos = 0;
}
BaseStream *EmbedStream::copy()
{
error(errInternal, -1, "Called copy() on EmbedStream");
return nullptr;
}
Stream *EmbedStream::makeSubStream(Goffset startA, bool limitedA, Goffset lengthA, Object &&dictA)
{
error(errInternal, -1, "Called makeSubStream() on EmbedStream");
return nullptr;
}
void EmbedStream::rewind()
{
record = false;
replay = true;
bufPos = 0;
}
void EmbedStream::restore()
{
replay = false;
}
Goffset EmbedStream::getPos()
{
if (replay)
return bufPos;
else
return str->getPos();
}
int EmbedStream::getChar()
{
if (replay) {
if (bufPos < bufLen)
return bufData[bufPos++];
else
return EOF;
} else {
if (limited && !length) {
return EOF;
}
int c = str->getChar();
--length;
if (record) {
bufData[bufLen] = c;
bufLen++;
if (bufLen >= bufMax) {
bufMax *= 2;
bufData = (unsigned char *)grealloc(bufData, bufMax);
}
}
return c;
}
}
int EmbedStream::lookChar()
{
if (replay) {
if (bufPos < bufLen)
return bufData[bufPos];
else
return EOF;
} else {
if (limited && !length) {
return EOF;
}
return str->lookChar();
}
}
int EmbedStream::getChars(int nChars, unsigned char *buffer)
{
int len;
if (nChars <= 0) {
return 0;
}
if (replay) {
if (bufPos >= bufLen)
return EOF;
len = bufLen - bufPos;
if (nChars > len)
nChars = len;
memcpy(buffer, bufData, nChars);
return len;
} else {
if (limited && length < nChars) {
nChars = length;
}
len = str->doGetChars(nChars, buffer);
if (record) {
if (bufLen + len >= bufMax) {
while (bufLen + len >= bufMax)
bufMax *= 2;
bufData = (unsigned char *)grealloc(bufData, bufMax);
}
memcpy(bufData + bufLen, buffer, len);
bufLen += len;
}
}
return len;
}
void EmbedStream::setPos(Goffset pos, int dir)
{
error(errInternal, -1, "Internal: called setPos() on EmbedStream");
}
Goffset EmbedStream::getStart()
{
error(errInternal, -1, "Internal: called getStart() on EmbedStream");
return 0;
}
void EmbedStream::moveStart(Goffset delta)
{
error(errInternal, -1, "Internal: called moveStart() on EmbedStream");
}
//------------------------------------------------------------------------
// ASCIIHexStream
//------------------------------------------------------------------------
ASCIIHexStream::ASCIIHexStream(Stream *strA) : FilterStream(strA)
{
buf = EOF;
eof = false;
}
ASCIIHexStream::~ASCIIHexStream()
{
delete str;
}
void ASCIIHexStream::reset()
{
str->reset();
buf = EOF;
eof = false;
}
int ASCIIHexStream::lookChar()
{
int c1, c2, x;
if (buf != EOF)
return buf;
if (eof) {
buf = EOF;
return EOF;
}
do {
c1 = str->getChar();
} while (isspace(c1));
if (c1 == '>') {
eof = true;
buf = EOF;
return buf;
}
do {
c2 = str->getChar();
} while (isspace(c2));
if (c2 == '>') {
eof = true;
c2 = '0';
}
if (c1 >= '0' && c1 <= '9') {
x = (c1 - '0') << 4;
} else if (c1 >= 'A' && c1 <= 'F') {
x = (c1 - 'A' + 10) << 4;
} else if (c1 >= 'a' && c1 <= 'f') {
x = (c1 - 'a' + 10) << 4;
} else if (c1 == EOF) {
eof = true;
x = 0;
} else {
error(errSyntaxError, getPos(), "Illegal character <{0:02x}> in ASCIIHex stream", c1);
x = 0;
}
if (c2 >= '0' && c2 <= '9') {
x += c2 - '0';
} else if (c2 >= 'A' && c2 <= 'F') {
x += c2 - 'A' + 10;
} else if (c2 >= 'a' && c2 <= 'f') {
x += c2 - 'a' + 10;
} else if (c2 == EOF) {
eof = true;
x = 0;
} else {
error(errSyntaxError, getPos(), "Illegal character <{0:02x}> in ASCIIHex stream", c2);
}
buf = x & 0xff;
return buf;
}
GooString *ASCIIHexStream::getPSFilter(int psLevel, const char *indent)
{
GooString *s;
if (psLevel < 2) {
return nullptr;
}
if (!(s = str->getPSFilter(psLevel, indent))) {
return nullptr;
}
s->append(indent)->append("/ASCIIHexDecode filter\n");
return s;
}
bool ASCIIHexStream::isBinary(bool last) const
{
return str->isBinary(false);
}
//------------------------------------------------------------------------
// ASCII85Stream
//------------------------------------------------------------------------
ASCII85Stream::ASCII85Stream(Stream *strA) : FilterStream(strA)
{
index = n = 0;
eof = false;
}
ASCII85Stream::~ASCII85Stream()
{
delete str;
}
void ASCII85Stream::reset()
{
str->reset();
index = n = 0;
eof = false;
}
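// Decode the next group of five base-85 characters into four bytes; 'z' stands for
// four zero bytes, '~' or EOF ends the data, and a short final group is padded with
// 'u' (0x21 + 84) before decoding.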
int ASCII85Stream::lookChar()
{
int k;
unsigned long t;
if (index >= n) {
if (eof)
return EOF;
index = 0;
do {
c[0] = str->getChar();
} while (Lexer::isSpace(c[0]));
if (c[0] == '~' || c[0] == EOF) {
eof = true;
n = 0;
return EOF;
} else if (c[0] == 'z') {
b[0] = b[1] = b[2] = b[3] = 0;
n = 4;
} else {
for (k = 1; k < 5; ++k) {
do {
c[k] = str->getChar();
} while (Lexer::isSpace(c[k]));
if (c[k] == '~' || c[k] == EOF)
break;
}
n = k - 1;
if (k < 5 && (c[k] == '~' || c[k] == EOF)) {
for (++k; k < 5; ++k)
c[k] = 0x21 + 84;
eof = true;
}
t = 0;
for (k = 0; k < 5; ++k)
t = t * 85 + (c[k] - 0x21);
for (k = 3; k >= 0; --k) {
b[k] = (int)(t & 0xff);
t >>= 8;
}
}
}
return b[index];
}
GooString *ASCII85Stream::getPSFilter(int psLevel, const char *indent)
{
GooString *s;
if (psLevel < 2) {
return nullptr;
}
if (!(s = str->getPSFilter(psLevel, indent))) {
return nullptr;
}
s->append(indent)->append("/ASCII85Decode filter\n");
return s;
}
bool ASCII85Stream::isBinary(bool last) const
{
return str->isBinary(false);
}
//------------------------------------------------------------------------
// LZWStream
//------------------------------------------------------------------------
LZWStream::LZWStream(Stream *strA, int predictor, int columns, int colors, int bits, int earlyA) : FilterStream(strA)
{
if (predictor != 1) {
pred = new StreamPredictor(this, predictor, columns, colors, bits);
if (!pred->isOk()) {
delete pred;
pred = nullptr;
}
} else {
pred = nullptr;
}
early = earlyA;
eof = false;
inputBits = 0;
clearTable();
}
LZWStream::~LZWStream()
{
if (pred) {
delete pred;
}
delete str;
}
int LZWStream::getChar()
{
if (pred) {
return pred->getChar();
}
if (eof) {
return EOF;
}
if (seqIndex >= seqLength) {
if (!processNextCode()) {
return EOF;
}
}
return seqBuf[seqIndex++];
}
int LZWStream::lookChar()
{
if (pred) {
return pred->lookChar();
}
if (eof) {
return EOF;
}
if (seqIndex >= seqLength) {
if (!processNextCode()) {
return EOF;
}
}
return seqBuf[seqIndex];
}
void LZWStream::getRawChars(int nChars, int *buffer)
{
for (int i = 0; i < nChars; ++i)
buffer[i] = doGetRawChar();
}
int LZWStream::getRawChar()
{
return doGetRawChar();
}
int LZWStream::getChars(int nChars, unsigned char *buffer)
{
int n, m;
if (pred) {
return pred->getChars(nChars, buffer);
}
if (eof) {
return 0;
}
n = 0;
while (n < nChars) {
if (seqIndex >= seqLength) {
if (!processNextCode()) {
break;
}
}
m = seqLength - seqIndex;
if (m > nChars - n) {
m = nChars - n;
}
memcpy(buffer + n, seqBuf + seqIndex, m);
seqIndex += m;
n += m;
}
return n;
}
void LZWStream::reset()
{
str->reset();
eof = false;
inputBits = 0;
clearTable();
}
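// Fetch the next LZW code and expand it into seqBuf: code 256 clears the table,
// 257 marks end of data, other codes extend the string table, and the code width
// grows when nextCode + EarlyChange reaches 512/1024/2048.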
bool LZWStream::processNextCode()
{
int code;
int nextLength;
int i, j;
// check for EOF
if (eof) {
return false;
}
// check for eod and clear-table codes
start:
code = getCode();
if (code == EOF || code == 257) {
eof = true;
return false;
}
if (code == 256) {
clearTable();
goto start;
}
// process the next code
nextLength = seqLength + 1;
if (code < 256) {
seqBuf[0] = code;
seqLength = 1;
} else if (code < nextCode) {
seqLength = table[code].length;
for (i = seqLength - 1, j = code; i > 0; --i) {
seqBuf[i] = table[j].tail;
j = table[j].head;
}
seqBuf[0] = j;
} else if (code == nextCode) {
seqBuf[seqLength] = newChar;
++seqLength;
} else {
error(errSyntaxError, getPos(), "Bad LZW stream - unexpected code");
eof = true;
return false;
}
newChar = seqBuf[0];
if (first) {
first = false;
} else {
if (nextCode < 4097) {
table[nextCode].length = nextLength;
table[nextCode].head = prevCode;
table[nextCode].tail = newChar;
++nextCode;
}
if (nextCode + early == 512)
nextBits = 10;
else if (nextCode + early == 1024)
nextBits = 11;
else if (nextCode + early == 2048)
nextBits = 12;
}
prevCode = code;
// reset buffer
seqIndex = 0;
return true;
}
void LZWStream::clearTable()
{
nextCode = 258;
nextBits = 9;
seqIndex = seqLength = 0;
first = true;
newChar = 0;
}
int LZWStream::getCode()
{
int c;
int code;
while (inputBits < nextBits) {
if ((c = str->getChar()) == EOF)
return EOF;
inputBuf = (inputBuf << 8) | static_cast<unsigned>(c & 0xff);
inputBits += 8;
}
code = static_cast<signed>((inputBuf >> (inputBits - nextBits)) & ((1 << nextBits) - 1));
inputBits -= nextBits;
return code;
}
GooString *LZWStream::getPSFilter(int psLevel, const char *indent)
{
GooString *s;
if (psLevel < 2 || pred) {
return nullptr;
}
if (!(s = str->getPSFilter(psLevel, indent))) {
return nullptr;
}
s->append(indent)->append("<< ");
if (!early) {
s->append("/EarlyChange 0 ");
}
s->append(">> /LZWDecode filter\n");
return s;
}
bool LZWStream::isBinary(bool last) const
{
return str->isBinary(true);
}
//------------------------------------------------------------------------
// RunLengthStream
//------------------------------------------------------------------------
RunLengthStream::RunLengthStream(Stream *strA) : FilterStream(strA)
{
bufPtr = bufEnd = buf;
eof = false;
}
RunLengthStream::~RunLengthStream()
{
delete str;
}
void RunLengthStream::reset()
{
str->reset();
bufPtr = bufEnd = buf;
eof = false;
}
int RunLengthStream::getChars(int nChars, unsigned char *buffer)
{
int n, m;
n = 0;
while (n < nChars) {
if (bufPtr >= bufEnd) {
if (!fillBuf()) {
break;
}
}
m = (int)(bufEnd - bufPtr);
if (m > nChars - n) {
m = nChars - n;
}
memcpy(buffer + n, bufPtr, m);
bufPtr += m;
n += m;
}
return n;
}
GooString *RunLengthStream::getPSFilter(int psLevel, const char *indent)
{
GooString *s;
if (psLevel < 2) {
return nullptr;
}
if (!(s = str->getPSFilter(psLevel, indent))) {
return nullptr;
}
s->append(indent)->append("/RunLengthDecode filter\n");
return s;
}
bool RunLengthStream::isBinary(bool last) const
{
return str->isBinary(true);
}
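// Decode one run: a length byte 0..127 is followed by length+1 literal bytes,
// 129..255 repeats the next byte 257-length times, and 128 (or EOF) ends the data.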
bool RunLengthStream::fillBuf()
{
int c;
int n, i;
if (eof)
return false;
c = str->getChar();
if (c == 0x80 || c == EOF) {
eof = true;
return false;
}
if (c < 0x80) {
n = c + 1;
for (i = 0; i < n; ++i)
buf[i] = (char)str->getChar();
} else {
n = 0x101 - c;
c = str->getChar();
for (i = 0; i < n; ++i)
buf[i] = (char)c;
}
bufPtr = buf;
bufEnd = buf + n;
return true;
}
//------------------------------------------------------------------------
// CCITTFaxStream
//------------------------------------------------------------------------
CCITTFaxStream::CCITTFaxStream(Stream *strA, int encodingA, bool endOfLineA, bool byteAlignA, int columnsA, int rowsA, bool endOfBlockA, bool blackA, int damagedRowsBeforeErrorA) : FilterStream(strA)
{
encoding = encodingA;
endOfLine = endOfLineA;
byteAlign = byteAlignA;
columns = columnsA;
damagedRowsBeforeError = damagedRowsBeforeErrorA;
if (columns < 1) {
columns = 1;
} else if (columns > INT_MAX - 2) {
columns = INT_MAX - 2;
}
rows = rowsA;
endOfBlock = endOfBlockA;
black = blackA;
// 0 <= codingLine[0] < codingLine[1] < ... < codingLine[n] = columns
// ---> max codingLine size = columns + 1
// refLine has one extra guard entry at the end
// ---> max refLine size = columns + 2
codingLine = (int *)gmallocn_checkoverflow(columns + 1, sizeof(int));
refLine = (int *)gmallocn_checkoverflow(columns + 2, sizeof(int));
if (codingLine != nullptr && refLine != nullptr) {
eof = false;
codingLine[0] = columns;
} else {
eof = true;
}
row = 0;
nextLine2D = encoding < 0;
inputBits = 0;
a0i = 0;
outputBits = 0;
buf = EOF;
}
CCITTFaxStream::~CCITTFaxStream()
{
delete str;
gfree(refLine);
gfree(codingLine);
}
void CCITTFaxStream::ccittReset(bool unfiltered)
{
if (unfiltered)
str->unfilteredReset();
else
str->reset();
row = 0;
nextLine2D = encoding < 0;
inputBits = 0;
a0i = 0;
outputBits = 0;
buf = EOF;
}
void CCITTFaxStream::unfilteredReset()
{
ccittReset(true);
}
void CCITTFaxStream::reset()
{
int code1;
ccittReset(false);
if (codingLine != nullptr && refLine != nullptr) {
eof = false;
codingLine[0] = columns;
} else {
eof = true;
}
// skip any initial zero bits and end-of-line marker, and get the 2D
// encoding tag
while ((code1 = lookBits(12)) == 0) {
eatBits(1);
}
if (code1 == 0x001) {
eatBits(12);
endOfLine = true;
}
if (encoding > 0) {
nextLine2D = !lookBits(1);
eatBits(1);
}
}
inline void CCITTFaxStream::addPixels(int a1, int blackPixels)
{
if (a1 > codingLine[a0i]) {
if (a1 > columns) {
error(errSyntaxError, getPos(), "CCITTFax row is wrong length ({0:d})", a1);
err = true;
a1 = columns;
}
if ((a0i & 1) ^ blackPixels) {
++a0i;
}
codingLine[a0i] = a1;
}
}
inline void CCITTFaxStream::addPixelsNeg(int a1, int blackPixels)
{
if (a1 > codingLine[a0i]) {
if (a1 > columns) {
error(errSyntaxError, getPos(), "CCITTFax row is wrong length ({0:d})", a1);
err = true;
a1 = columns;
}
if ((a0i & 1) ^ blackPixels) {
++a0i;
}
codingLine[a0i] = a1;
} else if (a1 < codingLine[a0i]) {
if (a1 < 0) {
error(errSyntaxError, getPos(), "Invalid CCITTFax code");
err = true;
a1 = columns;
}
while (a0i > 0 && a1 <= codingLine[a0i - 1]) {
--a0i;
}
codingLine[a0i] = a1;
}
}
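// Return the next decoded byte. When the current row is exhausted, decode a fresh
// row of changing elements into codingLine[], using the 2-D (pass/horizontal/vertical)
// codes or the 1-D run-length codes, then serialize the runs back into output bytes.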
int CCITTFaxStream::lookChar()
{
int code1, code2, code3;
int b1i, blackPixels, i, bits;
bool gotEOL;
if (buf != EOF) {
return buf;
}
// read the next row
if (outputBits == 0) {
// if at eof just return EOF
if (eof) {
return EOF;
}
err = false;
// 2-D encoding
if (nextLine2D) {
for (i = 0; i < columns && codingLine[i] < columns; ++i) {
refLine[i] = codingLine[i];
}
for (; i < columns + 2; ++i) {
refLine[i] = columns;
}
codingLine[0] = 0;
a0i = 0;
b1i = 0;
blackPixels = 0;
// invariant:
// refLine[b1i-1] <= codingLine[a0i] < refLine[b1i] < refLine[b1i+1]
// <= columns
// exception at left edge:
// codingLine[a0i = 0] = refLine[b1i = 0] = 0 is possible
// exception at right edge:
// refLine[b1i] = refLine[b1i+1] = columns is possible
while (codingLine[a0i] < columns && !err) {
code1 = getTwoDimCode();
switch (code1) {
case twoDimPass:
if (likely(b1i + 1 < columns + 2)) {
addPixels(refLine[b1i + 1], blackPixels);
if (refLine[b1i + 1] < columns) {
b1i += 2;
}
}
break;
case twoDimHoriz:
code1 = code2 = 0;
if (blackPixels) {
do {
code1 += code3 = getBlackCode();
} while (code3 >= 64);
do {
code2 += code3 = getWhiteCode();
} while (code3 >= 64);
} else {
do {
code1 += code3 = getWhiteCode();
} while (code3 >= 64);
do {
code2 += code3 = getBlackCode();
} while (code3 >= 64);
}
addPixels(codingLine[a0i] + code1, blackPixels);
if (codingLine[a0i] < columns) {
addPixels(codingLine[a0i] + code2, blackPixels ^ 1);
}
while (refLine[b1i] <= codingLine[a0i] && refLine[b1i] < columns) {
b1i += 2;
if (unlikely(b1i > columns + 1)) {
error(errSyntaxError, getPos(), "Bad 2D code {0:04x} in CCITTFax stream", code1);
err = true;
break;
}
}
break;
case twoDimVertR3:
if (unlikely(b1i > columns + 1)) {
error(errSyntaxError, getPos(), "Bad 2D code {0:04x} in CCITTFax stream", code1);
err = true;
break;
}
addPixels(refLine[b1i] + 3, blackPixels);
blackPixels ^= 1;
if (codingLine[a0i] < columns) {
++b1i;
while (refLine[b1i] <= codingLine[a0i] && refLine[b1i] < columns) {
b1i += 2;
if (unlikely(b1i > columns + 1)) {
error(errSyntaxError, getPos(), "Bad 2D code {0:04x} in CCITTFax stream", code1);
err = true;
break;
}
}
}
break;
case twoDimVertR2:
if (unlikely(b1i > columns + 1)) {
error(errSyntaxError, getPos(), "Bad 2D code {0:04x} in CCITTFax stream", code1);
err = true;
break;
}
addPixels(refLine[b1i] + 2, blackPixels);
blackPixels ^= 1;
if (codingLine[a0i] < columns) {
++b1i;
while (refLine[b1i] <= codingLine[a0i] && refLine[b1i] < columns) {
b1i += 2;
if (unlikely(b1i > columns + 1)) {
error(errSyntaxError, getPos(), "Bad 2D code {0:04x} in CCITTFax stream", code1);
err = true;
break;
}
}
}
break;
case twoDimVertR1:
if (unlikely(b1i > columns + 1)) {
error(errSyntaxError, getPos(), "Bad 2D code {0:04x} in CCITTFax stream", code1);
err = true;
break;
}
addPixels(refLine[b1i] + 1, blackPixels);
blackPixels ^= 1;
if (codingLine[a0i] < columns) {
++b1i;
while (refLine[b1i] <= codingLine[a0i] && refLine[b1i] < columns) {
b1i += 2;
if (unlikely(b1i > columns + 1)) {
error(errSyntaxError, getPos(), "Bad 2D code {0:04x} in CCITTFax stream", code1);
err = true;
break;
}
}
}
break;
case twoDimVert0:
if (unlikely(b1i > columns + 1)) {
error(errSyntaxError, getPos(), "Bad 2D code {0:04x} in CCITTFax stream", code1);
err = true;
break;
}
addPixels(refLine[b1i], blackPixels);
blackPixels ^= 1;
if (codingLine[a0i] < columns) {
++b1i;
while (refLine[b1i] <= codingLine[a0i] && refLine[b1i] < columns) {
b1i += 2;
if (unlikely(b1i > columns + 1)) {
error(errSyntaxError, getPos(), "Bad 2D code {0:04x} in CCITTFax stream", code1);
err = true;
break;
}
}
}
break;
case twoDimVertL3:
if (unlikely(b1i > columns + 1)) {
error(errSyntaxError, getPos(), "Bad 2D code {0:04x} in CCITTFax stream", code1);
err = true;
break;
}
addPixelsNeg(refLine[b1i] - 3, blackPixels);
blackPixels ^= 1;
if (codingLine[a0i] < columns) {
if (b1i > 0) {
--b1i;
} else {
++b1i;
}
while (refLine[b1i] <= codingLine[a0i] && refLine[b1i] < columns) {
b1i += 2;
if (unlikely(b1i > columns + 1)) {
error(errSyntaxError, getPos(), "Bad 2D code {0:04x} in CCITTFax stream", code1);
err = true;
break;
}
}
}
break;
case twoDimVertL2:
if (unlikely(b1i > columns + 1)) {
error(errSyntaxError, getPos(), "Bad 2D code {0:04x} in CCITTFax stream", code1);
err = true;
break;
}
addPixelsNeg(refLine[b1i] - 2, blackPixels);
blackPixels ^= 1;
if (codingLine[a0i] < columns) {
if (b1i > 0) {
--b1i;
} else {
++b1i;
}
while (refLine[b1i] <= codingLine[a0i] && refLine[b1i] < columns) {
b1i += 2;
if (unlikely(b1i > columns + 1)) {
error(errSyntaxError, getPos(), "Bad 2D code {0:04x} in CCITTFax stream", code1);
err = true;
break;
}
}
}
break;
case twoDimVertL1:
if (unlikely(b1i > columns + 1)) {
error(errSyntaxError, getPos(), "Bad 2D code {0:04x} in CCITTFax stream", code1);
err = true;
break;
}
addPixelsNeg(refLine[b1i] - 1, blackPixels);
blackPixels ^= 1;
if (codingLine[a0i] < columns) {
if (b1i > 0) {
--b1i;
} else {
++b1i;
}
while (refLine[b1i] <= codingLine[a0i] && refLine[b1i] < columns) {
b1i += 2;
if (unlikely(b1i > columns + 1)) {
error(errSyntaxError, getPos(), "Bad 2D code {0:04x} in CCITTFax stream", code1);
err = true;
break;
}
}
}
break;
case EOF:
addPixels(columns, 0);
eof = true;
break;
default:
error(errSyntaxError, getPos(), "Bad 2D code {0:04x} in CCITTFax stream", code1);
addPixels(columns, 0);
err = true;
break;
}
}
// 1-D encoding
} else {
codingLine[0] = 0;
a0i = 0;
blackPixels = 0;
while (codingLine[a0i] < columns) {
code1 = 0;
if (blackPixels) {
do {
code1 += code3 = getBlackCode();
} while (code3 >= 64);
} else {
do {
code1 += code3 = getWhiteCode();
} while (code3 >= 64);
}
addPixels(codingLine[a0i] + code1, blackPixels);
blackPixels ^= 1;
}
}
// check for end-of-line marker, skipping over any extra zero bits
// (if EncodedByteAlign is true and EndOfLine is false, there can
// be "false" EOL markers -- i.e., if the last n unused bits in
// row i are set to zero, and the first 11-n bits in row i+1
// happen to be zero -- so we don't look for EOL markers in this
// case)
gotEOL = false;
if (!endOfBlock && row == rows - 1) {
eof = true;
} else if (endOfLine || !byteAlign) {
code1 = lookBits(12);
if (endOfLine) {
while (code1 != EOF && code1 != 0x001) {
eatBits(1);
code1 = lookBits(12);
}
} else {
while (code1 == 0) {
eatBits(1);
code1 = lookBits(12);
}
}
if (code1 == 0x001) {
eatBits(12);
gotEOL = true;
}
}
// byte-align the row
// (Adobe apparently doesn't do byte alignment after EOL markers
// -- I've seen CCITT image data streams in two different formats,
// both with the byteAlign flag set:
// 1. xx:x0:01:yy:yy
// 2. xx:00:1y:yy:yy
// where xx is the previous line, yy is the next line, and colons
// separate bytes.)
if (byteAlign && !gotEOL) {
inputBits &= ~7;
}
// check for end of stream
if (lookBits(1) == EOF) {
eof = true;
}
// get 2D encoding tag
if (!eof && encoding > 0) {
nextLine2D = !lookBits(1);
eatBits(1);
}
// check for end-of-block marker
if (endOfBlock && !endOfLine && byteAlign) {
// in this case, we didn't check for an EOL code above, so we
// need to check here
code1 = lookBits(24);
if (code1 == 0x001001) {
eatBits(12);
gotEOL = true;
}
}
if (endOfBlock && gotEOL) {
code1 = lookBits(12);
if (code1 == 0x001) {
eatBits(12);
if (encoding > 0) {
lookBits(1);
eatBits(1);
}
if (encoding >= 0) {
for (i = 0; i < 4; ++i) {
code1 = lookBits(12);
if (code1 != 0x001) {
error(errSyntaxError, getPos(), "Bad RTC code in CCITTFax stream");
}
eatBits(12);
if (encoding > 0) {
lookBits(1);
eatBits(1);
}
}
}
eof = true;
}
// look for an end-of-line marker after an error -- we only do
// this if we know the stream contains end-of-line markers because
// the "just plow on" technique tends to work better otherwise
} else if (err && endOfLine) {
while (true) {
code1 = lookBits(13);
if (code1 == EOF) {
eof = true;
return EOF;
}
if ((code1 >> 1) == 0x001) {
break;
}
eatBits(1);
}
eatBits(12);
if (encoding > 0) {
eatBits(1);
nextLine2D = !(code1 & 1);
}
}
// set up for output
if (codingLine[0] > 0) {
outputBits = codingLine[a0i = 0];
} else {
outputBits = codingLine[a0i = 1];
}
++row;
}
// get a byte
if (outputBits >= 8) {
buf = (a0i & 1) ? 0x00 : 0xff;
outputBits -= 8;
if (outputBits == 0 && codingLine[a0i] < columns) {
++a0i;
outputBits = codingLine[a0i] - codingLine[a0i - 1];
}
} else {
bits = 8;
buf = 0;
do {
if (outputBits > bits) {
buf <<= bits;
if (!(a0i & 1)) {
buf |= 0xff >> (8 - bits);
}
outputBits -= bits;
bits = 0;
} else {
buf <<= outputBits;
if (!(a0i & 1)) {
buf |= 0xff >> (8 - outputBits);
}
bits -= outputBits;
outputBits = 0;
if (codingLine[a0i] < columns) {
++a0i;
if (unlikely(a0i > columns)) {
error(errSyntaxError, getPos(), "Bad bits {0:04x} in CCITTFax stream", bits);
err = true;
break;
}
outputBits = codingLine[a0i] - codingLine[a0i - 1];
} else if (bits > 0) {
buf <<= bits;
bits = 0;
}
}
} while (bits);
}
if (black) {
buf ^= 0xff;
}
return buf;
}
short CCITTFaxStream::getTwoDimCode()
{
int code;
const CCITTCode *p;
int n;
code = 0; // make gcc happy
if (endOfBlock) {
if ((code = lookBits(7)) != EOF) {
p = &twoDimTab1[code];
if (p->bits > 0) {
eatBits(p->bits);
return p->n;
}
}
} else {
for (n = 1; n <= 7; ++n) {
if ((code = lookBits(n)) == EOF) {
break;
}
if (n < 7) {
code <<= 7 - n;
}
p = &twoDimTab1[code];
if (p->bits == n) {
eatBits(n);
return p->n;
}
}
}
error(errSyntaxError, getPos(), "Bad two dim code ({0:04x}) in CCITTFax stream", code);
return EOF;
}
short CCITTFaxStream::getWhiteCode()
{
short code;
const CCITTCode *p;
int n;
code = 0; // make gcc happy
if (endOfBlock) {
code = lookBits(12);
if (code == EOF) {
return 1;
}
if ((code >> 5) == 0) {
p = &whiteTab1[code];
} else {
p = &whiteTab2[code >> 3];
}
if (p->bits > 0) {
eatBits(p->bits);
return p->n;
}
} else {
for (n = 1; n <= 9; ++n) {
code = lookBits(n);
if (code == EOF) {
return 1;
}
if (n < 9) {
code <<= 9 - n;
}
p = &whiteTab2[code];
if (p->bits == n) {
eatBits(n);
return p->n;
}
}
for (n = 11; n <= 12; ++n) {
code = lookBits(n);
if (code == EOF) {
return 1;
}
if (n < 12) {
code <<= 12 - n;
}
p = &whiteTab1[code];
if (p->bits == n) {
eatBits(n);
return p->n;
}
}
}
error(errSyntaxError, getPos(), "Bad white code ({0:04x}) in CCITTFax stream", code);
// eat a bit and return a positive number so that the caller doesn't
// go into an infinite loop
eatBits(1);
return 1;
}
short CCITTFaxStream::getBlackCode()
{
short code;
const CCITTCode *p;
int n;
code = 0; // make gcc happy
if (endOfBlock) {
code = lookBits(13);
if (code == EOF) {
return 1;
}
if ((code >> 7) == 0) {
p = &blackTab1[code];
} else if ((code >> 9) == 0 && (code >> 7) != 0) {
p = &blackTab2[(code >> 1) - 64];
} else {
p = &blackTab3[code >> 7];
}
if (p->bits > 0) {
eatBits(p->bits);
return p->n;
}
} else {
for (n = 2; n <= 6; ++n) {
code = lookBits(n);
if (code == EOF) {
return 1;
}
if (n < 6) {
code <<= 6 - n;
}
p = &blackTab3[code];
if (p->bits == n) {
eatBits(n);
return p->n;
}
}
for (n = 7; n <= 12; ++n) {
code = lookBits(n);
if (code == EOF) {
return 1;
}
if (n < 12) {
code <<= 12 - n;
}
if (code >= 64) {
p = &blackTab2[code - 64];
if (p->bits == n) {
eatBits(n);
return p->n;
}
}
}
for (n = 10; n <= 13; ++n) {
code = lookBits(n);
if (code == EOF) {
return 1;
}
if (n < 13) {
code <<= 13 - n;
}
p = &blackTab1[code];
if (p->bits == n) {
eatBits(n);
return p->n;
}
}
}
error(errSyntaxError, getPos(), "Bad black code ({0:04x}) in CCITTFax stream", code);
// eat a bit and return a positive number so that the caller doesn't
// go into an infinite loop
eatBits(1);
return 1;
}
short CCITTFaxStream::lookBits(int n)
{
int c;
while (inputBits < n) {
if ((c = str->getChar()) == EOF) {
if (inputBits == 0) {
return EOF;
}
// near the end of the stream, the caller may ask for more bits
// than are available, but there may still be a valid code in
// however many bits are available -- we need to return correct
// data in this case
return (inputBuf << (n - inputBits)) & (0xffffffff >> (32 - n));
}
inputBuf = (inputBuf << 8) + c;
inputBits += 8;
}
return (inputBuf >> (inputBits - n)) & (0xffffffff >> (32 - n));
}
GooString *CCITTFaxStream::getPSFilter(int psLevel, const char *indent)
{
GooString *s;
char s1[50];
if (psLevel < 2) {
return nullptr;
}
if (!(s = str->getPSFilter(psLevel, indent))) {
return nullptr;
}
s->append(indent)->append("<< ");
if (encoding != 0) {
sprintf(s1, "/K %d ", encoding);
s->append(s1);
}
if (endOfLine) {
s->append("/EndOfLine true ");
}
if (byteAlign) {
s->append("/EncodedByteAlign true ");
}
sprintf(s1, "/Columns %d ", columns);
s->append(s1);
if (rows != 0) {
sprintf(s1, "/Rows %d ", rows);
s->append(s1);
}
if (!endOfBlock) {
s->append("/EndOfBlock false ");
}
if (black) {
s->append("/BlackIs1 true ");
}
s->append(">> /CCITTFaxDecode filter\n");
return s;
}
bool CCITTFaxStream::isBinary(bool last) const
{
return str->isBinary(true);
}
#ifndef ENABLE_LIBJPEG
//------------------------------------------------------------------------
// DCTStream
//------------------------------------------------------------------------
// IDCT constants (20.12 fixed point format)
# define dctCos1 4017 // cos(pi/16)
# define dctSin1 799 // sin(pi/16)
# define dctCos3 3406 // cos(3*pi/16)
# define dctSin3 2276 // sin(3*pi/16)
# define dctCos6 1567 // cos(6*pi/16)
# define dctSin6 3784 // sin(6*pi/16)
# define dctSqrt2 5793 // sqrt(2)
# define dctSqrt1d2 2896 // sqrt(2) / 2
// color conversion parameters (16.16 fixed point format)
# define dctCrToR 91881 // 1.4020
# define dctCbToG -22553 // -0.3441363
# define dctCrToG -46802 // -0.71413636
# define dctCbToB 116130 // 1.772
// clip [-256,511] --> [0,255]
# define dctClipOffset 256
# define dctClipLength 768
static unsigned char dctClip[dctClipLength];
static int dctClipInit = 0;
// zig zag decode map
static const int dctZigZag[64] = { 0, 1, 8, 16, 9, 2, 3, 10, 17, 24, 32, 25, 18, 11, 4, 5, 12, 19, 26, 33, 40, 48, 41, 34, 27, 20, 13, 6, 7, 14, 21, 28,
35, 42, 49, 56, 57, 50, 43, 36, 29, 22, 15, 23, 30, 37, 44, 51, 58, 59, 52, 45, 38, 31, 39, 46, 53, 60, 61, 54, 47, 55, 62, 63 };
DCTStream::DCTStream(Stream *strA, int colorXformA, Dict *dict, int recursion) : FilterStream(strA)
{
int i, j;
colorXform = colorXformA;
progressive = interleaved = false;
width = height = 0;
mcuWidth = mcuHeight = 0;
numComps = 0;
comp = 0;
x = y = dy = 0;
for (i = 0; i < 4; ++i) {
for (j = 0; j < 32; ++j) {
rowBuf[i][j] = nullptr;
}
frameBuf[i] = nullptr;
}
if (!dctClipInit) {
for (i = -256; i < 0; ++i)
dctClip[dctClipOffset + i] = 0;
for (i = 0; i < 256; ++i)
dctClip[dctClipOffset + i] = i;
for (i = 256; i < 512; ++i)
dctClip[dctClipOffset + i] = 255;
dctClipInit = 1;
}
}
DCTStream::~DCTStream()
{
close();
delete str;
}
void DCTStream::dctReset(bool unfiltered)
{
if (unfiltered)
str->unfilteredReset();
else
str->reset();
progressive = interleaved = false;
width = height = 0;
numComps = 0;
numQuantTables = 0;
numDCHuffTables = 0;
numACHuffTables = 0;
gotJFIFMarker = false;
gotAdobeMarker = false;
restartInterval = 0;
}
void DCTStream::unfilteredReset()
{
dctReset(true);
}
void DCTStream::reset()
{
int i, j;
dctReset(false);
if (!readHeader()) {
y = height;
return;
}
// compute MCU size
if (numComps == 1) {
compInfo[0].hSample = compInfo[0].vSample = 1;
}
mcuWidth = compInfo[0].hSample;
mcuHeight = compInfo[0].vSample;
for (i = 1; i < numComps; ++i) {
if (compInfo[i].hSample > mcuWidth) {
mcuWidth = compInfo[i].hSample;
}
if (compInfo[i].vSample > mcuHeight) {
mcuHeight = compInfo[i].vSample;
}
}
mcuWidth *= 8;
mcuHeight *= 8;
// figure out color transform
if (colorXform == -1) {
if (numComps == 3) {
if (gotJFIFMarker) {
colorXform = 1;
} else if (compInfo[0].id == 82 && compInfo[1].id == 71 && compInfo[2].id == 66) { // ASCII "RGB"
colorXform = 0;
} else {
colorXform = 1;
}
} else {
colorXform = 0;
}
}
if (progressive || !interleaved) {
// allocate a buffer for the whole image
bufWidth = ((width + mcuWidth - 1) / mcuWidth) * mcuWidth;
bufHeight = ((height + mcuHeight - 1) / mcuHeight) * mcuHeight;
if (bufWidth <= 0 || bufHeight <= 0 || bufWidth > INT_MAX / bufWidth / (int)sizeof(int)) {
error(errSyntaxError, getPos(), "Invalid image size in DCT stream");
y = height;
return;
}
for (i = 0; i < numComps; ++i) {
frameBuf[i] = (int *)gmallocn(bufWidth * bufHeight, sizeof(int));
memset(frameBuf[i], 0, bufWidth * bufHeight * sizeof(int));
}
// read the image data
do {
restartMarker = 0xd0;
restart();
readScan();
} while (readHeader());
// decode
decodeImage();
// initialize counters
comp = 0;
x = 0;
y = 0;
} else {
// allocate a buffer for one row of MCUs
bufWidth = ((width + mcuWidth - 1) / mcuWidth) * mcuWidth;
for (i = 0; i < numComps; ++i) {
for (j = 0; j < mcuHeight; ++j) {
rowBuf[i][j] = (unsigned char *)gmallocn(bufWidth, sizeof(unsigned char));
}
}
// initialize counters
comp = 0;
x = 0;
y = 0;
dy = mcuHeight;
restartMarker = 0xd0;
restart();
}
}
void DCTStream::close()
{
int i, j;
for (i = 0; i < 4; ++i) {
for (j = 0; j < 32; ++j) {
gfree(rowBuf[i][j]);
rowBuf[i][j] = nullptr;
}
gfree(frameBuf[i]);
frameBuf[i] = nullptr;
}
FilterStream::close();
}
int DCTStream::getChar()
{
int c;
if (y >= height) {
return EOF;
}
if (progressive || !interleaved) {
c = frameBuf[comp][y * bufWidth + x];
if (++comp == numComps) {
comp = 0;
if (++x == width) {
x = 0;
++y;
}
}
} else {
if (dy >= mcuHeight) {
if (!readMCURow()) {
y = height;
return EOF;
}
comp = 0;
x = 0;
dy = 0;
}
c = rowBuf[comp][dy][x];
if (++comp == numComps) {
comp = 0;
if (++x == width) {
x = 0;
++y;
++dy;
if (y == height) {
readTrailer();
}
}
}
}
return c;
}
int DCTStream::lookChar()
{
if (y >= height) {
return EOF;
}
if (progressive || !interleaved) {
return frameBuf[comp][y * bufWidth + x];
} else {
if (dy >= mcuHeight) {
if (!readMCURow()) {
y = height;
return EOF;
}
comp = 0;
x = 0;
dy = 0;
}
return rowBuf[comp][dy][x];
}
}
void DCTStream::restart()
{
int i;
inputBits = 0;
restartCtr = restartInterval;
for (i = 0; i < numComps; ++i) {
compInfo[i].prevDC = 0;
}
eobRun = 0;
}
// Read one row of MCUs from a sequential JPEG stream.
bool DCTStream::readMCURow()
{
int data1[64];
unsigned char data2[64];
unsigned char *p1, *p2;
int pY, pCb, pCr, pR, pG, pB;
int h, v, horiz, vert, hSub, vSub;
int x1, x2, y2, x3, y3, x4, y4, x5, y5, cc, i;
int c;
for (x1 = 0; x1 < width; x1 += mcuWidth) {
// deal with restart marker
if (restartInterval > 0 && restartCtr == 0) {
c = readMarker();
if (c != restartMarker) {
error(errSyntaxError, getPos(), "Bad DCT data: incorrect restart marker");
return false;
}
if (++restartMarker == 0xd8)
restartMarker = 0xd0;
restart();
Connect Databricks and Stripe products in our serverless environment
Use this template to read rows from Databricks and use them to create products in Stripe.
Read rows from Databricks
Used integrations: Databricks
class DatabricksSourceSelect {
async init() {
// TODO: Create your databricks credential
// More info at https://yepcode.io/docs/integrations/databricks/#credential-configuration
this.databricks = yepcode.integration.databricks(
"your-databricks-credential-name"
);
this.session = await this.databricks.openSession();
}
async fetch(publish, done) {
// TODO: Customize your sql query
// More info at: https://github.com/databricks/databricks-sql-nodejs/blob/main/tests/e2e/batched_fetch.test.js#L21
const queryOperation = await this.session.executeStatement(
`SELECT * FROM your_table_name`, {
runAsync: true
}
);
const results = await queryOperation.fetchAll();
await queryOperation.close();
for (const result of results) {
await publish(result);
}
done();
}
async close() {
await this.session.close();
await this.databricks.close();
}
}
Create products in Stripe
Used integrations: Stripe
class StripeTargetCreateProduct {
async init() {
// TODO: Create your stripe credential
// More info at https://yepcode.io/docs/integrations/stripe/#credential-configuration
this.stripe = yepcode.integration.stripe("your-stripe-credential-name");
}
async consume(item) {
// TODO: Customize your product creation attributes
// More info at: https://stripe.com/docs/api/products/create?lang=node
await this.stripe.products.create({
name: item.name,
});
}
async close() {}
}
FAQs
YepCode is a SaaS platform that enables the creation, execution and monitoring of integrations and automations using source code in a serverless environment.
We like to call it the Zapier for developers, since we bring all the agility and benefits of NoCode tools (avoid server provisioning, environment configuration, deployments,...), but with all the power of being able to use a programming language like JavaScript or Python.
These recipes are an excellent starting point for creating your own YepCode processes and solving complex integration and automation problems.
You only have to complete the sign up form and your account will be created with our FREE plan (no credit card required).
YepCode has been created with a clear enterprise focus, offering a multi-tenant environment, team management capabilities, high security and auditing standards, Identity Provider (IdP) integrations, and on-premise options. It serves as the Swiss army knife for engineering teams, especially those requiring the extraction or transmission of information to external systems. It excels in scenarios demanding flexibility and adaptability to change within the process.
Sure! You only need to configure YepCode servers to establish a connection with that service. Check our docs page to get more information.
How ‘Hibernate’ is included in ‘Shut down’ options in Windows OS?
Image: Hibernate option in the Windows shutdown menu
Hibernate option in Shut down.
Ever thought about how Microsoft got the idea of having a Hibernate option in the Shut down menu? Microsoft was most likely inspired by the science term 'hibernation.'
Yes, the word Hibernate is derived from the word Hibernation.
It's interesting! But what is hibernation? Probably even a 5th standard student knows this term.
As per Wiki – "Hibernation is a state of inactivity and metabolic depression in endotherms" (endotherms are animals that are dependent on or capable of the internal generation of heat). Hibernation is observed in animals that live in cold areas. During this state, animals go into a deep sleep for several days, weeks or months, depending on the species, temperature, time of year and the individual's body condition. The function of hibernation is to protect against cold and to conserve energy during a period when sufficient food is unavailable. In hibernation, the animal's body temperature is decreased, and breathing and heart rates slow down. Once animals come out of hibernation, they resume their routine activity, say, searching for food.
Image: Hibernate option in the Windows 7 shutdown menu
In Computer Language
Once you select the Hibernate option, Windows saves all the open documents and running applications to the hard disk and shuts down the computer. That means once your computer is in Hibernate mode, it uses zero power. When you turn the computer back on, it resumes everything where you left off. Selecting hibernate mode is useful when you want to shut down the computer for a long time but don't want to close the applications you are currently working on. The Hibernate option is extremely helpful when you want to turn off the computer without losing work, for example during a power cut or when a laptop battery is running low. You can leave the computer hibernated for days, weeks or even months.
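If you want to trigger this from code rather than from the Start menu, the snippet below is a minimal sketch for Windows, assuming a C++ toolchain that can link against PowrProf.lib; it calls the documented SetSuspendState API to request hibernation. The command-line equivalents are powercfg /hibernate on (to enable the feature) and shutdown /h (to hibernate immediately).

#include <windows.h>
#include <powrprof.h>
#pragma comment(lib, "PowrProf.lib")

int main()
{
    // Ask Windows to hibernate: RAM contents are written to hiberfil.sys on disk,
    // the machine powers off, and the session resumes where you left off on next boot.
    // First argument TRUE = hibernate (FALSE would request sleep/suspend instead).
    if (!SetSuspendState(TRUE, FALSE, FALSE)) {
        // Fails if hibernation is disabled; enable it first with: powercfg /hibernate on
        return 1;
    }
    return 0;
}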
Download: MSI Afterburner 4.5.0
Discussion in 'Frontpage news' started by Hilbert Hagedoorn, Apr 24, 2018.
1. NaeemTV
NaeemTV New Member
Messages:
2
Likes Received:
0
GPU:
RX Vega 64 LC
MSI Afterburner keeps crashing after I updated to Windows 10 version 1809. It automatically closes itself after a few minutes without any error message. I have tried the latest beta as well, but no luck.
Last edited: Oct 8, 2018
2. Fox2232
Fox2232 Ancient Guru
Messages:
9,680
Likes Received:
2,157
GPU:
5700XT+AW@240Hz
Closes? Is there a notice in the log file? Some game anti-cheat systems kill AB. Maybe some anti-cheat went crazy on 1809.
Apparently, if it can't be replicated, you have to provide a meaningful source of information leading to the root cause.
So, check system logs, AB logs, ...
3. Unwinder
Unwinder Moderator Staff Member
Messages:
14,331
Likes Received:
1,364
It is not crashing. The new version of BattlEye is silently and forcibly closing it on AMD systems without notifying the user in any form.
4. CoRsAiR-X1
CoRsAiR-X1 New Member
Messages:
2
Likes Received:
0
GPU:
R9 290| 4 GB
Hi!
I noticed that if I use MSI Afterburner with the latest AMD drivers, my overclock is not stable.
But if I try the old drivers (16.11.2), from before Wattman was implemented, I can get a stable overclock.
Has anyone of you solved this?
How can I solve it?
Does anyone have the same problem?
5. Fox2232
Fox2232 Ancient Guru
Messages:
9,680
Likes Received:
2,157
GPU:
5700XT+AW@240Hz
You should consider another cause. The new driver utilizes your GPU better => more load/heat => higher v-drop => less stable.
Those things happen all the time, especially if your GPU has aged a bit and requires higher voltage for the same OC. Or maybe no voltage is enough at the given clock and you'll have to downclock a bit.
6. r3lu
r3lu Member
Messages:
12
Likes Received:
0
GPU:
ASUS ROG Strix GTX 1080Ti
Not sure if this has been posted before, but the synchronized dual fans for the MSI GeForce RTX 2080 Ti Gaming X TRIO don’t work as they should in manual mode. When setting a custom fan curve or manual speed, fan #1 shows 350-500 RPM higher speed than fan #2, with a bigger difference the higher you set the fan. At 100% fan speed you get some 3400 RPM vs 2400 RPM. The faster fan is the small one near the front of the card.
When left in the standard auto-mode, both fans spin at approximately the same RPM, all the time. Usually in the range 1350-1650 RPM, depending on temperature.
Maybe you should add a Fan 2 tab with its own separate graph, and just leave the synchronized mode as an option.
7. Unwinder
Unwinder Moderator Staff Member
Messages:
14,331
Likes Received:
1,364
You’re misunderstanding things. Who said that you should get exactly the same RPM on different fans? Fan duty cycle (speed in %) must be the same, but the same % is not supposed to be mapped to exactly the same RPM.
fry178 likes this.
8. r3lu
r3lu Member
Messages:
12
Likes Received:
0
GPU:
ASUS ROG Strix GTX 1080Ti
You are correct. On auto, fan speeds are something like 46% and 59%, for practically identical 1500 RPM on both tachometers. But wouldn’t it be better if we could replicate this percentage offset when setting a custom fan curve? That faster fan becomes noisier much sooner than the other two and is not ideal. If the MSI vbios thinks both fan groups should have the same RPM during gaming, then perhaps it makes sense to duplicate that when manually adjusting for lower temps.
I would imagine other cards might need a different offset, or none at all. But at least there should be some way to tweak that when it's needed.
BTW, that OC scanner feature is pretty much spot on with the frequency curve. Great work. Thanks!
9. Unwinder (Moderator, Staff Member)
That doesn't work that way at all. There are no, and there won't be any, offsets or attempts to sync RPM, and there is no default "RPM sync" mode like you're assuming. Sync will always result in syncing fan duty cycle only, and I have nothing to add to it.
10. FAQU - GPU: RX 480 8GB
I've noticed that with the latest AMD drivers (18.10.1 & 18.10.2) the GPU usage graph in Afterburner is grayed out, while in Wattman and Task Manager the GPU usage is displayed just fine. The issue doesn't exist with older driver versions such as 18.9.3.
I was wondering if anyone else has this issue, and if there is any workaround for it?
RX 480 8GB
MSI Afterburner 4.6.0 Beta 9
11. romes16 - GPU: EVGA GTX 1060 GB
MSI Afterburner randomly stopped working; here is the error popup. I tried reinstalling 4.5.0 and the beta, but it still won't work. [error popup screenshot]
12. Passus (Maha Guru) - GPU: GTX 1060 3GB
13. Mark Taylor - GPU: Asus RX480 8gb
Just wanted to say thank you for the wonderful utilities I download from Guru3D; they are a godsend. :):):)
14. CiroConsentino - GPU: ZOTAC RTX 2080 AMP!
Hi there,
I'm about to replace my current EVGA GTX 970 video card with a new ZOTAC RTX 2080 AMP 8GB in a couple of days. I'm currently using Afterburner 4.4.2 and RTSS v7.0.2, which don't support the Turing video cards. Should I uninstall the current version before installing Afterburner v4.6.0 Beta 9 + RTSS v7.2.0 final for the new video card? I never do overclocking or use custom fan profiles; everything is set to auto.
I'm using Windows 7 SP1 64-bit but plan to do a dual boot with Win10 build 1809 later.
Thank you.
15. Hilbert Hagedoorn (Don Vito Corleone, Staff Member) - GPU: AMD | NVIDIA
It's recommended but most often not needed at all. Give it a go; if you see an issue you can always uninstall or clear the RTSS cache.
16. Athlonite (Maha Guru) - GPU: Nitro+RX580 8GB OC
How the heck do you run the OC scanner? I don't see a button for it anywhere.
17. Amaze (Ancient Guru) - GPU: 1070ti 2114/4375
It's only for Nvidia Turing cards afaik. You have AMD for starters.
18. Athlonite (Maha Guru) - GPU: Nitro+RX580 8GB OC
Well, that's crap then, isn't it? I downloaded it and replaced Trixx.
19. JonasBeckman (Ancient Guru) - GPU: Sapphire Vega Pulse
It's a feature that uses extensions in NVAPI (used to get data from the GPU, among many other things) plus certain code for Turing that's under NDA, at least for now. It remains to be seen whether NVIDIA will open it up more or keep it closed; it's probably a pretty complex piece of work, and not something they would want to allow modifications to, for the sake of GPU reliability, such as being able to change its limits.
It can't be directly implemented on AMD, and while I am no expert on the subject, I don't believe AMD's ADL API has the same flexibility for overclocking, which is part of why you need low-level hardware access for some of the GPU data in Afterburner, if I'm not messing this up completely. :)
(I do know NVIDIA has some pretty interesting things in NVAPI that go beyond just getting data from the GPU itself.)
A simpler overclock suite might be feasible; AMD had a very basic 3D demo in their early drivers, for example, but it wouldn't be anywhere near the same as NVIDIA's AI suite of self-diagnosing stability testing found on Turing.
(From that to Overdrive to Wattman, it hasn't exactly been smooth for AMD, plus underlying changes in the driver occasionally break support for third-party utilities.)
EDIT: It's both a hardware and software limitation; it's a feature found in NVIDIA's Turing GPUs that can be called from Afterburner, but it's under NVIDIA's control and it's their own code.
(There's better and more detailed info in the RivaTuner forum here and in the beta topic for this version, detailing how it is set up and how it works.)
EDIT: Thinking about it, since it's pretty closed, I wonder if it might not go via NVAPI at all and use something entirely different; it might work at a lower level than I'm thinking.
Yes, I think that makes a bit more sense, at least for the AI overclock feature itself and its scanner and test suite.
Should just have said that it's a closed-source private feature NVIDIA implemented for Turing which AMD can't use. :)
Last edited: Nov 12, 2018
20. Athlonite (Maha Guru) - GPU: Nitro+RX580 8GB OC
Would have been nice if MSI had actually said that on their site, or if it had actually been mentioned on the download page. ATI used to have an OC scanner though; I wonder why they can't make use of it for AMD cards.
PerlMonks
Re^5: What is the correct way to finish multithreaded program?
by Anonymous Monk
on May 05, 2014 at 12:24 UTC (#1085054)
in reply to Re^4: What is the correct way to finish multithreaded program?
in thread What is the correct way to finish multithreaded program?
If you can make the code run and reproduce the problem, you can see that the only real way to wait is to join everything, not merely the immediately joinable :)
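The advice generalizes beyond Perl's threads API: keep track of every worker and join each one, rather than reaping only the threads that happen to be joinable right away. As an illustration only (the worker body below is a made-up placeholder, not code from the original thread), a minimal C++ sketch:

#include <chrono>
#include <thread>
#include <vector>

// Trivial stand-in for real work; only here so the sketch is runnable.
static void worker(int id) {
    std::this_thread::sleep_for(std::chrono::milliseconds(10 * id));
}

int main() {
    std::vector<std::thread> workers;
    for (int i = 0; i < 4; ++i)
        workers.emplace_back(worker, i);

    // Wait for every worker, not just the ones that are already finished:
    // join() blocks until that particular thread has completed.
    for (std::thread &t : workers)
        t.join();
    return 0;
}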
ARRAYS AND LINKED LISTS
Definition and characteristics of each of the topics.
ARRAYS IN C++
An array is not a single variable; it is a group of variables known as elements
Each element occupies a position within the group
All elements are of the same type
The name of the array indicates where the group is located in the computer's memory
Arrays are classified according to the dimensions they have
The dimensions have no relation to the Cartesian plane; nothing to do with mathematics
The dimensions indicate how the elements are organized within the group
Two-dimensional arrays can be visualized as tables
The values stored in the array are kept in the elements, since the elements are the variables
TO CREATE ARRAYS IN C++
You must indicate:
1. the type of the elements (for example, int, char, double, bool, or a type defined by the programmer)
2. the name of the array
3. the number of dimensions and their sizes; each dimension starts with the [ sign, followed by the size of the dimension, and ends with the ] sign
Ejemplo 1 .
Declaración
int a [3]; // forma una secuencia de tres elementos
Nombre del grupo a
Nombre de los elementos
a [0]. primer elemento
a [1]. segundo elemento
a [2]. tercer elemento
ARRAY EXAMPLES
Example #2. Two-dimensional array
Declaration
char m[2][3]; // forms a table of two rows and three columns
// each row is a one-dimensional array
// the declaration indicates that there are two one-dimensional arrays
Name of the group
m: indicates the location of the group in memory
Names of the rows
m[0]: first row; indicates the location of the row within the group
m[1]: second row; indicates the location of the row within the group
Names of the elements
m[0][0]: first element
m[0][1]: second element
m[0][2]: third element
m[1][0]: fourth element
m[1][1]: fifth element
m[1][2]: sixth element
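For reference, a minimal runnable C++ sketch combining Example 1 and Example 2 above; the stored values (10, 20, 30 and the letters) are illustrative additions, not part of the original slides.

#include <iostream>

int main() {
    // Example 1: one-dimensional array of three elements
    int a[3];
    a[0] = 10;   // first element
    a[1] = 20;   // second element
    a[2] = 30;   // third element

    // Example 2: two-dimensional array seen as a table of 2 rows x 3 columns
    char m[2][3] = {
        {'a', 'b', 'c'},   // m[0] is the first row
        {'d', 'e', 'f'}    // m[1] is the second row
    };

    std::cout << "a[1] = " << a[1] << '\n';        // prints 20
    std::cout << "m[1][2] = " << m[1][2] << '\n';  // prints f (the sixth element)
    return 0;
}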
LINKED LIST
Linked list.
A linked list is a linear dynamic structure in which each node is a structure with at least two fields:
Info:
A field that contains the information stored in the linked list (this field may be simple or compound).
Siguiente (next):
A field that contains a pointer or link to the next element of the list.
LINKED LIST
Node:
There is a special pointer or link that stores the address of the first node, which we will call comienzo (com). The link field of the last node will be NULL.
A linked list can be represented in the following form:
LINKED LISTS
Characteristics.
• The nodes of a linked list do not occupy contiguous positions in memory.
• The size of the structure can grow and shrink during the execution of the program.
• If the list is empty, comienzo is NULL.
• The list is considered full when there is no space available to create a dynamic variable of type node.
• Not all programming languages allow the implementation of dynamic linked lists.
LINKED LIST
Notation used
Comienzo: pointer or link to the first node of the linked list.
If p is a pointer or link to any node of the list:
Info(p):
Data field of the node pointed to by p.
Sig(p):
Link field of the node pointed to by p.
TRAVERSAL
The traversal of a dynamic linked list is the procedure by which we access all the nodes of the list, from the first to the last.
LINKED LIST EXAMPLE
struct NODO
{
    int info;
    struct NODO *sig;
};
FUNCTION recorrido(comienzo): returns nothing
Environment:
comienzo:
pointer or link to the first node of the list.
p:
auxiliary pointer used to traverse the linked list.
Process:
Start
p <- comienzo
while p <> null
    visit node (info(p))
    p <- sig(p)
end while
end
In the C language it would be implemented as follows:
The recorrido function is:
void recorrido(struct NODO *comienzo)
{
    struct NODO *p;
    p = comienzo;
    while (p != NULL)
    {
        printf("%d", p->info);
        p = p->sig;
    }
}
The call would be:
struct NODO *comienzo;
...
recorrido(comienzo);
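For reference, a self-contained sketch that puts the pieces above together: it builds a three-node list by hand, calls recorrido, and then frees the nodes. The concrete values (1, 2, 3) and the main driver are illustrative additions, not part of the original slides.

#include <cstdio>

// Same node type as in the slides above.
struct NODO {
    int info;
    struct NODO *sig;
};

// Traversal function from the slides: visit every node from first to last.
void recorrido(struct NODO *comienzo) {
    struct NODO *p = comienzo;
    while (p != NULL) {
        printf("%d ", p->info);
        p = p->sig;
    }
    printf("\n");
}

int main() {
    // Build a three-node list 1 -> 2 -> 3 by hand.
    NODO *n3 = new NODO{3, NULL};
    NODO *n2 = new NODO{2, n3};
    NODO *comienzo = new NODO{1, n2};

    recorrido(comienzo);   // prints: 1 2 3

    // Release the nodes.
    while (comienzo != NULL) {
        NODO *siguiente = comienzo->sig;
        delete comienzo;
        comienzo = siguiente;
    }
    return 0;
}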
// SPDX-License-Identifier: GPL-2.0-only
/*
* Kernel-based Virtual Machine driver for Linux
*
* This module enables machines with Intel VT-x extensions to run virtual
* machines without emulation or binary translation.
*
* MMU support
*
* Copyright (C) 2006 Qumranet, Inc.
* Copyright 2010 Red Hat, Inc. and/or its affiliates.
*
* Authors:
* Yaniv Kamay <[email protected]>
* Avi Kivity <[email protected]>
*/
#include "irq.h"
#include "mmu.h"
#include "x86.h"
#include "kvm_cache_regs.h"
#include "cpuid.h"
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/kern_levels.h>
#include <asm/page.h>
#include <asm/pat.h>
#include <asm/cmpxchg.h>
#include <asm/e820/api.h>
#include <asm/io.h>
#include <asm/vmx.h>
#include <asm/kvm_page_track.h>
#include "trace.h"
/*
* When setting this variable to true it enables Two-Dimensional-Paging
* where the hardware walks 2 page tables:
* 1. the guest-virtual to guest-physical
* 2. while doing 1. it walks guest-physical to host-physical
* If the hardware supports that we don't need to do shadow paging.
*/
bool tdp_enabled = false;
enum {
AUDIT_PRE_PAGE_FAULT,
AUDIT_POST_PAGE_FAULT,
AUDIT_PRE_PTE_WRITE,
AUDIT_POST_PTE_WRITE,
AUDIT_PRE_SYNC,
AUDIT_POST_SYNC
};
#undef MMU_DEBUG
#ifdef MMU_DEBUG
static bool dbg = 0;
module_param(dbg, bool, 0644);
#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
#define MMU_WARN_ON(x) WARN_ON(x)
#else
#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)
#define MMU_WARN_ON(x) do { } while (0)
#endif
#define PTE_PREFETCH_NUM 8
#define PT_FIRST_AVAIL_BITS_SHIFT 10
#define PT64_SECOND_AVAIL_BITS_SHIFT 52
#define PT64_LEVEL_BITS 9
#define PT64_LEVEL_SHIFT(level) \
(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
#define PT64_INDEX(address, level)\
(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
#define PT32_LEVEL_BITS 10
#define PT32_LEVEL_SHIFT(level) \
(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
#define PT32_LVL_OFFSET_MASK(level) \
(PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
* PT32_LEVEL_BITS))) - 1))
#define PT32_INDEX(address, level)\
(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
#define PT64_BASE_ADDR_MASK (physical_mask & ~(u64)(PAGE_SIZE-1))
#else
#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#endif
#define PT64_LVL_ADDR_MASK(level) \
(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
* PT64_LEVEL_BITS))) - 1))
#define PT64_LVL_OFFSET_MASK(level) \
(PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
* PT64_LEVEL_BITS))) - 1))
#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
#define PT32_LVL_ADDR_MASK(level) \
(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
* PT32_LEVEL_BITS))) - 1))
#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \
| shadow_x_mask | shadow_nx_mask | shadow_me_mask)
#define ACC_EXEC_MASK 1
#define ACC_WRITE_MASK PT_WRITABLE_MASK
#define ACC_USER_MASK PT_USER_MASK
#define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
/* The mask for the R/X bits in EPT PTEs */
#define PT64_EPT_READABLE_MASK 0x1ull
#define PT64_EPT_EXECUTABLE_MASK 0x4ull
#include <trace/events/kvm.h>
#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
#define SPTE_MMU_WRITEABLE (1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1))
#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
/* make pte_list_desc fit well in cache line */
#define PTE_LIST_EXT 3
/*
* Return values of handle_mmio_page_fault and mmu.page_fault:
* RET_PF_RETRY: let CPU fault again on the address.
* RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
*
* For handle_mmio_page_fault only:
* RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
*/
enum {
RET_PF_RETRY = 0,
RET_PF_EMULATE = 1,
RET_PF_INVALID = 2,
};
struct pte_list_desc {
u64 *sptes[PTE_LIST_EXT];
struct pte_list_desc *more;
};
struct kvm_shadow_walk_iterator {
u64 addr;
hpa_t shadow_addr;
u64 *sptep;
int level;
unsigned index;
};
static const union kvm_mmu_page_role mmu_base_role_mask = {
.cr0_wp = 1,
.gpte_is_8_bytes = 1,
.nxe = 1,
.smep_andnot_wp = 1,
.smap_andnot_wp = 1,
.smm = 1,
.guest_mode = 1,
.ad_disabled = 1,
};
#define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker) \
for (shadow_walk_init_using_root(&(_walker), (_vcpu), \
(_root), (_addr)); \
shadow_walk_okay(&(_walker)); \
shadow_walk_next(&(_walker)))
#define for_each_shadow_entry(_vcpu, _addr, _walker) \
for (shadow_walk_init(&(_walker), _vcpu, _addr); \
shadow_walk_okay(&(_walker)); \
shadow_walk_next(&(_walker)))
#define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte) \
for (shadow_walk_init(&(_walker), _vcpu, _addr); \
shadow_walk_okay(&(_walker)) && \
({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
__shadow_walk_next(&(_walker), spte))
static struct kmem_cache *pte_list_desc_cache;
static struct kmem_cache *mmu_page_header_cache;
static struct percpu_counter kvm_total_used_mmu_pages;
static u64 __read_mostly shadow_nx_mask;
static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */
static u64 __read_mostly shadow_user_mask;
static u64 __read_mostly shadow_accessed_mask;
static u64 __read_mostly shadow_dirty_mask;
static u64 __read_mostly shadow_mmio_mask;
static u64 __read_mostly shadow_mmio_value;
static u64 __read_mostly shadow_present_mask;
static u64 __read_mostly shadow_me_mask;
/*
* SPTEs used by MMUs without A/D bits are marked with shadow_acc_track_value.
* Non-present SPTEs with shadow_acc_track_value set are in place for access
* tracking.
*/
static u64 __read_mostly shadow_acc_track_mask;
static const u64 shadow_acc_track_value = SPTE_SPECIAL_MASK;
/*
* The mask/shift to use for saving the original R/X bits when marking the PTE
* as not-present for access tracking purposes. We do not save the W bit as the
* PTEs being access tracked also need to be dirty tracked, so the W bit will be
* restored only when a write is attempted to the page.
*/
static const u64 shadow_acc_track_saved_bits_mask = PT64_EPT_READABLE_MASK |
PT64_EPT_EXECUTABLE_MASK;
static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT;
/*
* This mask must be set on all non-zero Non-Present or Reserved SPTEs in order
* to guard against L1TF attacks.
*/
static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
/*
* The number of high-order 1 bits to use in the mask above.
*/
static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
/*
* In some cases, we need to preserve the GFN of a non-present or reserved
* SPTE when we usurp the upper five bits of the physical address space to
* defend against L1TF, e.g. for MMIO SPTEs. To preserve the GFN, we'll
* shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask
* left into the reserved bits, i.e. the GFN in the SPTE will be split into
* high and low parts. This mask covers the lower bits of the GFN.
*/
static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
/*
* The number of non-reserved physical address bits irrespective of features
* that repurpose legal bits, e.g. MKTME.
*/
static u8 __read_mostly shadow_phys_bits;
static void mmu_spte_set(u64 *sptep, u64 spte);
static bool is_executable_pte(u64 spte);
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
#define CREATE_TRACE_POINTS
#include "mmutrace.h"
static inline bool kvm_available_flush_tlb_with_range(void)
{
return kvm_x86_ops->tlb_remote_flush_with_range;
}
static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
struct kvm_tlb_range *range)
{
int ret = -ENOTSUPP;
if (range && kvm_x86_ops->tlb_remote_flush_with_range)
ret = kvm_x86_ops->tlb_remote_flush_with_range(kvm, range);
if (ret)
kvm_flush_remote_tlbs(kvm);
}
static void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
u64 start_gfn, u64 pages)
{
struct kvm_tlb_range range;
range.start_gfn = start_gfn;
range.pages = pages;
kvm_flush_remote_tlbs_with_range(kvm, &range);
}
void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value)
{
BUG_ON((mmio_mask & mmio_value) != mmio_value);
shadow_mmio_value = mmio_value | SPTE_SPECIAL_MASK;
shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
{
return sp->role.ad_disabled;
}
static inline bool spte_ad_enabled(u64 spte)
{
MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value);
return !(spte & shadow_acc_track_value);
}
static inline u64 spte_shadow_accessed_mask(u64 spte)
{
MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value);
return spte_ad_enabled(spte) ? shadow_accessed_mask : 0;
}
static inline u64 spte_shadow_dirty_mask(u64 spte)
{
MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value);
return spte_ad_enabled(spte) ? shadow_dirty_mask : 0;
}
static inline bool is_access_track_spte(u64 spte)
{
return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) == 0;
}
/*
* Due to limited space in PTEs, the MMIO generation is a 19 bit subset of
* the memslots generation and is derived as follows:
*
* Bits 0-8 of the MMIO generation are propagated to spte bits 3-11
* Bits 9-18 of the MMIO generation are propagated to spte bits 52-61
*
* The KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS flag is intentionally not included in
* the MMIO generation number, as doing so would require stealing a bit from
* the "real" generation number and thus effectively halve the maximum number
* of MMIO generations that can be handled before encountering a wrap (which
* requires a full MMU zap). The flag is instead explicitly queried when
* checking for MMIO spte cache hits.
*/
#define MMIO_SPTE_GEN_MASK GENMASK_ULL(18, 0)
#define MMIO_SPTE_GEN_LOW_START 3
#define MMIO_SPTE_GEN_LOW_END 11
#define MMIO_SPTE_GEN_LOW_MASK GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \
MMIO_SPTE_GEN_LOW_START)
#define MMIO_SPTE_GEN_HIGH_START 52
#define MMIO_SPTE_GEN_HIGH_END 61
#define MMIO_SPTE_GEN_HIGH_MASK GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, \
MMIO_SPTE_GEN_HIGH_START)
static u64 generation_mmio_spte_mask(u64 gen)
{
u64 mask;
WARN_ON(gen & ~MMIO_SPTE_GEN_MASK);
mask = (gen << MMIO_SPTE_GEN_LOW_START) & MMIO_SPTE_GEN_LOW_MASK;
mask |= (gen << MMIO_SPTE_GEN_HIGH_START) & MMIO_SPTE_GEN_HIGH_MASK;
return mask;
}
static u64 get_mmio_spte_generation(u64 spte)
{
u64 gen;
spte &= ~shadow_mmio_mask;
gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_START;
gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_START;
return gen;
}
static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
unsigned access)
{
u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
u64 mask = generation_mmio_spte_mask(gen);
u64 gpa = gfn << PAGE_SHIFT;
access &= ACC_WRITE_MASK | ACC_USER_MASK;
mask |= shadow_mmio_value | access;
mask |= gpa | shadow_nonpresent_or_rsvd_mask;
mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
<< shadow_nonpresent_or_rsvd_mask_len;
page_header(__pa(sptep))->mmio_cached = true;
trace_mark_mmio_spte(sptep, gfn, access, gen);
mmu_spte_set(sptep, mask);
}
static bool is_mmio_spte(u64 spte)
{
return (spte & shadow_mmio_mask) == shadow_mmio_value;
}
static gfn_t get_mmio_spte_gfn(u64 spte)
{
u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
& shadow_nonpresent_or_rsvd_mask;
return gpa >> PAGE_SHIFT;
}
static unsigned get_mmio_spte_access(u64 spte)
{
u64 mask = generation_mmio_spte_mask(MMIO_SPTE_GEN_MASK) | shadow_mmio_mask;
return (spte & ~mask) & ~PAGE_MASK;
}
static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
kvm_pfn_t pfn, unsigned access)
{
if (unlikely(is_noslot_pfn(pfn))) {
mark_mmio_spte(vcpu, sptep, gfn, access);
return true;
}
return false;
}
static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
{
u64 kvm_gen, spte_gen, gen;
gen = kvm_vcpu_memslots(vcpu)->generation;
if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
return false;
kvm_gen = gen & MMIO_SPTE_GEN_MASK;
spte_gen = get_mmio_spte_generation(spte);
trace_check_mmio_spte(spte, kvm_gen, spte_gen);
return likely(kvm_gen == spte_gen);
}
/*
* Sets the shadow PTE masks used by the MMU.
*
* Assumptions:
* - Setting either @accessed_mask or @dirty_mask requires setting both
* - At least one of @accessed_mask or @acc_track_mask must be set
*/
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
u64 acc_track_mask, u64 me_mask)
{
BUG_ON(!dirty_mask != !accessed_mask);
BUG_ON(!accessed_mask && !acc_track_mask);
BUG_ON(acc_track_mask & shadow_acc_track_value);
shadow_user_mask = user_mask;
shadow_accessed_mask = accessed_mask;
shadow_dirty_mask = dirty_mask;
shadow_nx_mask = nx_mask;
shadow_x_mask = x_mask;
shadow_present_mask = p_mask;
shadow_acc_track_mask = acc_track_mask;
shadow_me_mask = me_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
static u8 kvm_get_shadow_phys_bits(void)
{
/*
* boot_cpu_data.x86_phys_bits is reduced when MKTME is detected
* in CPU detection code, but MKTME treats those reduced bits as
* 'keyID' thus they are not reserved bits. Therefore for MKTME
* we should still return physical address bits reported by CPUID.
*/
if (!boot_cpu_has(X86_FEATURE_TME) ||
WARN_ON_ONCE(boot_cpu_data.extended_cpuid_level < 0x80000008))
return boot_cpu_data.x86_phys_bits;
return cpuid_eax(0x80000008) & 0xff;
}
static void kvm_mmu_reset_all_pte_masks(void)
{
u8 low_phys_bits;
shadow_user_mask = 0;
shadow_accessed_mask = 0;
shadow_dirty_mask = 0;
shadow_nx_mask = 0;
shadow_x_mask = 0;
shadow_mmio_mask = 0;
shadow_present_mask = 0;
shadow_acc_track_mask = 0;
shadow_phys_bits = kvm_get_shadow_phys_bits();
/*
* If the CPU has 46 or less physical address bits, then set an
* appropriate mask to guard against L1TF attacks. Otherwise, it is
* assumed that the CPU is not vulnerable to L1TF.
*
* Some Intel CPUs address the L1 cache using more PA bits than are
* reported by CPUID. Use the PA width of the L1 cache when possible
* to achieve more effective mitigation, e.g. if system RAM overlaps
* the most significant bits of legal physical address space.
*/
shadow_nonpresent_or_rsvd_mask = 0;
low_phys_bits = boot_cpu_data.x86_cache_bits;
if (boot_cpu_data.x86_cache_bits <
52 - shadow_nonpresent_or_rsvd_mask_len) {
shadow_nonpresent_or_rsvd_mask =
rsvd_bits(boot_cpu_data.x86_cache_bits -
shadow_nonpresent_or_rsvd_mask_len,
boot_cpu_data.x86_cache_bits - 1);
low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
} else
WARN_ON_ONCE(boot_cpu_has_bug(X86_BUG_L1TF));
shadow_nonpresent_or_rsvd_lower_gfn_mask =
GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
}
static int is_cpuid_PSE36(void)
{
return 1;
}
static int is_nx(struct kvm_vcpu *vcpu)
{
return vcpu->arch.efer & EFER_NX;
}
static int is_shadow_present_pte(u64 pte)
{
return (pte != 0) && !is_mmio_spte(pte);
}
static int is_large_pte(u64 pte)
{
return pte & PT_PAGE_SIZE_MASK;
}
static int is_last_spte(u64 pte, int level)
{
if (level == PT_PAGE_TABLE_LEVEL)
return 1;
if (is_large_pte(pte))
return 1;
return 0;
}
static bool is_executable_pte(u64 spte)
{
return (spte & (shadow_x_mask | shadow_nx_mask)) == shadow_x_mask;
}
static kvm_pfn_t spte_to_pfn(u64 pte)
{
return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
}
static gfn_t pse36_gfn_delta(u32 gpte)
{
int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
return (gpte & PT32_DIR_PSE36_MASK) << shift;
}
#ifdef CONFIG_X86_64
static void __set_spte(u64 *sptep, u64 spte)
{
WRITE_ONCE(*sptep, spte);
}
static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
WRITE_ONCE(*sptep, spte);
}
static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
return xchg(sptep, spte);
}
static u64 __get_spte_lockless(u64 *sptep)
{
return READ_ONCE(*sptep);
}
#else
union split_spte {
struct {
u32 spte_low;
u32 spte_high;
};
u64 spte;
};
static void count_spte_clear(u64 *sptep, u64 spte)
{
struct kvm_mmu_page *sp = page_header(__pa(sptep));
if (is_shadow_present_pte(spte))
return;
/* Ensure the spte is completely set before we increase the count */
smp_wmb();
sp->clear_spte_count++;
}
static void __set_spte(u64 *sptep, u64 spte)
{
union split_spte *ssptep, sspte;
ssptep = (union split_spte *)sptep;
sspte = (union split_spte)spte;
ssptep->spte_high = sspte.spte_high;
/*
* If we map the spte from nonpresent to present, We should store
* the high bits firstly, then set present bit, so cpu can not
* fetch this spte while we are setting the spte.
*/
smp_wmb();
WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
}
static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
union split_spte *ssptep, sspte;
ssptep = (union split_spte *)sptep;
sspte = (union split_spte)spte;
WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
/*
* If we map the spte from present to nonpresent, we should clear
* present bit firstly to avoid vcpu fetch the old high bits.
*/
smp_wmb();
ssptep->spte_high = sspte.spte_high;
count_spte_clear(sptep, spte);
}
static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
union split_spte *ssptep, sspte, orig;
ssptep = (union split_spte *)sptep;
sspte = (union split_spte)spte;
/* xchg acts as a barrier before the setting of the high bits */
orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
orig.spte_high = ssptep->spte_high;
ssptep->spte_high = sspte.spte_high;
count_spte_clear(sptep, spte);
return orig.spte;
}
/*
* The idea using the light way get the spte on x86_32 guest is from
* gup_get_pte (mm/gup.c).
*
* An spte tlb flush may be pending, because kvm_set_pte_rmapp
* coalesces them and we are running out of the MMU lock. Therefore
* we need to protect against in-progress updates of the spte.
*
* Reading the spte while an update is in progress may get the old value
* for the high part of the spte. The race is fine for a present->non-present
* change (because the high part of the spte is ignored for non-present spte),
* but for a present->present change we must reread the spte.
*
* All such changes are done in two steps (present->non-present and
* non-present->present), hence it is enough to count the number of
* present->non-present updates: if it changed while reading the spte,
* we might have hit the race. This is done using clear_spte_count.
*/
static u64 __get_spte_lockless(u64 *sptep)
{
struct kvm_mmu_page *sp = page_header(__pa(sptep));
union split_spte spte, *orig = (union split_spte *)sptep;
int count;
retry:
count = sp->clear_spte_count;
smp_rmb();
spte.spte_low = orig->spte_low;
smp_rmb();
spte.spte_high = orig->spte_high;
smp_rmb();
if (unlikely(spte.spte_low != orig->spte_low ||
count != sp->clear_spte_count))
goto retry;
return spte.spte;
}
#endif
static bool spte_can_locklessly_be_made_writable(u64 spte)
{
return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) ==
(SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE);
}
static bool spte_has_volatile_bits(u64 spte)
{
if (!is_shadow_present_pte(spte))
return false;
/*
* Always atomically update spte if it can be updated
* out of mmu-lock, it can ensure dirty bit is not lost,
* also, it can help us to get a stable is_writable_pte()
* to ensure tlb flush is not missed.
*/
if (spte_can_locklessly_be_made_writable(spte) ||
is_access_track_spte(spte))
return true;
if (spte_ad_enabled(spte)) {
if ((spte & shadow_accessed_mask) == 0 ||
(is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
return true;
}
return false;
}
static bool is_accessed_spte(u64 spte)
{
u64 accessed_mask = spte_shadow_accessed_mask(spte);
return accessed_mask ? spte & accessed_mask
: !is_access_track_spte(spte);
}
static bool is_dirty_spte(u64 spte)
{
u64 dirty_mask = spte_shadow_dirty_mask(spte);
return dirty_mask ? spte & dirty_mask : spte & PT_WRITABLE_MASK;
}
/* Rules for using mmu_spte_set:
* Set the sptep from nonpresent to present.
* Note: the sptep being assigned *must* be either not present
* or in a state where the hardware will not attempt to update
* the spte.
*/
static void mmu_spte_set(u64 *sptep, u64 new_spte)
{
WARN_ON(is_shadow_present_pte(*sptep));
__set_spte(sptep, new_spte);
}
/*
* Update the SPTE (excluding the PFN), but do not track changes in its
* accessed/dirty status.
*/
static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
{
u64 old_spte = *sptep;
WARN_ON(!is_shadow_present_pte(new_spte));
if (!is_shadow_present_pte(old_spte)) {
mmu_spte_set(sptep, new_spte);
return old_spte;
}
if (!spte_has_volatile_bits(old_spte))
__update_clear_spte_fast(sptep, new_spte);
else
old_spte = __update_clear_spte_slow(sptep, new_spte);
WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
return old_spte;
}
/* Rules for using mmu_spte_update:
* Update the state bits, it means the mapped pfn is not changed.
*
* Whenever we overwrite a writable spte with a read-only one we
* should flush remote TLBs. Otherwise rmap_write_protect
* will find a read-only spte, even though the writable spte
* might be cached on a CPU's TLB, the return value indicates this
* case.
*
* Returns true if the TLB needs to be flushed
*/
static bool mmu_spte_update(u64 *sptep, u64 new_spte)
{
bool flush = false;
u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);
if (!is_shadow_present_pte(old_spte))
return false;
/*
* For the spte updated out of mmu-lock is safe, since
* we always atomically update it, see the comments in
* spte_has_volatile_bits().
*/
if (spte_can_locklessly_be_made_writable(old_spte) &&
!is_writable_pte(new_spte))
flush = true;
/*
* Flush TLB when accessed/dirty states are changed in the page tables,
* to guarantee consistency between TLB and page tables.
*/
if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) {
flush = true;
kvm_set_pfn_accessed(spte_to_pfn(old_spte));
}
if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) {
flush = true;
kvm_set_pfn_dirty(spte_to_pfn(old_spte));
}
return flush;
}
/*
* Rules for using mmu_spte_clear_track_bits:
* It sets the sptep from present to nonpresent, and track the
* state bits, it is used to clear the last level sptep.
* Returns non-zero if the PTE was previously valid.
*/
static int mmu_spte_clear_track_bits(u64 *sptep)
{
kvm_pfn_t pfn;
u64 old_spte = *sptep;
if (!spte_has_volatile_bits(old_spte))
__update_clear_spte_fast(sptep, 0ull);
else
old_spte = __update_clear_spte_slow(sptep, 0ull);
if (!is_shadow_present_pte(old_spte))
return 0;
pfn = spte_to_pfn(old_spte);
/*
* KVM does not hold the refcount of the page used by
* kvm mmu, before reclaiming the page, we should
* unmap it from mmu first.
*/
WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
if (is_accessed_spte(old_spte))
kvm_set_pfn_accessed(pfn);
if (is_dirty_spte(old_spte))
kvm_set_pfn_dirty(pfn);
return 1;
}
/*
* Rules for using mmu_spte_clear_no_track:
* Directly clear spte without caring the state bits of sptep,
* it is used to set the upper level spte.
*/
static void mmu_spte_clear_no_track(u64 *sptep)
{
__update_clear_spte_fast(sptep, 0ull);
}
static u64 mmu_spte_get_lockless(u64 *sptep)
{
return __get_spte_lockless(sptep);
}
static u64 mark_spte_for_access_track(u64 spte)
{
if (spte_ad_enabled(spte))
return spte & ~shadow_accessed_mask;
if (is_access_track_spte(spte))
return spte;
/*
* Making an Access Tracking PTE will result in removal of write access
* from the PTE. So, verify that we will be able to restore the write
* access in the fast page fault path later on.
*/
WARN_ONCE((spte & PT_WRITABLE_MASK) &&
!spte_can_locklessly_be_made_writable(spte),
"kvm: Writable SPTE is not locklessly dirty-trackable\n");
WARN_ONCE(spte & (shadow_acc_track_saved_bits_mask <<
shadow_acc_track_saved_bits_shift),
"kvm: Access Tracking saved bit locations are not zero\n");
spte |= (spte & shadow_acc_track_saved_bits_mask) <<
shadow_acc_track_saved_bits_shift;
spte &= ~shadow_acc_track_mask;
return spte;
}
/* Restore an acc-track PTE back to a regular PTE */
static u64 restore_acc_track_spte(u64 spte)
{
u64 new_spte = spte;
u64 saved_bits = (spte >> shadow_acc_track_saved_bits_shift)
& shadow_acc_track_saved_bits_mask;
WARN_ON_ONCE(spte_ad_enabled(spte));
WARN_ON_ONCE(!is_access_track_spte(spte));
new_spte &= ~shadow_acc_track_mask;
new_spte &= ~(shadow_acc_track_saved_bits_mask <<
shadow_acc_track_saved_bits_shift);
new_spte |= saved_bits;
return new_spte;
}
/* Returns the Accessed status of the PTE and resets it at the same time. */
static bool mmu_spte_age(u64 *sptep)
{
u64 spte = mmu_spte_get_lockless(sptep);
if (!is_accessed_spte(spte))
return false;
if (spte_ad_enabled(spte)) {
clear_bit((ffs(shadow_accessed_mask) - 1),
(unsigned long *)sptep);
} else {
/*
* Capture the dirty status of the page, so that it doesn't get
* lost when the SPTE is marked for access tracking.
*/
if (is_writable_pte(spte))
kvm_set_pfn_dirty(spte_to_pfn(spte));
spte = mark_spte_for_access_track(spte);
mmu_spte_update_no_track(sptep, spte);
}
return true;
}
static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
{
/*
* Prevent page table teardown by making any free-er wait during
* kvm_flush_remote_tlbs() IPI to all active vcpus.
*/
local_irq_disable();
/*
* Make sure a following spte read is not reordered ahead of the write
* to vcpu->mode.
*/
smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
}
static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
{
/*
* Make sure the write to vcpu->mode is not reordered in front of
* reads to sptes. If it does, kvm_mmu_commit_zap_page() can see us
* OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
*/
smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
local_irq_enable();
}
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
struct kmem_cache *base_cache, int min)
{
void *obj;
if (cache->nobjs >= min)
return 0;
while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
obj = kmem_cache_zalloc(base_cache, GFP_KERNEL_ACCOUNT);
if (!obj)
return cache->nobjs >= min ? 0 : -ENOMEM;
cache->objects[cache->nobjs++] = obj;
}
return 0;
}
static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *cache)
{
return cache->nobjs;
}
static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
struct kmem_cache *cache)
{
while (mc->nobjs)
kmem_cache_free(cache, mc->objects[--mc->nobjs]);
}
static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
int min)
{
void *page;
if (cache->nobjs >= min)
return 0;
while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
if (!page)
return cache->nobjs >= min ? 0 : -ENOMEM;
cache->objects[cache->nobjs++] = page;
}
return 0;
}
static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
while (mc->nobjs)
free_page((unsigned long)mc->objects[--mc->nobjs]);
}
static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
int r;
r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
pte_list_desc_cache, 8 + PTE_PREFETCH_NUM);
if (r)
goto out;
r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
if (r)
goto out;
r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
mmu_page_header_cache, 4);
out:
return r;
}
static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
pte_list_desc_cache);
mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache,
mmu_page_header_cache);
}
static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
void *p;
BUG_ON(!mc->nobjs);
p = mc->objects[--mc->nobjs];
return p;
}
static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
{
return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
}
static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
{
kmem_cache_free(pte_list_desc_cache, pte_list_desc);
}
static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
{
if (!sp->role.direct)
return sp->gfns[index];
return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
}
static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
{
if (!sp->role.direct) {
sp->gfns[index] = gfn;
return;
}
if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index)))
pr_err_ratelimited("gfn mismatch under direct page %llx "
"(expected %llx, got %llx)\n",
sp->gfn,
kvm_mmu_page_get_gfn(sp, index), gfn);
}
/*
* Return the pointer to the large page information for a given gfn,
* handling slots that are not large page aligned.
*/
static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
struct kvm_memory_slot *slot,
int level)
{
unsigned long idx;
idx = gfn_to_index(gfn, slot->base_gfn, level);
return &slot->arch.lpage_info[level - 2][idx];
}
static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot,
gfn_t gfn, int count)
{
struct kvm_lpage_info *linfo;
int i;
for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
linfo = lpage_info_slot(gfn, slot, i);
linfo->disallow_lpage += count;
WARN_ON(linfo->disallow_lpage < 0);
}
}
void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
{
update_gfn_disallow_lpage_count(slot, gfn, 1);
}
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
{
update_gfn_disallow_lpage_count(slot, gfn, -1);
}
static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *slot;
gfn_t gfn;
kvm->arch.indirect_shadow_pages++;
gfn = sp->gfn;
slots = kvm_memslots_for_spte_role(kvm, sp->role);
slot = __gfn_to_memslot(slots, gfn);
/* the non-leaf shadow pages are keeping readonly. */
if (sp->role.level > PT_PAGE_TABLE_LEVEL)
return kvm_slot_page_track_add_page(kvm, slot, gfn,
KVM_PAGE_TRACK_WRITE);
kvm_mmu_gfn_disallow_lpage(slot, gfn);
}
static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *slot;
gfn_t gfn;
kvm->arch.indirect_shadow_pages--;
gfn = sp->gfn;
slots = kvm_memslots_for_spte_role(kvm, sp->role);
slot = __gfn_to_memslot(slots, gfn);
if (sp->role.level > PT_PAGE_TABLE_LEVEL)
return kvm_slot_page_track_remove_page(kvm, slot, gfn,
KVM_PAGE_TRACK_WRITE);
kvm_mmu_gfn_allow_lpage(slot, gfn);
}
static bool __mmu_gfn_lpage_is_disallowed(gfn_t gfn, int level,
struct kvm_memory_slot *slot)
{
struct kvm_lpage_info *linfo;
if (slot) {
linfo = lpage_info_slot(gfn, slot, level);
return !!linfo->disallow_lpage;
}
return true;
}
static bool mmu_gfn_lpage_is_disallowed(struct kvm_vcpu *vcpu, gfn_t gfn,
int level)
{
struct kvm_memory_slot *slot;
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
return __mmu_gfn_lpage_is_disallowed(gfn, level, slot);
}
static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
{
unsigned long page_size;
int i, ret = 0;
page_size = kvm_host_page_size(kvm, gfn);
for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
if (page_size >= KVM_HPAGE_SIZE(i))
ret = i;
else
break;
}
return ret;
}
static inline bool memslot_valid_for_gpte(struct kvm_memory_slot *slot,
bool no_dirty_log)
{
if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
return false;
if (no_dirty_log && slot->dirty_bitmap)
return false;
return true;
}
static struct kvm_memory_slot *
gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
bool no_dirty_log)
{
struct kvm_memory_slot *slot;
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
if (!memslot_valid_for_gpte(slot, no_dirty_log))
slot = NULL;
return slot;
}
static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
bool *force_pt_level)
{
int host_level, level, max_level;
struct kvm_memory_slot *slot;
if (unlikely(*force_pt_level))
return PT_PAGE_TABLE_LEVEL;
slot = kvm_vcpu_gfn_to_memslot(vcpu, large_gfn);
*force_pt_level = !memslot_valid_for_gpte(slot, true);
if (unlikely(*force_pt_level))
return PT_PAGE_TABLE_LEVEL;
host_level = host_mapping_level(vcpu->kvm, large_gfn);
if (host_level == PT_PAGE_TABLE_LEVEL)
return host_level;
max_level = min(kvm_x86_ops->get_lpage_level(), host_level);
for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
if (__mmu_gfn_lpage_is_disallowed(large_gfn, level, slot))
break;
return level - 1;
}
/*
* About rmap_head encoding:
*
* If the bit zero of rmap_head->val is clear, then it points to the only spte
* in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
* pte_list_desc containing more mappings.
*/
/*
* Returns the number of pointers in the rmap chain, not counting the new one.
*/
static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
struct kvm_rmap_head *rmap_head)
{
struct pte_list_desc *desc;
int i, count = 0;
if (!rmap_head->val) {
rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte);
rmap_head->val = (unsigned long)spte;
} else if (!(rmap_head->val & 1)) {
rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte);
desc = mmu_alloc_pte_list_desc(vcpu);
desc->sptes[0] = (u64 *)rmap_head->val;
desc->sptes[1] = spte;
rmap_head->val = (unsigned long)desc | 1;
++count;
} else {
rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
while (desc->sptes[PTE_LIST_EXT-1] && desc->more) {
desc = desc->more;
count += PTE_LIST_EXT;
}
if (desc->sptes[PTE_LIST_EXT-1]) {
desc->more = mmu_alloc_pte_list_desc(vcpu);
desc = desc->more;
}
for (i = 0; desc->sptes[i]; ++i)
++count;
desc->sptes[i] = spte;
}
return count;
}
static void
pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
struct pte_list_desc *desc, int i,
struct pte_list_desc *prev_desc)
{
int j;
for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j)
;
desc->sptes[i] = desc->sptes[j];
desc->sptes[j] = NULL;
if (j != 0)
return;
if (!prev_desc && !desc->more)
rmap_head->val = (unsigned long)desc->sptes[0];
else
if (prev_desc)
prev_desc->more = desc->more;
else
rmap_head->val = (unsigned long)desc->more | 1;
mmu_free_pte_list_desc(desc);
}
static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
{
struct pte_list_desc *desc;
struct pte_list_desc *prev_desc;
int i;
if (!rmap_head->val) {
pr_err("%s: %p 0->BUG\n", __func__, spte);
BUG();
} else if (!(rmap_head->val & 1)) {
rmap_printk("%s: %p 1->0\n", __func__, spte);
if ((u64 *)rmap_head->val != spte) {
pr_err("%s: %p 1->BUG\n", __func__, spte);
BUG();
}
rmap_head->val = 0;
} else {
rmap_printk("%s: %p many->many\n", __func__, spte);
desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
prev_desc = NULL;
while (desc) {
for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
if (desc->sptes[i] == spte) {
pte_list_desc_remove_entry(rmap_head,
desc, i, prev_desc);
return;
}
}
prev_desc = desc;
desc = desc->more;
}
pr_err("%s: %p many->many\n", __func__, spte);
BUG();
}
}
static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep)
{
mmu_spte_clear_track_bits(sptep);
__pte_list_remove(sptep, rmap_head);
}
static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
struct kvm_memory_slot *slot)
{
unsigned long idx;
idx = gfn_to_index(gfn, slot->base_gfn, level);
return &slot->arch.rmap[level - PT_PAGE_TABLE_LEVEL][idx];
}
static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
struct kvm_mmu_page *sp)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *slot;
slots = kvm_memslots_for_spte_role(kvm, sp->role);
slot = __gfn_to_memslot(slots, gfn);
return __gfn_to_rmap(gfn, sp->role.level, slot);
}
static bool rmap_can_add(struct kvm_vcpu *vcpu)
{
struct kvm_mmu_memory_cache *cache;
cache = &vcpu->arch.mmu_pte_list_desc_cache;
return mmu_memory_cache_free_objects(cache);
}
static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
struct kvm_mmu_page *sp;
struct kvm_rmap_head *rmap_head;
sp = page_header(__pa(spte));
kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
return pte_list_add(vcpu, spte, rmap_head);
}
static void rmap_remove(struct kvm *kvm, u64 *spte)
{
struct kvm_mmu_page *sp;
gfn_t gfn;
struct kvm_rmap_head *rmap_head;
sp = page_header(__pa(spte));
gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
rmap_head = gfn_to_rmap(kvm, gfn, sp);
__pte_list_remove(spte, rmap_head);
}
/*
* Used by the following functions to iterate through the sptes linked by a
* rmap. All fields are private and not assumed to be used outside.
*/
struct rmap_iterator {
/* private fields */
struct pte_list_desc *desc; /* holds the sptep if not NULL */
int pos; /* index of the sptep */
};
/*
* Iteration must be started by this function. This should also be used after
* removing/dropping sptes from the rmap link because in such cases the
* information in the itererator may not be valid.
*
* Returns sptep if found, NULL otherwise.
*/
static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
struct rmap_iterator *iter)
{
u64 *sptep;
if (!rmap_head->val)
return NULL;
if (!(rmap_head->val & 1)) {
iter->desc = NULL;
sptep = (u64 *)rmap_head->val;
goto out;
}
iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
iter->pos = 0;
sptep = iter->desc->sptes[iter->pos];
out:
BUG_ON(!is_shadow_present_pte(*sptep));
return sptep;
}
/*
* Must be used with a valid iterator: e.g. after rmap_get_first().
*
* Returns sptep if found, NULL otherwise.
*/
static u64 *rmap_get_next(struct rmap_iterator *iter)
{
u64 *sptep;
if (iter->desc) {
if (iter->pos < PTE_LIST_EXT - 1) {
++iter->pos;
sptep = iter->desc->sptes[iter->pos];
if (sptep)
goto out;
}
iter->desc = iter->desc->more;
if (iter->desc) {
iter->pos = 0;
/* desc->sptes[0] cannot be NULL */
sptep = iter->desc->sptes[iter->pos];
goto out;
}
}
return NULL;
out:
BUG_ON(!is_shadow_present_pte(*sptep));
return sptep;
}
#define for_each_rmap_spte(_rmap_head_, _iter_, _spte_) \
for (_spte_ = rmap_get_first(_rmap_head_, _iter_); \
_spte_; _spte_ = rmap_get_next(_iter_))
static void drop_spte(struct kvm *kvm, u64 *sptep)
{
if (mmu_spte_clear_track_bits(sptep))
rmap_remove(kvm, sptep);
}
static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
{
if (is_large_pte(*sptep)) {
WARN_ON(page_header(__pa(sptep))->role.level ==
PT_PAGE_TABLE_LEVEL);
drop_spte(kvm, sptep);
--kvm->stat.lpages;
return true;
}
return false;
}
static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
{
if (__drop_large_spte(vcpu->kvm, sptep)) {
struct kvm_mmu_page *sp = page_header(__pa(sptep));
kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
KVM_PAGES_PER_HPAGE(sp->role.level));
}
}
/*
* Write-protect on the specified @sptep, @pt_protect indicates whether
* spte write-protection is caused by protecting shadow page table.
*
* Note: write protection is difference between dirty logging and spte
* protection:
* - for dirty logging, the spte can be set to writable at anytime if
* its dirty bitmap is properly set.
* - for spte protection, the spte can be writable only after unsync-ing
* shadow page.
*
* Return true if tlb need be flushed.
*/
static bool spte_write_protect(u64 *sptep, bool pt_protect)
{
u64 spte = *sptep;
if (!is_writable_pte(spte) &&
!(pt_protect && spte_can_locklessly_be_made_writable(spte)))
return false;
rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
if (pt_protect)
spte &= ~SPTE_MMU_WRITEABLE;
spte = spte & ~PT_WRITABLE_MASK;
return mmu_spte_update(sptep, spte);
}
static bool __rmap_write_protect(struct kvm *kvm,
struct kvm_rmap_head *rmap_head,
bool pt_protect)
{
u64 *sptep;
struct rmap_iterator iter;
bool flush = false;
for_each_rmap_spte(rmap_head, &iter, sptep)
flush |= spte_write_protect(sptep, pt_protect);
return flush;
}
static bool spte_clear_dirty(u64 *sptep)
{
u64 spte = *sptep;
rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep);
spte &= ~shadow_dirty_mask;
return mmu_spte_update(sptep, spte);
}
static bool wrprot_ad_disabled_spte(u64 *sptep)
{
bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT,
(unsigned long *)sptep);
if (was_writable)
kvm_set_pfn_dirty(spte_to_pfn(*sptep));
return was_writable;
}
/*
* Gets the GFN ready for another round of dirty logging by clearing the
* - D bit on ad-enabled SPTEs, and
* - W bit on ad-disabled SPTEs.
* Returns true iff any D or W bits were cleared.
*/
static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
{
u64 *sptep;
struct rmap_iterator iter;
bool flush = false;
for_each_rmap_spte(rmap_head, &iter, sptep)
if (spte_ad_enabled(*sptep))
flush |= spte_clear_dirty(sptep);
else
flush |= wrprot_ad_disabled_spte(sptep);
return flush;
}
static bool spte_set_dirty(u64 *sptep)
{
u64 spte = *sptep;
rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep);
spte |= shadow_dirty_mask;
return mmu_spte_update(sptep, spte);
}
static bool __rmap_set_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
{
u64 *sptep;
struct rmap_iterator iter;
bool flush = false;
for_each_rmap_spte(rmap_head, &iter, sptep)
if (spte_ad_enabled(*sptep))
flush |= spte_set_dirty(sptep);
return flush;
}
/**
* kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
* @kvm: kvm instance
* @slot: slot to protect
* @gfn_offset: start of the BITS_PER_LONG pages we care about
* @mask: indicates which pages we should protect
*
* Used when we do not need to care about huge page mappings: e.g. during dirty
* logging we do not have any such mappings.
*/
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask)
{
struct kvm_rmap_head *rmap_head;
while (mask) {
rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
PT_PAGE_TABLE_LEVEL, slot);
__rmap_write_protect(kvm, rmap_head, false);
/* clear the first set bit */
mask &= mask - 1;
}
}
/**
* kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write
* protect the page if the D-bit isn't supported.
* @kvm: kvm instance
* @slot: slot to clear D-bit
* @gfn_offset: start of the BITS_PER_LONG pages we care about
* @mask: indicates which pages we should clear D-bit
*
* Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
*/
void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask)
{
struct kvm_rmap_head *rmap_head;
while (mask) {
rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
PT_PAGE_TABLE_LEVEL, slot);
__rmap_clear_dirty(kvm, rmap_head);
/* clear the first set bit */
mask &= mask - 1;
}
}
EXPORT_SYMBOL_GPL(kvm_mmu_clear_dirty_pt_masked);
/**
* kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
* PT level pages.
*
* It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
* enable dirty logging for them.
*
* Used when we do not need to care about huge page mappings: e.g. during dirty
* logging we do not have any such mappings.
*/
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask)
{
if (kvm_x86_ops->enable_log_dirty_pt_masked)
kvm_x86_ops->enable_log_dirty_pt_masked(kvm, slot, gfn_offset,
mask);
else
kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
}
/**
* kvm_arch_write_log_dirty - emulate dirty page logging
* @vcpu: Guest mode vcpu
*
* Emulate arch specific page modification logging for the
* nested hypervisor
*/
int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu)
{
if (kvm_x86_ops->write_log_dirty)
return kvm_x86_ops->write_log_dirty(vcpu);
return 0;
}
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
struct kvm_memory_slot *slot, u64 gfn)
{
struct kvm_rmap_head *rmap_head;
int i;
bool write_protected = false;
for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
rmap_head = __gfn_to_rmap(gfn, i, slot);
write_protected |= __rmap_write_protect(kvm, rmap_head, true);
}
return write_protected;
}
static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
{
struct kvm_memory_slot *slot;
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
}
static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
{
u64 *sptep;
struct rmap_iterator iter;
bool flush = false;
while ((sptep = rmap_get_first(rmap_head, &iter))) {
rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep);
pte_list_remove(rmap_head, sptep);
flush = true;
}
return flush;
}
static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
struct kvm_memory_slot *slot, gfn_t gfn, int level,
unsigned long data)
{
return kvm_zap_rmapp(kvm, rmap_head);
}
static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
struct kvm_memory_slot *slot, gfn_t gfn, int level,
unsigned long data)
{
u64 *sptep;
struct rmap_iterator iter;
int need_flush = 0;
u64 new_spte;
pte_t *ptep = (pte_t *)data;
kvm_pfn_t new_pfn;
WARN_ON(pte_huge(*ptep));
new_pfn = pte_pfn(*ptep);
restart:
for_each_rmap_spte(rmap_head, &iter, sptep) {
rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n",
sptep, *sptep, gfn, level);
need_flush = 1;
if (pte_write(*ptep)) {
pte_list_remove(rmap_head, sptep);
goto restart;
} else {
new_spte = *sptep & ~PT64_BASE_ADDR_MASK;
new_spte |= (u64)new_pfn << PAGE_SHIFT;
new_spte &= ~PT_WRITABLE_MASK;
new_spte &= ~SPTE_HOST_WRITEABLE;
new_spte = mark_spte_for_access_track(new_spte);
mmu_spte_clear_track_bits(sptep);
mmu_spte_set(sptep, new_spte);
}
}
if (need_flush && kvm_available_flush_tlb_with_range()) {
kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
return 0;
}
return need_flush;
}
struct slot_rmap_walk_iterator {
/* input fields. */
struct kvm_memory_slot *slot;
gfn_t start_gfn;
gfn_t end_gfn;
int start_level;
int end_level;
/* output fields. */
gfn_t gfn;
struct kvm_rmap_head *rmap;
int level;
/* private field. */
struct kvm_rmap_head *end_rmap;
};
static void
rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
{
iterator->level = level;
iterator->gfn = iterator->start_gfn;
iterator->rmap = __gfn_to_rmap(iterator->gfn, level, iterator->slot);
iterator->end_rmap = __gfn_to_rmap(iterator->end_gfn, level,
iterator->slot);
}
static void
slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
struct kvm_memory_slot *slot, int start_level,
int end_level, gfn_t start_gfn, gfn_t end_gfn)
{
iterator->slot = slot;
iterator->start_level = start_level;
iterator->end_level = end_level;
iterator->start_gfn = start_gfn;
iterator->end_gfn = end_gfn;
rmap_walk_init_level(iterator, iterator->start_level);
}
static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
{
return !!iterator->rmap;
}
static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
{
if (++iterator->rmap <= iterator->end_rmap) {
iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
return;
}
if (++iterator->level > iterator->end_level) {
iterator->rmap = NULL;
return;
}
rmap_walk_init_level(iterator, iterator->level);
}
#define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_, \
_start_gfn, _end_gfn, _iter_) \
for (slot_rmap_walk_init(_iter_, _slot_, _start_level_, \
_end_level_, _start_gfn, _end_gfn); \
slot_rmap_walk_okay(_iter_); \
slot_rmap_walk_next(_iter_))
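/*
 * Walk every memslot in every address space, clip [start, end) to each
 * slot's hva range, and invoke @handler on every rmap bucket covering the
 * resulting gfn range at every page-size level. The return value is the
 * bitwise OR of all handler results.
 */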
static int kvm_handle_hva_range(struct kvm *kvm,
unsigned long start,
unsigned long end,
unsigned long data,
int (*handler)(struct kvm *kvm,
struct kvm_rmap_head *rmap_head,
struct kvm_memory_slot *slot,
gfn_t gfn,
int level,
unsigned long data))
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
struct slot_rmap_walk_iterator iterator;
int ret = 0;
int i;
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
slots = __kvm_memslots(kvm, i);
kvm_for_each_memslot(memslot, slots) {
unsigned long hva_start, hva_end;
gfn_t gfn_start, gfn_end;
hva_start = max(start, memslot->userspace_addr);
hva_end = min(end, memslot->userspace_addr +
(memslot->npages << PAGE_SHIFT));
if (hva_start >= hva_end)
continue;
/*
* {gfn(page) | page intersects with [hva_start, hva_end)} =
* {gfn_start, gfn_start+1, ..., gfn_end-1}.
*/
gfn_start = hva_to_gfn_memslot(hva_start, memslot);
gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
for_each_slot_rmap_range(memslot, PT_PAGE_TABLE_LEVEL,
PT_MAX_HUGEPAGE_LEVEL,
gfn_start, gfn_end - 1,
&iterator)
ret |= handler(kvm, iterator.rmap, memslot,
iterator.gfn, iterator.level, data);
}
}
return ret;
}
static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
unsigned long data,
int (*handler)(struct kvm *kvm,
struct kvm_rmap_head *rmap_head,
struct kvm_memory_slot *slot,
gfn_t gfn, int level,
unsigned long data))
{
return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
}
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
}
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
return kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
}
static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
struct kvm_memory_slot *slot, gfn_t gfn, int level,
unsigned long data)
{
u64 *sptep;
struct rmap_iterator uninitialized_var(iter);
int young = 0;
for_each_rmap_spte(rmap_head, &iter, sptep)
young |= mmu_spte_age(sptep);
trace_kvm_age_page(gfn, level, slot, young);
return young;
}
static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
struct kvm_memory_slot *slot, gfn_t gfn,
int level, unsigned long data)
{
u64 *sptep;
struct rmap_iterator iter;
for_each_rmap_spte(rmap_head, &iter, sptep)
if (is_accessed_spte(*sptep))
return 1;
return 0;
}
#define RMAP_RECYCLE_THRESHOLD 1000
static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
struct kvm_rmap_head *rmap_head;
struct kvm_mmu_page *sp;
sp = page_header(__pa(spte));
rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0);
kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
KVM_PAGES_PER_HPAGE(sp->role.level));
}
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
return kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
}
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);
}
#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
u64 *pos;
u64 *end;
for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
if (is_shadow_present_pte(*pos)) {
printk(KERN_ERR "%s: %p %llx\n", __func__,
pos, *pos);
return 0;
}
return 1;
}
#endif
/*
* This value is the sum of all of the kvm instances'
* kvm->arch.n_used_mmu_pages values. We need a global,
* aggregate version in order to make the slab shrinker
* faster.
*/
static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr)
{
kvm->arch.n_used_mmu_pages += nr;
percpu_counter_add(&kvm_total_used_mmu_pages, nr);
}
static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
{
MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
hlist_del(&sp->hash_link);
list_del(&sp->link);
free_page((unsigned long)sp->spt);
if (!sp->role.direct)
free_page((unsigned long)sp->gfns);
kmem_cache_free(mmu_page_header_cache, sp);
}
static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
return hash_64(gfn, KVM_MMU_HASH_SHIFT);
}
static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp, u64 *parent_pte)
{
if (!parent_pte)
return;
pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
}
static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
u64 *parent_pte)
{
__pte_list_remove(parent_pte, &sp->parent_ptes);
}
static void drop_parent_pte(struct kvm_mmu_page *sp,
u64 *parent_pte)
{
mmu_page_remove_parent_pte(sp, parent_pte);
mmu_spte_clear_no_track(parent_pte);
}
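/*
 * Allocate a shadow page from the per-vcpu caches: the page header, the
 * spt page and, for indirect pages, the gfns array. The new page is
 * linked onto active_mmu_pages and accounted in n_used_mmu_pages.
 */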
static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
{
struct kvm_mmu_page *sp;
sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
if (!direct)
sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
kvm_mod_used_mmu_pages(vcpu->kvm, +1);
return sp;
}
static void mark_unsync(u64 *spte);
static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
{
u64 *sptep;
struct rmap_iterator iter;
for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
mark_unsync(sptep);
}
}
static void mark_unsync(u64 *spte)
{
struct kvm_mmu_page *sp;
unsigned int index;
sp = page_header(__pa(spte));
index = spte - sp->spt;
if (__test_and_set_bit(index, sp->unsync_child_bitmap))
return;
if (sp->unsync_children++)
return;
kvm_mmu_mark_parents_unsync(sp);
}
static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp)
{
return 0;
}
static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root)
{
}
static void nonpaging_update_pte(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp, u64 *spte,
const void *pte)
{
WARN_ON(1);
}
#define KVM_PAGE_ARRAY_NR 16
struct kvm_mmu_pages {
struct mmu_page_and_offset {
struct kvm_mmu_page *sp;
unsigned int idx;
} page[KVM_PAGE_ARRAY_NR];
unsigned int nr;
};
static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
int idx)
{
int i;
if (sp->unsync)
for (i = 0; i < pvec->nr; i++)
if (pvec->page[i].sp == sp)
return 0;
pvec->page[pvec->nr].sp = sp;
pvec->page[pvec->nr].idx = idx;
pvec->nr++;
return (pvec->nr == KVM_PAGE_ARRAY_NR);
}
static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
{
--sp->unsync_children;
WARN_ON((int)sp->unsync_children < 0);
__clear_bit(idx, sp->unsync_child_bitmap);
}
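/*
 * Recursively collect the unsync descendants of @sp into @pvec, guided by
 * the unsync_child_bitmap. Returns the number of unsync leaf pages found,
 * or -ENOSPC once @pvec is full; stale bitmap bits are cleared on the way.
 */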
static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
struct kvm_mmu_pages *pvec)
{
int i, ret, nr_unsync_leaf = 0;
for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
struct kvm_mmu_page *child;
u64 ent = sp->spt[i];
if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
clear_unsync_child_bit(sp, i);
continue;
}
child = page_header(ent & PT64_BASE_ADDR_MASK);
if (child->unsync_children) {
if (mmu_pages_add(pvec, child, i))
return -ENOSPC;
ret = __mmu_unsync_walk(child, pvec);
if (!ret) {
clear_unsync_child_bit(sp, i);
continue;
} else if (ret > 0) {
nr_unsync_leaf += ret;
} else
return ret;
} else if (child->unsync) {
nr_unsync_leaf++;
if (mmu_pages_add(pvec, child, i))
return -ENOSPC;
} else
clear_unsync_child_bit(sp, i);
}
return nr_unsync_leaf;
}
#define INVALID_INDEX (-1)
static int mmu_unsync_walk(struct kvm_mmu_page *sp,
struct kvm_mmu_pages *pvec)
{
pvec->nr = 0;
if (!sp->unsync_children)
return 0;
mmu_pages_add(pvec, sp, INVALID_INDEX);
return __mmu_unsync_walk(sp, pvec);
}
static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
WARN_ON(!sp->unsync);
trace_kvm_mmu_sync_page(sp);
sp->unsync = 0;
--kvm->stat.mmu_unsync;
}
static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
struct list_head *invalid_list);
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
struct list_head *invalid_list);
#define for_each_valid_sp(_kvm, _sp, _gfn) \
hlist_for_each_entry(_sp, \
&(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
if ((_sp)->role.invalid) { \
} else
#define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn) \
for_each_valid_sp(_kvm, _sp, _gfn) \
if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
static inline bool is_ept_sp(struct kvm_mmu_page *sp)
{
return sp->role.cr0_wp && sp->role.smap_andnot_wp;
}
/* @sp->gfn should be write-protected at the call site */
static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
struct list_head *invalid_list)
{
if ((!is_ept_sp(sp) && sp->role.gpte_is_8_bytes != !!is_pae(vcpu)) ||
vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
return false;
}
return true;
}
static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
struct list_head *invalid_list,
bool remote_flush)
{
if (!remote_flush && list_empty(invalid_list))
return false;
if (!list_empty(invalid_list))
kvm_mmu_commit_zap_page(kvm, invalid_list);
else
kvm_flush_remote_tlbs(kvm);
return true;
}
static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
struct list_head *invalid_list,
bool remote_flush, bool local_flush)
{
if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush))
return;
if (local_flush)
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}
#ifdef CONFIG_KVM_MMU_AUDIT
#include "mmu_audit.c"
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
static void mmu_audit_disable(void) { }
#endif
static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
struct list_head *invalid_list)
{
kvm_unlink_unsync_page(vcpu->kvm, sp);
return __kvm_sync_page(vcpu, sp, invalid_list);
}
/* @gfn should be write-protected at the call site */
static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
struct list_head *invalid_list)
{
struct kvm_mmu_page *s;
bool ret = false;
for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
if (!s->unsync)
continue;
WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
ret |= kvm_sync_page(vcpu, s, invalid_list);
}
return ret;
}
struct mmu_page_path {
struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
unsigned int idx[PT64_ROOT_MAX_LEVEL];
};
#define for_each_sp(pvec, sp, parents, i) \
for (i = mmu_pages_first(&pvec, &parents); \
i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \
i = mmu_pages_next(&pvec, &parents, i))
static int mmu_pages_next(struct kvm_mmu_pages *pvec,
struct mmu_page_path *parents,
int i)
{
int n;
for (n = i+1; n < pvec->nr; n++) {
struct kvm_mmu_page *sp = pvec->page[n].sp;
unsigned idx = pvec->page[n].idx;
int level = sp->role.level;
parents->idx[level-1] = idx;
if (level == PT_PAGE_TABLE_LEVEL)
break;
parents->parent[level-2] = sp;
}
return n;
}
static int mmu_pages_first(struct kvm_mmu_pages *pvec,
struct mmu_page_path *parents)
{
struct kvm_mmu_page *sp;
int level;
if (pvec->nr == 0)
return 0;
WARN_ON(pvec->page[0].idx != INVALID_INDEX);
sp = pvec->page[0].sp;
level = sp->role.level;
WARN_ON(level == PT_PAGE_TABLE_LEVEL);
parents->parent[level-2] = sp;
/* Also set up a sentinel. Further entries in pvec are all
* children of sp, so this element is never overwritten.
*/
parents->parent[level-1] = NULL;
return mmu_pages_next(pvec, parents, 0);
}
static void mmu_pages_clear_parents(struct mmu_page_path *parents)
{
struct kvm_mmu_page *sp;
unsigned int level = 0;
do {
unsigned int idx = parents->idx[level];
sp = parents->parent[level];
if (!sp)
return;
WARN_ON(idx == INVALID_INDEX);
clear_unsync_child_bit(sp, idx);
level++;
} while (!sp->unsync_children);
}
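/*
 * Sync all unsync descendants of @parent: write-protect their gfns
 * (flushing remote TLBs if anything was write-protected), sync each page
 * and zap the ones that fail, dropping the mmu_lock periodically via
 * cond_resched_lock() to keep latency bounded.
 */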
static void mmu_sync_children(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *parent)
{
int i;
struct kvm_mmu_page *sp;
struct mmu_page_path parents;
struct kvm_mmu_pages pages;
LIST_HEAD(invalid_list);
bool flush = false;
while (mmu_unsync_walk(parent, &pages)) {
bool protected = false;
for_each_sp(pages, sp, parents, i)
protected |= rmap_write_protect(vcpu, sp->gfn);
if (protected) {
kvm_flush_remote_tlbs(vcpu->kvm);
flush = false;
}
for_each_sp(pages, sp, parents, i) {
flush |= kvm_sync_page(vcpu, sp, &invalid_list);
mmu_pages_clear_parents(&parents);
}
if (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)) {
kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
cond_resched_lock(&vcpu->kvm->mmu_lock);
flush = false;
}
}
kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
}
static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
{
atomic_set(&sp->write_flooding_count, 0);
}
static void clear_sp_write_flooding_count(u64 *spte)
{
struct kvm_mmu_page *sp = page_header(__pa(spte));
__clear_sp_write_flooding_count(sp);
}
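/*
 * Look up the shadow page for (@gfn, role) in the hash table, syncing it
 * first if it is unsync. If no matching page exists, allocate a new one;
 * indirect pages are accounted as shadowed and, at the last level, their
 * gfn is write-protected before use.
 */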
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
gfn_t gfn,
gva_t gaddr,
unsigned level,
int direct,
unsigned access)
{
union kvm_mmu_page_role role;
unsigned quadrant;
struct kvm_mmu_page *sp;
bool need_sync = false;
bool flush = false;
int collisions = 0;
LIST_HEAD(invalid_list);
role = vcpu->arch.mmu->mmu_role.base;
role.level = level;
role.direct = direct;
if (role.direct)
role.gpte_is_8_bytes = true;
role.access = access;
if (!vcpu->arch.mmu->direct_map
&& vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
role.quadrant = quadrant;
}
for_each_valid_sp(vcpu->kvm, sp, gfn) {
if (sp->gfn != gfn) {
collisions++;
continue;
}
if (!need_sync && sp->unsync)
need_sync = true;
if (sp->role.word != role.word)
continue;
if (sp->unsync) {
/* The page is good, but __kvm_sync_page might still end
* up zapping it. If so, break in order to rebuild it.
*/
if (!__kvm_sync_page(vcpu, sp, &invalid_list))
break;
WARN_ON(!list_empty(&invalid_list));
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}
if (sp->unsync_children)
kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
__clear_sp_write_flooding_count(sp);
trace_kvm_mmu_get_page(sp, false);
goto out;
}
++vcpu->kvm->stat.mmu_cache_miss;
sp = kvm_mmu_alloc_page(vcpu, direct);
sp->gfn = gfn;
sp->role = role;
hlist_add_head(&sp->hash_link,
&vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
if (!direct) {
/*
* We should do write protection before syncing pages,
* otherwise the content of the synced shadow page may
* be inconsistent with the guest page table.
*/
account_shadowed(vcpu->kvm, sp);
if (level == PT_PAGE_TABLE_LEVEL &&
rmap_write_protect(vcpu, gfn))
kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
if (level > PT_PAGE_TABLE_LEVEL && need_sync)
flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
}
clear_page(sp->spt);
trace_kvm_mmu_get_page(sp, true);
kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
out:
if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
return sp;
}
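/*
 * Prepare a shadow page table walk for @addr starting at @root. For a
 * 4-level shadow of a shorter guest table the walk starts one level down,
 * and for PAE roots it descends immediately into the pae_root entry
 * selected by bits 31:30 of @addr.
 */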
static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
struct kvm_vcpu *vcpu, hpa_t root,
u64 addr)
{
iterator->addr = addr;
iterator->shadow_addr = root;
iterator->level = vcpu->arch.mmu->shadow_root_level;
if (iterator->level == PT64_ROOT_4LEVEL &&
vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
!vcpu->arch.mmu->direct_map)
--iterator->level;
if (iterator->level == PT32E_ROOT_LEVEL) {
/*
* prev_root is currently only used for 64-bit hosts. So only
* the active root_hpa is valid here.
*/
BUG_ON(root != vcpu->arch.mmu->root_hpa);
iterator->shadow_addr
= vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
--iterator->level;
if (!iterator->shadow_addr)
iterator->level = 0;
}
}
static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
struct kvm_vcpu *vcpu, u64 addr)
{
shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa,
addr);
}
static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
{
if (iterator->level < PT_PAGE_TABLE_LEVEL)
return false;
iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
return true;
}
static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
u64 spte)
{
if (is_last_spte(spte, iterator->level)) {
iterator->level = 0;
return;
}
iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
--iterator->level;
}
static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
{
__shadow_walk_next(iterator, *iterator->sptep);
}
static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
struct kvm_mmu_page *sp)
{
u64 spte;
BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
spte = __pa(sp->spt) | shadow_present_mask | PT_WRITABLE_MASK |
shadow_user_mask | shadow_x_mask | shadow_me_mask;
if (sp_ad_disabled(sp))
spte |= shadow_acc_track_value;
else
spte |= shadow_accessed_mask;
mmu_spte_set(sptep, spte);
mmu_page_add_parent_pte(vcpu, sp, sptep);
if (sp->unsync_children || sp->unsync)
mark_unsync(sptep);
}
static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
unsigned direct_access)
{
if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
struct kvm_mmu_page *child;
/*
* For the direct sp, if the guest pte's dirty bit
* changed from clean to dirty, it would corrupt the
* sp's access: writes would be allowed through the
* read-only sp, so we should update the spte at this
* point to get a new sp with the correct access.
*/
child = page_header(*sptep & PT64_BASE_ADDR_MASK);
if (child->role.access == direct_access)
return;
drop_parent_pte(child, sptep);
kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1);
}
}
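/*
 * Zap a single spte of @sp: a last-level spte is dropped together with its
 * rmap entry, a non-leaf spte only loses the parent link to its child
 * page. Returns true if a present spte was removed, i.e. the caller may
 * need to flush TLBs. MMIO sptes are cleared without being reported.
 */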
static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
u64 *spte)
{
u64 pte;
struct kvm_mmu_page *child;
pte = *spte;
if (is_shadow_present_pte(pte)) {
if (is_last_spte(pte, sp->role.level)) {
drop_spte(kvm, spte);
if (is_large_pte(pte))
--kvm->stat.lpages;
} else {
child = page_header(pte & PT64_BASE_ADDR_MASK);
drop_parent_pte(child, spte);
}
return true;
}
if (is_mmio_spte(pte))
mmu_spte_clear_no_track(spte);
return false;
}
static void kvm_mmu_page_unlink_children(struct kvm *kvm,
struct kvm_mmu_page *sp)
{
unsigned i;
for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
mmu_page_zap_pte(kvm, sp, sp->spt + i);
}
static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
{
u64 *sptep;
struct rmap_iterator iter;
while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
drop_parent_pte(sp, sptep);
}
static int mmu_zap_unsync_children(struct kvm *kvm,
struct kvm_mmu_page *parent,
struct list_head *invalid_list)
{
int i, zapped = 0;
struct mmu_page_path parents;
struct kvm_mmu_pages pages;
if (parent->role.level == PT_PAGE_TABLE_LEVEL)
return 0;
while (mmu_unsync_walk(parent, &pages)) {
struct kvm_mmu_page *sp;
for_each_sp(pages, sp, parents, i) {
kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
mmu_pages_clear_parents(&parents);
zapped++;
}
}
return zapped;
}
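/*
 * Detach @sp from the shadow page tables: zap its unsync children, unlink
 * its sptes and parent ptes, and move it to @invalid_list (or just mark it
 * invalid if it is still in use as a root). *@nr_zapped counts @sp plus
 * any zapped children; the return value tells the caller whether the
 * active_mmu_pages list may have been reordered.
 */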
static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
struct kvm_mmu_page *sp,
struct list_head *invalid_list,
int *nr_zapped)
{
bool list_unstable;
trace_kvm_mmu_prepare_zap_page(sp);
++kvm->stat.mmu_shadow_zapped;
*nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
kvm_mmu_page_unlink_children(kvm, sp);
kvm_mmu_unlink_parents(kvm, sp);
/* Zapping children means active_mmu_pages has become unstable. */
list_unstable = *nr_zapped;
if (!sp->role.invalid && !sp->role.direct)
unaccount_shadowed(kvm, sp);
if (sp->unsync)
kvm_unlink_unsync_page(kvm, sp);
if (!sp->root_count) {
/* Count self */
(*nr_zapped)++;
list_move(&sp->link, invalid_list);
kvm_mod_used_mmu_pages(kvm, -1);
} else {
list_move(&sp->link, &kvm->arch.active_mmu_pages);
if (!sp->role.invalid)
kvm_reload_remote_mmus(kvm);
}
sp->role.invalid = 1;
return list_unstable;
}
static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
struct list_head *invalid_list)
{
int nr_zapped;
__kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped);
return nr_zapped;
}
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
struct list_head *invalid_list)
{
struct kvm_mmu_page *sp, *nsp;
if (list_empty(invalid_list))
return;
/*
* We need to make sure everyone sees our modifications to
* the page tables and sees changes to vcpu->mode here. The barrier
* in the kvm_flush_remote_tlbs() achieves this. This pairs
* with vcpu_enter_guest and walk_shadow_page_lockless_begin/end.
*
* In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
* guest mode and/or lockless shadow page table walks.
*/
kvm_flush_remote_tlbs(kvm);
list_for_each_entry_safe(sp, nsp, invalid_list, link) {
WARN_ON(!sp->role.invalid || sp->root_count);
kvm_mmu_free_page(sp);
}
}
static bool prepare_zap_oldest_mmu_page(struct kvm *kvm,
struct list_head *invalid_list)
{
struct kvm_mmu_page *sp;
if (list_empty(&kvm->arch.active_mmu_pages))
return false;
sp = list_last_entry(&kvm->arch.active_mmu_pages,
struct kvm_mmu_page, link);
return kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
}
/*
* Change the number of mmu pages allocated to the vm.
* Note: if goal_nr_mmu_pages is too small, you will get a deadlock.
*/
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
{
LIST_HEAD(invalid_list);
spin_lock(&kvm->mmu_lock);
if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
/* Need to free some mmu pages to achieve the goal. */
while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages)
if (!prepare_zap_oldest_mmu_page(kvm, &invalid_list))
break;
kvm_mmu_commit_zap_page(kvm, &invalid_list);
goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
}
kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
spin_unlock(&kvm->mmu_lock);
}
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
struct kvm_mmu_page *sp;
LIST_HEAD(invalid_list);
int r;
pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
r = 0;
spin_lock(&kvm->mmu_lock);
for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
sp->role.word);
r = 1;
kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
}
kvm_mmu_commit_zap_page(kvm, &invalid_list);
spin_unlock(&kvm->mmu_lock);
return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);
static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
trace_kvm_mmu_unsync_page(sp);
++vcpu->kvm->stat.mmu_unsync;
sp->unsync = 1;
kvm_mmu_mark_parents_unsync(sp);
}
static bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
bool can_unsync)
{
struct kvm_mmu_page *sp;
if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
return true;
for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
if (!can_unsync)
return true;
if (sp->unsync)
continue;
WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL);
kvm_unsync_page(vcpu, sp);
}
/*
* We need to ensure that the marking of unsync pages is visible
* before the SPTE is updated to allow writes because
* kvm_mmu_sync_roots() checks the unsync flags without holding
* the MMU lock and so can race with this. If the SPTE was updated
* before the page had been marked as unsync-ed, something like the
* following could happen:
*
* CPU 1 CPU 2
* ---------------------------------------------------------------------
* 1.2 Host updates SPTE
* to be writable
* 2.1 Guest writes a GPTE for GVA X.
* (GPTE being in the guest page table shadowed
* by the SP from CPU 1.)
* This reads SPTE during the page table walk.
* Since SPTE.W is read as 1, there is no
* fault.
*
* 2.2 Guest issues TLB flush.
* That causes a VM Exit.
*
* 2.3 kvm_mmu_sync_pages() reads sp->unsync.
* Since it is false, it just returns.
*
* 2.4 Guest accesses GVA X.
* Since the mapping in the SP was not updated, the
* old mapping for GVA X is incorrectly used.
* 1.1 Host marks SP
* as unsync
* (sp->unsync = true)
*
* The write barrier below ensures that 1.1 happens before 1.2 and thus
* the situation in 2.4 does not arise. The implicit barrier in 2.2
* pairs with this write barrier.
*/
smp_wmb();
return false;
}
static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
{
if (pfn_valid(pfn))
return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
/*
* Some reserved pages, such as those from NVDIMM
* DAX devices, are not for MMIO, and can be mapped
* with cached memory type for better performance.
* However, the above check misidentifies those pages
* as MMIO, and results in KVM mapping them with UC
* memory type, which would hurt performance.
* Therefore, we check the host memory type in addition
* and only treat UC/UC-/WC pages as MMIO.
*/
(!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn));
return !e820__mapped_raw_any(pfn_to_hpa(pfn),
pfn_to_hpa(pfn + 1) - 1,
E820_TYPE_RAM);
}
/* Bits which may be returned by set_spte() */
#define SET_SPTE_WRITE_PROTECTED_PT BIT(0)
#define SET_SPTE_NEED_REMOTE_TLB_FLUSH BIT(1)
static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
unsigned pte_access, int level,
gfn_t gfn, kvm_pfn_t pfn, bool speculative,
bool can_unsync, bool host_writable)
{
u64 spte = 0;
int ret = 0;
struct kvm_mmu_page *sp;
if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
return 0;
sp = page_header(__pa(sptep));
if (sp_ad_disabled(sp))
spte |= shadow_acc_track_value;
/*
* For the EPT case, shadow_present_mask is 0 if hardware
* supports exec-only page table entries. In that case,
* ACC_USER_MASK and shadow_user_mask are used to represent
* read access. See FNAME(gpte_access) in paging_tmpl.h.
*/
spte |= shadow_present_mask;
if (!speculative)
spte |= spte_shadow_accessed_mask(spte);
if (pte_access & ACC_EXEC_MASK)
spte |= shadow_x_mask;
else
spte |= shadow_nx_mask;
if (pte_access & ACC_USER_MASK)
spte |= shadow_user_mask;
if (level > PT_PAGE_TABLE_LEVEL)
spte |= PT_PAGE_SIZE_MASK;
if (tdp_enabled)
spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
kvm_is_mmio_pfn(pfn));
if (host_writable)
spte |= SPTE_HOST_WRITEABLE;
else
pte_access &= ~ACC_WRITE_MASK;
if (!kvm_is_mmio_pfn(pfn))
spte |= shadow_me_mask;
spte |= (u64)pfn << PAGE_SHIFT;
if (pte_access & ACC_WRITE_MASK) {
/*
* Another vcpu may create a new sp in the window between
* mapping_level() and acquiring the mmu-lock. We can
* allow the guest to retry the access; the mapping can
* be fixed when the guest refaults.
*/
if (level > PT_PAGE_TABLE_LEVEL &&
mmu_gfn_lpage_is_disallowed(vcpu, gfn, level))
goto done;
spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;
/*
* Optimization: for pte sync, if spte was writable the hash
* lookup is unnecessary (and expensive). Write protection
* is the responsibility of mmu_get_page / kvm_sync_page.
* Same reasoning can be applied to dirty page accounting.
*/
if (!can_unsync && is_writable_pte(*sptep))
goto set_pte;
if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
pgprintk("%s: found shadow page for %llx, marking ro\n",
__func__, gfn);
ret |= SET_SPTE_WRITE_PROTECTED_PT;
pte_access &= ~ACC_WRITE_MASK;
spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
}
}
if (pte_access & ACC_WRITE_MASK) {
kvm_vcpu_mark_page_dirty(vcpu, gfn);
spte |= spte_shadow_dirty_mask(spte);
}
if (speculative)
spte = mark_spte_for_access_track(spte);
set_pte:
if (mmu_spte_update(sptep, spte))
ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
done:
return ret;
}
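/*
 * Install a spte via set_spte() after handling the overwrite cases: an
 * existing page-table pointer being replaced by a large mapping, or a pfn
 * change, both of which drop the old entry first. Translates the
 * SET_SPTE_* result bits into TLB flushes and RET_PF_* return codes and
 * keeps the lpages statistic and the rmap up to date.
 */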
static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
int write_fault, int level, gfn_t gfn, kvm_pfn_t pfn,
bool speculative, bool host_writable)
{
int was_rmapped = 0;
int rmap_count;
int set_spte_ret;
int ret = RET_PF_RETRY;
bool flush = false;
pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
*sptep, write_fault, gfn);
if (is_shadow_present_pte(*sptep)) {
/*
* If we overwrite a PTE page pointer with a 2MB PMD, unlink
* the parent of the now unreachable PTE.
*/
if (level > PT_PAGE_TABLE_LEVEL &&
!is_large_pte(*sptep)) {
struct kvm_mmu_page *child;
u64 pte = *sptep;
child = page_header(pte & PT64_BASE_ADDR_MASK);
drop_parent_pte(child, sptep);
flush = true;
} else if (pfn != spte_to_pfn(*sptep)) {
pgprintk("hfn old %llx new %llx\n",
spte_to_pfn(*sptep), pfn);
drop_spte(vcpu->kvm, sptep);
flush = true;
} else
was_rmapped = 1;
}
set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn,
speculative, true, host_writable);
if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
if (write_fault)
ret = RET_PF_EMULATE;
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}
if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush)
kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
KVM_PAGES_PER_HPAGE(level));
if (unlikely(is_mmio_spte(*sptep)))
ret = RET_PF_EMULATE;
pgprintk("%s: setting spte %llx\n", __func__, *sptep);
trace_kvm_mmu_set_spte(level, gfn, sptep);
if (!was_rmapped && is_large_pte(*sptep))
++vcpu->kvm->stat.lpages;
if (is_shadow_present_pte(*sptep)) {
if (!was_rmapped) {
rmap_count = rmap_add(vcpu, sptep, gfn);
if (rmap_count > RMAP_RECYCLE_THRESHOLD)
rmap_recycle(vcpu, sptep, gfn);
}
}
return ret;
}
static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
bool no_dirty_log)
{
struct kvm_memory_slot *slot;
slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
if (!slot)
return KVM_PFN_ERR_FAULT;
return gfn_to_pfn_memslot_atomic(slot, gfn);
}
static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp,
u64 *start, u64 *end)
{
struct page *pages[PTE_PREFETCH_NUM];
struct kvm_memory_slot *slot;
unsigned access = sp->role.access;
int i, ret;
gfn_t gfn;
gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
if (!slot)
return -1;
ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
if (ret <= 0)
return -1;
for (i = 0; i < ret; i++, gfn++, start++) {
mmu_set_spte(vcpu, start, access, 0, sp->role.level, gfn,
page_to_pfn(pages[i]), true, true);
put_page(pages[i]);
}
return 0;
}
static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp, u64 *sptep)
{
u64 *spte, *start = NULL;
int i;
WARN_ON(!sp->role.direct);
i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
spte = sp->spt + i;
for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
if (is_shadow_present_pte(*spte) || spte == sptep) {
if (!start)
continue;
if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
break;
start = NULL;
} else if (!start)
start = spte;
}
}
static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
{
struct kvm_mmu_page *sp;
sp = page_header(__pa(sptep));
/*
* Without accessed bits, there's no way to distinguish between
* actually accessed translations and prefetched ones, so disable pte
* prefetch if accessed bits aren't available.
*/
if (sp_ad_disabled(sp))
return;
if (sp->role.level > PT_PAGE_TABLE_LEVEL)
return;
__direct_pte_prefetch(vcpu, sp, sptep);
}
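/*
 * Map @gpa for a direct (TDP or nonpaging) fault: walk the shadow
 * hierarchy, creating and linking intermediate shadow pages down to
 * @level, then install the leaf spte with mmu_set_spte() and prefetch
 * the neighbouring ptes.
 */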
static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
int map_writable, int level, kvm_pfn_t pfn,
bool prefault)
{
struct kvm_shadow_walk_iterator it;
struct kvm_mmu_page *sp;
int ret;
gfn_t gfn = gpa >> PAGE_SHIFT;
gfn_t base_gfn = gfn;
if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
return RET_PF_RETRY;
trace_kvm_mmu_spte_requested(gpa, level, pfn);
for_each_shadow_entry(vcpu, gpa, it) {
base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
if (it.level == level)
break;
drop_large_spte(vcpu, it.sptep);
if (!is_shadow_present_pte(*it.sptep)) {
sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr,
it.level - 1, true, ACC_ALL);
link_shadow_page(vcpu, it.sptep, sp);
}
}
ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL,
write, level, base_gfn, pfn, prefault,
map_writable);
direct_pte_prefetch(vcpu, it.sptep);
++vcpu->stat.pf_fixed;
return ret;
}
static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
{
send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk);
}
static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
{
/*
* Do not cache the mmio info caused by writing the readonly gfn
* into the spte, otherwise a read access on the readonly gfn can
* also cause an mmio page fault and be treated as an mmio access.
*/
if (pfn == KVM_PFN_ERR_RO_FAULT)
return RET_PF_EMULATE;
if (pfn == KVM_PFN_ERR_HWPOISON) {
kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
return RET_PF_RETRY;
}
return -EFAULT;
}
static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
gfn_t gfn, kvm_pfn_t *pfnp,
int *levelp)
{
kvm_pfn_t pfn = *pfnp;
int level = *levelp;
/*
* Check if it's a transparent hugepage. If this were a
* hugetlbfs page, level wouldn't be set to
* PT_PAGE_TABLE_LEVEL and there would be no adjustment done
* here.
*/
if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
level == PT_PAGE_TABLE_LEVEL &&
PageTransCompoundMap(pfn_to_page(pfn)) &&
!mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
unsigned long mask;
/*
* mmu_notifier_retry was successful and we hold the
* mmu_lock here, so the pmd can't start splitting
* under us, and in turn
* __split_huge_page_refcount() can't run under
* us either, so we can safely transfer the refcount from
* PG_tail to PG_head as we switch the pfn from tail to
* head.
*/
*levelp = level = PT_DIRECTORY_LEVEL;
mask = KVM_PAGES_PER_HPAGE(level) - 1;
VM_BUG_ON((gfn & mask) != (pfn & mask));
if (pfn & mask) {
kvm_release_pfn_clean(pfn);
pfn &= ~mask;
kvm_get_pfn(pfn);
*pfnp = pfn;
}
}
}
static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
kvm_pfn_t pfn, unsigned access, int *ret_val)
{
/* The pfn is invalid, report the error! */
if (unlikely(is_error_pfn(pfn))) {
*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
return true;
}
if (unlikely(is_noslot_pfn(pfn)))
vcpu_cache_mmio_info(vcpu, gva, gfn, access);
return false;
}
static bool page_fault_can_be_fast(u32 error_code)
{
/*
* Do not fix the mmio spte with invalid generation number which
* need to be updated by slow page fault path.
*/
if (unlikely(error_code & PFERR_RSVD_MASK))
return false;
/* See if the page fault is due to an NX violation */
if (unlikely(((error_code & (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))
== (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))))
return false;
/*
* #PF can be fast if:
* 1. The shadow page table entry is not present, which could mean that
* the fault is potentially caused by access tracking (if enabled).
* 2. The shadow page table entry is present and the fault
* is caused by write-protect; that means we just need to change the W
* bit of the spte, which can be done outside of the mmu-lock.
*
* However, if access tracking is disabled we know that a non-present
* page must be a genuine page fault where we have to create a new SPTE.
* So, if access tracking is disabled, we return true only for write
* accesses to a present page.
*/
return shadow_acc_track_mask != 0 ||
((error_code & (PFERR_WRITE_MASK | PFERR_PRESENT_MASK))
== (PFERR_WRITE_MASK | PFERR_PRESENT_MASK));
}
/*
* Returns true if the SPTE was fixed successfully. Otherwise,
* someone else modified the SPTE from its original value.
*/
static bool
fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
u64 *sptep, u64 old_spte, u64 new_spte)
{
gfn_t gfn;
WARN_ON(!sp->role.direct);
/*
* Theoretically we could also set dirty bit (and flush TLB) here in
* order to eliminate unnecessary PML logging. See comments in
* set_spte. But fast_page_fault is very unlikely to happen with PML
* enabled, so we do not do this. This might result in the same GPA
* being logged in the PML buffer again when the write really happens,
* and in mark_page_dirty eventually being called twice for it. But that
* is also harmless. This also avoids the TLB flush needed after setting
* the dirty bit, so non-PML cases won't be impacted.
*
* Compare with set_spte where instead shadow_dirty_mask is set.
*/
if (cmpxchg64(sptep, old_spte, new_spte) != old_spte)
return false;
if (is_writable_pte(new_spte) && !is_writable_pte(old_spte)) {
/*
* The gfn of direct spte is stable since it is
* calculated by sp->gfn.
*/
gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
kvm_vcpu_mark_page_dirty(vcpu, gfn);
}
return true;
}
static bool is_access_allowed(u32 fault_err_code, u64 spte)
{
if (fault_err_code & PFERR_FETCH_MASK)
return is_executable_pte(spte);
if (fault_err_code & PFERR_WRITE_MASK)
return is_writable_pte(spte);
/* Fault was on Read access */
return spte & PT_PRESENT_MASK;
}
/*
* Return value:
* - true: let the vcpu access the same address again.
* - false: let the real page fault path fix it.
*/
static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
u32 error_code)
{
struct kvm_shadow_walk_iterator iterator;
struct kvm_mmu_page *sp;
bool fault_handled = false;
u64 spte = 0ull;
uint retry_count = 0;
if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
return false;
if (!page_fault_can_be_fast(error_code))
return false;
walk_shadow_page_lockless_begin(vcpu);
do {
u64 new_spte;
for_each_shadow_entry_lockless(vcpu, gva, iterator, spte)
if (!is_shadow_present_pte(spte) ||
iterator.level < level)
break;
sp = page_header(__pa(iterator.sptep));
if (!is_last_spte(spte, sp->role.level))
break;
/*
* Check whether the memory access that caused the fault would
* still cause it if it were to be performed right now. If not,
* then this is a spurious fault caused by a lazily flushed TLB,
* or some other CPU has already fixed the PTE after the
* current CPU took the fault.
*
* Need not check the access of upper level table entries since
* they are always ACC_ALL.
*/
if (is_access_allowed(error_code, spte)) {
fault_handled = true;
break;
}
new_spte = spte;
if (is_access_track_spte(spte))
new_spte = restore_acc_track_spte(new_spte);
/*
* Currently, to simplify the code, write-protection can
* be removed in the fast path only if the SPTE was
* write-protected for dirty-logging or access tracking.
*/
if ((error_code & PFERR_WRITE_MASK) &&
spte_can_locklessly_be_made_writable(spte))
{
new_spte |= PT_WRITABLE_MASK;
/*
* Do not fix write-permission on the large spte. Since
* we only dirty the first page into the dirty-bitmap in
* fast_pf_fix_direct_spte(), other pages are missed
* if its slot has dirty logging enabled.
*
* Instead, we let the slow page fault path create a
* normal spte to fix the access.
*
* See the comments in kvm_arch_commit_memory_region().
*/
if (sp->role.level > PT_PAGE_TABLE_LEVEL)
break;
}
/* Verify that the fault can be handled in the fast path */
if (new_spte == spte ||
!is_access_allowed(error_code, new_spte))
break;
/*
* Currently, fast page fault only works for direct mapping
* since the gfn is not stable for indirect shadow page. See
* Documentation/virt/kvm/locking.txt to get more detail.
*/
fault_handled = fast_pf_fix_direct_spte(vcpu, sp,
iterator.sptep, spte,
new_spte);
if (fault_handled)
break;
if (++retry_count > 4) {
printk_once(KERN_WARNING
"kvm: Fast #PF retrying more than 4 times.\n");
break;
}
} while (true);
trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep,
spte, fault_handled);
walk_shadow_page_lockless_end(vcpu);
return fault_handled;
}
static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable);
static int make_mmu_pages_available(struct kvm_vcpu *vcpu);
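/*
 * Slow path for nonpaging faults: pick the mapping level, try the fast
 * page fault path, resolve gfn->pfn (possibly via async page faults),
 * then retry under mmu_lock with mmu_notifier_retry() protection and map
 * the page with __direct_map().
 */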
static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
gfn_t gfn, bool prefault)
{
int r;
int level;
bool force_pt_level = false;
kvm_pfn_t pfn;
unsigned long mmu_seq;
bool map_writable, write = error_code & PFERR_WRITE_MASK;
level = mapping_level(vcpu, gfn, &force_pt_level);
if (likely(!force_pt_level)) {
/*
* This path builds a PAE pagetable - so we can map
* 2mb pages at maximum. Therefore check if the level
* is larger than that.
*/
if (level > PT_DIRECTORY_LEVEL)
level = PT_DIRECTORY_LEVEL;
gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
}
if (fast_page_fault(vcpu, v, level, error_code))
return RET_PF_RETRY;
mmu_seq = vcpu->kvm->mmu_notifier_seq;
smp_rmb();
if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
return RET_PF_RETRY;
if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r))
return r;
r = RET_PF_RETRY;
spin_lock(&vcpu->kvm->mmu_lock);
if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
goto out_unlock;
if (make_mmu_pages_available(vcpu) < 0)
goto out_unlock;
if (likely(!force_pt_level))
transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
r = __direct_map(vcpu, v, write, map_writable, level, pfn, prefault);
out_unlock:
spin_unlock(&vcpu->kvm->mmu_lock);
kvm_release_pfn_clean(pfn);
return r;
}
static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
struct list_head *invalid_list)
{
struct kvm_mmu_page *sp;
if (!VALID_PAGE(*root_hpa))
return;
sp = page_header(*root_hpa & PT64_BASE_ADDR_MASK);
--sp->root_count;
if (!sp->root_count && sp->role.invalid)
kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
*root_hpa = INVALID_PAGE;
}
/* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */
void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
ulong roots_to_free)
{
int i;
LIST_HEAD(invalid_list);
bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT;
BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);
/* Before acquiring the MMU lock, see if we need to do any real work. */
if (!(free_active_root && VALID_PAGE(mmu->root_hpa))) {
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) &&
VALID_PAGE(mmu->prev_roots[i].hpa))
break;
if (i == KVM_MMU_NUM_PREV_ROOTS)
return;
}
spin_lock(&vcpu->kvm->mmu_lock);
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
mmu_free_root_page(vcpu->kvm, &mmu->prev_roots[i].hpa,
&invalid_list);
if (free_active_root) {
if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
(mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
mmu_free_root_page(vcpu->kvm, &mmu->root_hpa,
&invalid_list);
} else {
for (i = 0; i < 4; ++i)
if (mmu->pae_root[i] != 0)
mmu_free_root_page(vcpu->kvm,
&mmu->pae_root[i],
&invalid_list);
mmu->root_hpa = INVALID_PAGE;
}
mmu->root_cr3 = 0;
}
kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
spin_unlock(&vcpu->kvm->mmu_lock);
}
EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
{
int ret = 0;
if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
ret = 1;
}
return ret;
}
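/*
 * Allocate the root page(s) for a direct-map MMU: a single root shadow
 * page for 4-level (or higher) paging, or four PAE roots, one per 1GB
 * quadrant, when the shadow root level is PT32E_ROOT_LEVEL.
 */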
static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
{
struct kvm_mmu_page *sp;
unsigned i;
if (vcpu->arch.mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
spin_lock(&vcpu->kvm->mmu_lock);
if (make_mmu_pages_available(vcpu) < 0) {
spin_unlock(&vcpu->kvm->mmu_lock);
return -ENOSPC;
}
sp = kvm_mmu_get_page(vcpu, 0, 0,
vcpu->arch.mmu->shadow_root_level, 1, ACC_ALL);
++sp->root_count;
spin_unlock(&vcpu->kvm->mmu_lock);
vcpu->arch.mmu->root_hpa = __pa(sp->spt);
} else if (vcpu->arch.mmu->shadow_root_level == PT32E_ROOT_LEVEL) {
for (i = 0; i < 4; ++i) {
hpa_t root = vcpu->arch.mmu->pae_root[i];
MMU_WARN_ON(VALID_PAGE(root));
spin_lock(&vcpu->kvm->mmu_lock);
if (make_mmu_pages_available(vcpu) < 0) {
spin_unlock(&vcpu->kvm->mmu_lock);
return -ENOSPC;
}
sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL);
root = __pa(sp->spt);
++sp->root_count;
spin_unlock(&vcpu->kvm->mmu_lock);
vcpu->arch.mmu->pae_root[i] = root | PT_PRESENT_MASK;
}
vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
} else
BUG();
vcpu->arch.mmu->root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
return 0;
}
static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
{
struct kvm_mmu_page *sp;
u64 pdptr, pm_mask;
gfn_t root_gfn, root_cr3;
int i;
root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
root_gfn = root_cr3 >> PAGE_SHIFT;
if (mmu_check_root(vcpu, root_gfn))
return 1;
/*
* Do we shadow a long mode page table? If so we need to
* write-protect the guests page table root.
*/
if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
hpa_t root = vcpu->arch.mmu->root_hpa;
MMU_WARN_ON(VALID_PAGE(root));
spin_lock(&vcpu->kvm->mmu_lock);
if (make_mmu_pages_available(vcpu) < 0) {
spin_unlock(&vcpu->kvm->mmu_lock);
return -ENOSPC;
}
sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
vcpu->arch.mmu->shadow_root_level, 0, ACC_ALL);
root = __pa(sp->spt);
++sp->root_count;
spin_unlock(&vcpu->kvm->mmu_lock);
vcpu->arch.mmu->root_hpa = root;
goto set_root_cr3;
}
/*
* We shadow a 32 bit page table. This may be a legacy 2-level
* or a PAE 3-level page table. In either case we need to be aware that
* the shadow page table may be a PAE or a long mode page table.
*/
pm_mask = PT_PRESENT_MASK;
if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL)
pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
for (i = 0; i < 4; ++i) {
hpa_t root = vcpu->arch.mmu->pae_root[i];
MMU_WARN_ON(VALID_PAGE(root));
if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) {
pdptr = vcpu->arch.mmu->get_pdptr(vcpu, i);
if (!(pdptr & PT_PRESENT_MASK)) {
vcpu->arch.mmu->pae_root[i] = 0;
continue;
}
root_gfn = pdptr >> PAGE_SHIFT;
if (mmu_check_root(vcpu, root_gfn))
return 1;
}
spin_lock(&vcpu->kvm->mmu_lock);
if (make_mmu_pages_available(vcpu) < 0) {
spin_unlock(&vcpu->kvm->mmu_lock);
return -ENOSPC;
}
sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL,
0, ACC_ALL);
root = __pa(sp->spt);
++sp->root_count;
spin_unlock(&vcpu->kvm->mmu_lock);
vcpu->arch.mmu->pae_root[i] = root | pm_mask;
}
vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
/*
* If we shadow a 32 bit page table with a long mode page
* table we enter this path.
*/
if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
if (vcpu->arch.mmu->lm_root == NULL) {
/*
* The additional page necessary for this is only
* allocated on demand.
*/
u64 *lm_root;
lm_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT);
if (lm_root == NULL)
return 1;
lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask;
vcpu->arch.mmu->lm_root = lm_root;
}
vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
}
set_root_cr3:
vcpu->arch.mmu->root_cr3 = root_cr3;
return 0;
}
static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.mmu->direct_map)
return mmu_alloc_direct_roots(vcpu);
else
return mmu_alloc_shadow_roots(vcpu);
}
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
{
int i;
struct kvm_mmu_page *sp;
if (vcpu->arch.mmu->direct_map)
return;
if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
return;
vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
hpa_t root = vcpu->arch.mmu->root_hpa;
sp = page_header(root);
/*
* Even if another CPU was marking the SP as unsync-ed
* simultaneously, any guest page table changes are not
* guaranteed to be visible anyway until this VCPU issues a TLB
* flush strictly after those changes are made. We only need to
* ensure that the other CPU sets these flags before any actual
* changes to the page tables are made. The comments in
* mmu_need_write_protect() describe what could go wrong if this
* requirement isn't satisfied.
*/
if (!smp_load_acquire(&sp->unsync) &&
!smp_load_acquire(&sp->unsync_children))
return;
spin_lock(&vcpu->kvm->mmu_lock);
kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
mmu_sync_children(vcpu, sp);
kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
spin_unlock(&vcpu->kvm->mmu_lock);
return;
}
spin_lock(&vcpu->kvm->mmu_lock);
kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
for (i = 0; i < 4; ++i) {
hpa_t root = vcpu->arch.mmu->pae_root[i];
if (root && VALID_PAGE(root)) {
root &= PT64_BASE_ADDR_MASK;
sp = page_header(root);
mmu_sync_children(vcpu, sp);
}
}
kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
spin_unlock(&vcpu->kvm->mmu_lock);
}
EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots);
static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
u32 access, struct x86_exception *exception)
{
if (exception)
exception->error_code = 0;
return vaddr;
}
static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
u32 access,
struct x86_exception *exception)
{
if (exception)
exception->error_code = 0;
return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
}
static bool
__is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level)
{
int bit7 = (pte >> 7) & 1, low6 = pte & 0x3f;
return (pte & rsvd_check->rsvd_bits_mask[bit7][level-1]) |
((rsvd_check->bad_mt_xwr & (1ull << low6)) != 0);
}
static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
{
return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level);
}
static bool is_shadow_zero_bits_set(struct kvm_mmu *mmu, u64 spte, int level)
{
return __is_rsvd_bits_set(&mmu->shadow_zero_check, spte, level);
}
static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
{
/*
* A nested guest cannot use the MMIO cache if it is using nested
* page tables, because cr2 is an nGPA while the cache stores GPAs.
*/
if (mmu_is_nested(vcpu))
return false;
if (direct)
return vcpu_match_mmio_gpa(vcpu, addr);
return vcpu_match_mmio_gva(vcpu, addr);
}
/* return true if reserved bit is detected on spte. */
static bool
walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
{
struct kvm_shadow_walk_iterator iterator;
u64 sptes[PT64_ROOT_MAX_LEVEL], spte = 0ull;
int root, leaf;
bool reserved = false;
if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
goto exit;
walk_shadow_page_lockless_begin(vcpu);
for (shadow_walk_init(&iterator, vcpu, addr),
leaf = root = iterator.level;
shadow_walk_okay(&iterator);
__shadow_walk_next(&iterator, spte)) {
spte = mmu_spte_get_lockless(iterator.sptep);
sptes[leaf - 1] = spte;
leaf--;
if (!is_shadow_present_pte(spte))
break;
reserved |= is_shadow_zero_bits_set(vcpu->arch.mmu, spte,
iterator.level);
}
walk_shadow_page_lockless_end(vcpu);
if (reserved) {
pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n",
__func__, addr);
while (root > leaf) {
pr_err("------ spte 0x%llx level %d.\n",
sptes[root - 1], root);
root--;
}
}
exit:
*sptep = spte;
return reserved;
}
static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
{
u64 spte;
bool reserved;
if (mmio_info_in_cache(vcpu, addr, direct))
return RET_PF_EMULATE;
reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte);
if (WARN_ON(reserved))
return -EINVAL;
if (is_mmio_spte(spte)) {
gfn_t gfn = get_mmio_spte_gfn(spte);
unsigned access = get_mmio_spte_access(spte);
if (!check_mmio_spte(vcpu, spte))
return RET_PF_INVALID;
if (direct)
addr = 0;
trace_handle_mmio_page_fault(addr, gfn, access);
vcpu_cache_mmio_info(vcpu, addr, gfn, access);
return RET_PF_EMULATE;
}
/*
* If the page table is zapped by other cpus, let the CPU fault again on
* the address.
*/
return RET_PF_RETRY;
}
static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
u32 error_code, gfn_t gfn)
{
if (unlikely(error_code & PFERR_RSVD_MASK))
return false;
if (!(error_code & PFERR_PRESENT_MASK) ||
!(error_code & PFERR_WRITE_MASK))
return false;
/*
* The guest is writing to a page that is write-tracked, which
* cannot be fixed by the page fault handler.
*/
if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
return true;
return false;
}
static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
{
struct kvm_shadow_walk_iterator iterator;
u64 spte;
if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
return;
walk_shadow_page_lockless_begin(vcpu);
for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
clear_sp_write_flooding_count(iterator.sptep);
if (!is_shadow_present_pte(spte))
break;
}
walk_shadow_page_lockless_end(vcpu);
}
static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
u32 error_code, bool prefault)
{
gfn_t gfn = gva >> PAGE_SHIFT;
int r;
pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
if (page_fault_handle_page_track(vcpu, error_code, gfn))
return RET_PF_EMULATE;
r = mmu_topup_memory_caches(vcpu);
if (r)
return r;
MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa));
return nonpaging_map(vcpu, gva & PAGE_MASK,
error_code, gfn, prefault);
}
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
{
struct kvm_arch_async_pf arch;
arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
arch.gfn = gfn;
arch.direct_map = vcpu->arch.mmu->direct_map;
arch.cr3 = vcpu->arch.mmu->get_cr3(vcpu);
return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
}
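/*
 * Translate @gfn to a pfn through its memslot, hiding private memslots
 * from L2. If the page is not immediately available and async page faults
 * are usable, queue one (or halt on a repeated fault) and return true so
 * the fault is retried later; otherwise fall back to a blocking lookup.
 */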
static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable)
{
struct kvm_memory_slot *slot;
bool async;
/*
* Don't expose private memslots to L2.
*/
if (is_guest_mode(vcpu) && !kvm_is_visible_gfn(vcpu->kvm, gfn)) {
*pfn = KVM_PFN_NOSLOT;
return false;
}
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
async = false;
*pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable);
if (!async)
return false; /* *pfn has correct page already */
if (!prefault && kvm_can_do_async_pf(vcpu)) {
trace_kvm_try_async_get_page(gva, gfn);
if (kvm_find_async_pf_gfn(vcpu, gfn)) {
trace_kvm_async_pf_doublefault(gva, gfn);
kvm_make_request(KVM_REQ_APF_HALT, vcpu);
return true;
} else if (kvm_arch_setup_async_pf(vcpu, gva, gfn))
return true;
}
*pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable);
return false;
}
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
u64 fault_address, char *insn, int insn_len)
{
int r = 1;
vcpu->arch.l1tf_flush_l1d = true;
switch (vcpu->arch.apf.host_apf_reason) {
default:
trace_kvm_page_fault(fault_address, error_code);
if (kvm_event_needs_reinjection(vcpu))
kvm_mmu_unprotect_page_virt(vcpu, fault_address);
r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
insn_len);
break;
case KVM_PV_REASON_PAGE_NOT_PRESENT:
vcpu->arch.apf.host_apf_reason = 0;
local_irq_disable();
kvm_async_pf_task_wait(fault_address, 0);
local_irq_enable();
break;
case KVM_PV_REASON_PAGE_READY:
vcpu->arch.apf.host_apf_reason = 0;
local_irq_disable();
kvm_async_pf_task_wake(fault_address);
local_irq_enable();
break;
}
return r;
}
EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
static bool
check_hugepage_cache_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int level)
{
int page_num = KVM_PAGES_PER_HPAGE(level);
gfn &= ~(page_num - 1);
return kvm_mtrr_check_gfn_range_consistency(vcpu, gfn, page_num);
}
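/*
 * Page fault handler for two-dimensional paging (EPT/NPT): choose the
 * mapping level (respecting MTRR consistency for huge pages), try the
 * fast path, resolve the pfn, then map the gfn with __direct_map() under
 * mmu_lock.
 */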
static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
bool prefault)
{
kvm_pfn_t pfn;
int r;
int level;
bool force_pt_level;
gfn_t gfn = gpa >> PAGE_SHIFT;
unsigned long mmu_seq;
int write = error_code & PFERR_WRITE_MASK;
bool map_writable;
MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa));
if (page_fault_handle_page_track(vcpu, error_code, gfn))
return RET_PF_EMULATE;
r = mmu_topup_memory_caches(vcpu);
if (r)
return r;
force_pt_level = !check_hugepage_cache_consistency(vcpu, gfn,
PT_DIRECTORY_LEVEL);
level = mapping_level(vcpu, gfn, &force_pt_level);
if (likely(!force_pt_level)) {
if (level > PT_DIRECTORY_LEVEL &&
!check_hugepage_cache_consistency(vcpu, gfn, level))
level = PT_DIRECTORY_LEVEL;
gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
}
if (fast_page_fault(vcpu, gpa, level, error_code))
return RET_PF_RETRY;
mmu_seq = vcpu->kvm->mmu_notifier_seq;
smp_rmb();
if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
return RET_PF_RETRY;
if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
return r;
r = RET_PF_RETRY;
spin_lock(&vcpu->kvm->mmu_lock);
if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
goto out_unlock;
if (make_mmu_pages_available(vcpu) < 0)
goto out_unlock;
if (likely(!force_pt_level))
transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
r = __direct_map(vcpu, gpa, write, map_writable, level, pfn, prefault);
out_unlock:
spin_unlock(&vcpu->kvm->mmu_lock);
kvm_release_pfn_clean(pfn);
return r;
}
static void nonpaging_init_context(struct kvm_vcpu *vcpu,
struct kvm_mmu *context)
{
context->page_fault = nonpaging_page_fault;
context->gva_to_gpa = nonpaging_gva_to_gpa;
context->sync_page = nonpaging_sync_page;
context->invlpg = nonpaging_invlpg;
context->update_pte = nonpaging_update_pte;
context->root_level = 0;
context->shadow_root_level = PT32E_ROOT_LEVEL;
context->direct_map = true;
context->nx = false;
}
/*
* Find out if a previously cached root matching the new CR3/role is available.
* The current root is also inserted into the cache.
* If a matching root was found, it is assigned to kvm_mmu->root_hpa and true is
* returned.
* Otherwise, the LRU root from the cache is assigned to kvm_mmu->root_hpa and
* false is returned. This root should now be freed by the caller.
*/
static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
union kvm_mmu_page_role new_role)
{
uint i;
struct kvm_mmu_root_info root;
struct kvm_mmu *mmu = vcpu->arch.mmu;
root.cr3 = mmu->root_cr3;
root.hpa = mmu->root_hpa;
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
swap(root, mmu->prev_roots[i]);
if (new_cr3 == root.cr3 && VALID_PAGE(root.hpa) &&
page_header(root.hpa) != NULL &&
new_role.word == page_header(root.hpa)->role.word)
break;
}
mmu->root_hpa = root.hpa;
mmu->root_cr3 = root.cr3;
return i < KVM_MMU_NUM_PREV_ROOTS;
}
static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
union kvm_mmu_page_role new_role,
bool skip_tlb_flush)
{
struct kvm_mmu *mmu = vcpu->arch.mmu;
/*
* For now, limit the fast switch to 64-bit hosts+VMs in order to avoid
* having to deal with PDPTEs. We may add support for 32-bit hosts/VMs
* later if necessary.
*/
if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
mmu->root_level >= PT64_ROOT_4LEVEL) {
if (mmu_check_root(vcpu, new_cr3 >> PAGE_SHIFT))
return false;
if (cached_root_available(vcpu, new_cr3, new_role)) {
kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
if (!skip_tlb_flush) {
kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
kvm_x86_ops->tlb_flush(vcpu, true);
}
/*
* The last MMIO access's GVA and GPA are cached in the
* VCPU. When switching to a new CR3, that GVA->GPA
* mapping may no longer be valid. So clear any cached
* MMIO info even when we don't need to sync the shadow
* page tables.
*/
vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
__clear_sp_write_flooding_count(
page_header(mmu->root_hpa));
return true;
}
}
return false;
}
static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3,
union kvm_mmu_page_role new_role,
bool skip_tlb_flush)
{
if (!fast_cr3_switch(vcpu, new_cr3, new_role, skip_tlb_flush))
kvm_mmu_free_roots(vcpu, vcpu->arch.mmu,
KVM_MMU_ROOT_CURRENT);
}
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush)
{
__kvm_mmu_new_cr3(vcpu, new_cr3, kvm_mmu_calc_root_page_role(vcpu),
skip_tlb_flush);
}
EXPORT_SYMBOL_GPL(kvm_mmu_new_cr3);
static unsigned long get_cr3(struct kvm_vcpu *vcpu)
{
return kvm_read_cr3(vcpu);
}
static void inject_page_fault(struct kvm_vcpu *vcpu,
struct x86_exception *fault)
{
vcpu->arch.mmu->inject_page_fault(vcpu, fault);
}
static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
unsigned access, int *nr_present)
{
if (unlikely(is_mmio_spte(*sptep))) {
if (gfn != get_mmio_spte_gfn(*sptep)) {
mmu_spte_clear_no_track(sptep);
return true;
}
(*nr_present)++;
mark_mmio_spte(vcpu, sptep, gfn, access);
return true;
}
return false;
}
static inline bool is_last_gpte(struct kvm_mmu *mmu,
unsigned level, unsigned gpte)
{
/*
* The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
* If it is clear, there are no large pages at this level, so clear
* PT_PAGE_SIZE_MASK in gpte if that is the case.
*/
gpte &= level - mmu->last_nonleaf_level;
/*
* PT_PAGE_TABLE_LEVEL always terminates. The RHS has bit 7 set
* iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
* level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
*/
gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
return gpte & PT_PAGE_SIZE_MASK;
}
#define PTTYPE_EPT 18 /* arbitrary */
#define PTTYPE PTTYPE_EPT
#include "paging_tmpl.h"
#undef PTTYPE
#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE
#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE
static void
__reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
struct rsvd_bits_validate *rsvd_check,
int maxphyaddr, int level, bool nx, bool gbpages,
bool pse, bool amd)
{
u64 exb_bit_rsvd = 0;
u64 gbpages_bit_rsvd = 0;
u64 nonleaf_bit8_rsvd = 0;
rsvd_check->bad_mt_xwr = 0;
if (!nx)
exb_bit_rsvd = rsvd_bits(63, 63);
if (!gbpages)
gbpages_bit_rsvd = rsvd_bits(7, 7);
/*
* Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
* leaf entries) on AMD CPUs only.
*/
if (amd)
nonleaf_bit8_rsvd = rsvd_bits(8, 8);
switch (level) {
case PT32_ROOT_LEVEL:
/* no rsvd bits for 2 level 4K page table entries */
rsvd_check->rsvd_bits_mask[0][1] = 0;
rsvd_check->rsvd_bits_mask[0][0] = 0;
rsvd_check->rsvd_bits_mask[1][0] =
rsvd_check->rsvd_bits_mask[0][0];
if (!pse) {
rsvd_check->rsvd_bits_mask[1][1] = 0;
break;
}
if (is_cpuid_PSE36())
/* 36bits PSE 4MB page */
rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
else
/* 32 bits PSE 4MB page */
rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
break;
case PT32E_ROOT_LEVEL:
rsvd_check->rsvd_bits_mask[0][2] =
rsvd_bits(maxphyaddr, 63) |
rsvd_bits(5, 8) | rsvd_bits(1, 2); /* PDPTE */
rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
rsvd_bits(maxphyaddr, 62); /* PDE */
rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd |
rsvd_bits(maxphyaddr, 62); /* PTE */
rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd |
rsvd_bits(maxphyaddr, 62) |
rsvd_bits(13, 20); /* large page */
rsvd_check->rsvd_bits_mask[1][0] =
rsvd_check->rsvd_bits_mask[0][0];
break;
case PT64_ROOT_5LEVEL:
rsvd_check->rsvd_bits_mask[0][4] = exb_bit_rsvd |
nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
rsvd_bits(maxphyaddr, 51);
rsvd_check->rsvd_bits_mask[1][4] =
rsvd_check->rsvd_bits_mask[0][4];
/* fall through */
case PT64_ROOT_4LEVEL:
rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
rsvd_bits(maxphyaddr, 51);
rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd |
nonleaf_bit8_rsvd | gbpages_bit_rsvd |
rsvd_bits(maxphyaddr, 51);
rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
rsvd_bits(maxphyaddr, 51);
rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd |
rsvd_bits(maxphyaddr, 51);
rsvd_check->rsvd_bits_mask[1][3] =
rsvd_check->rsvd_bits_mask[0][3];
rsvd_check->rsvd_bits_mask[1][2] = exb_bit_rsvd |
gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51) |
rsvd_bits(13, 29);
rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd |
rsvd_bits(maxphyaddr, 51) |
rsvd_bits(13, 20); /* large page */
rsvd_check->rsvd_bits_mask[1][0] =
rsvd_check->rsvd_bits_mask[0][0];
break;
}
}
static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
struct kvm_mmu *context)
{
__reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
cpuid_maxphyaddr(vcpu), context->root_level,
context->nx,
guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
is_pse(vcpu), guest_cpuid_is_amd(vcpu));
}
static void
__reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
int maxphyaddr, bool execonly)
{
u64 bad_mt_xwr;
rsvd_check->rsvd_bits_mask[0][4] =
rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7);
rsvd_check->rsvd_bits_mask[0][3] =
rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7);
rsvd_check->rsvd_bits_mask[0][2] =
rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6);
rsvd_check->rsvd_bits_mask[0][1] =
rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6);
rsvd_check->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51);
/* large page */
rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4];
rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
rsvd_check->rsvd_bits_mask[1][2] =
rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 29);
rsvd_check->rsvd_bits_mask[1][1] =
rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 20);
rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
bad_mt_xwr = 0xFFull << (2 * 8); /* bits 3..5 must not be 2 */
bad_mt_xwr |= 0xFFull << (3 * 8); /* bits 3..5 must not be 3 */
bad_mt_xwr |= 0xFFull << (7 * 8); /* bits 3..5 must not be 7 */
bad_mt_xwr |= REPEAT_BYTE(1ull << 2); /* bits 0..2 must not be 010 */
bad_mt_xwr |= REPEAT_BYTE(1ull << 6); /* bits 0..2 must not be 110 */
if (!execonly) {
/* bits 0..2 must not be 100 unless VMX capabilities allow it */
bad_mt_xwr |= REPEAT_BYTE(1ull << 4);
}
rsvd_check->bad_mt_xwr = bad_mt_xwr;
}
static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
struct kvm_mmu *context, bool execonly)
{
__reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
cpuid_maxphyaddr(vcpu), execonly);
}
/*
 * The page table on the host is the shadow page table for the page
 * table in the guest or AMD nested guest; its MMU features completely
 * follow the features in the guest.
*/
void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
{
bool uses_nx = context->nx ||
context->mmu_role.base.smep_andnot_wp;
struct rsvd_bits_validate *shadow_zero_check;
int i;
/*
* Passing "true" to the last argument is okay; it adds a check
* on bit 8 of the SPTEs which KVM doesn't use anyway.
*/
shadow_zero_check = &context->shadow_zero_check;
__reset_rsvds_bits_mask(vcpu, shadow_zero_check,
shadow_phys_bits,
context->shadow_root_level, uses_nx,
guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
is_pse(vcpu), true);
if (!shadow_me_mask)
return;
for (i = context->shadow_root_level; --i >= 0;) {
shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
}
}
EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
static inline bool boot_cpu_is_amd(void)
{
WARN_ON_ONCE(!tdp_enabled);
return shadow_x_mask == 0;
}
/*
 * The direct page table on the host uses as many MMU features as
 * possible; however, KVM currently does not do execution-protection.
*/
static void
reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
struct kvm_mmu *context)
{
struct rsvd_bits_validate *shadow_zero_check;
int i;
shadow_zero_check = &context->shadow_zero_check;
if (boot_cpu_is_amd())
__reset_rsvds_bits_mask(vcpu, shadow_zero_check,
shadow_phys_bits,
context->shadow_root_level, false,
boot_cpu_has(X86_FEATURE_GBPAGES),
true, true);
else
__reset_rsvds_bits_mask_ept(shadow_zero_check,
shadow_phys_bits,
false);
if (!shadow_me_mask)
return;
for (i = context->shadow_root_level; --i >= 0;) {
shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
}
}
/*
 * Same as the comment in reset_shadow_zero_bits_mask(), except it
 * is the shadow page table for an Intel nested guest.
*/
static void
reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
struct kvm_mmu *context, bool execonly)
{
__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
shadow_phys_bits, execonly);
}
#define BYTE_MASK(access) \
((1 & (access) ? 2 : 0) | \
(2 & (access) ? 4 : 0) | \
(3 & (access) ? 8 : 0) | \
(4 & (access) ? 16 : 0) | \
(5 & (access) ? 32 : 0) | \
(6 & (access) ? 64 : 0) | \
(7 & (access) ? 128 : 0))
static void update_permission_bitmask(struct kvm_vcpu *vcpu,
struct kvm_mmu *mmu, bool ept)
{
unsigned byte;
const u8 x = BYTE_MASK(ACC_EXEC_MASK);
const u8 w = BYTE_MASK(ACC_WRITE_MASK);
const u8 u = BYTE_MASK(ACC_USER_MASK);
bool cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP) != 0;
bool cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP) != 0;
bool cr0_wp = is_write_protection(vcpu);
for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
unsigned pfec = byte << 1;
/*
* Each "*f" variable has a 1 bit for each UWX value
* that causes a fault with the given PFEC.
*/
/* Faults from writes to non-writable pages */
u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0;
/* Faults from user mode accesses to supervisor pages */
u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0;
/* Faults from fetches of non-executable pages*/
u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0;
/* Faults from kernel mode fetches of user pages */
u8 smepf = 0;
/* Faults from kernel mode accesses of user pages */
u8 smapf = 0;
if (!ept) {
/* Faults from kernel mode accesses to user pages */
u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u;
/* Not really needed: !nx will cause pte.nx to fault */
if (!mmu->nx)
ff = 0;
/* Allow supervisor writes if !cr0.wp */
if (!cr0_wp)
wf = (pfec & PFERR_USER_MASK) ? wf : 0;
/* Disallow supervisor fetches of user code if cr4.smep */
if (cr4_smep)
smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0;
/*
* SMAP:kernel-mode data accesses from user-mode
* mappings should fault. A fault is considered
* as a SMAP violation if all of the following
* conditions are true:
* - X86_CR4_SMAP is set in CR4
* - A user page is accessed
* - The access is not a fetch
* - Page fault in kernel mode
* - if CPL = 3 or X86_EFLAGS_AC is clear
*
* Here, we cover the first three conditions.
* The fourth is computed dynamically in permission_fault();
* PFERR_RSVD_MASK bit will be set in PFEC if the access is
* *not* subject to SMAP restrictions.
*/
if (cr4_smap)
smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf;
}
mmu->permissions[byte] = ff | uf | wf | smepf | smapf;
}
}
/*
* PKU is an additional mechanism by which the paging controls access to
* user-mode addresses based on the value in the PKRU register. Protection
* key violations are reported through a bit in the page fault error code.
* Unlike other bits of the error code, the PK bit is not known at the
* call site of e.g. gva_to_gpa; it must be computed directly in
* permission_fault based on two bits of PKRU, on some machine state (CR4,
* CR0, EFER, CPL), and on other bits of the error code and the page tables.
*
* In particular the following conditions come from the error code, the
* page tables and the machine state:
* - PK is always zero unless CR4.PKE=1 and EFER.LMA=1
* - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch)
* - PK is always zero if U=0 in the page tables
* - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
*
* The PKRU bitmask caches the result of these four conditions. The error
* code (minus the P bit) and the page table's U bit form an index into the
* PKRU bitmask. Two bits of the PKRU bitmask are then extracted and ANDed
* with the two bits of the PKRU register corresponding to the protection key.
* For the first three conditions above the bits will be 00, thus masking
* away both AD and WD. For all reads or if the last condition holds, WD
* only will be masked away.
*/
static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
bool ept)
{
unsigned bit;
bool wp;
if (ept) {
mmu->pkru_mask = 0;
return;
}
/* PKEY is enabled only if CR4.PKE and EFER.LMA are both set. */
if (!kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || !is_long_mode(vcpu)) {
mmu->pkru_mask = 0;
return;
}
wp = is_write_protection(vcpu);
for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
unsigned pfec, pkey_bits;
bool check_pkey, check_write, ff, uf, wf, pte_user;
pfec = bit << 1;
ff = pfec & PFERR_FETCH_MASK;
uf = pfec & PFERR_USER_MASK;
wf = pfec & PFERR_WRITE_MASK;
/* PFEC.RSVD is replaced by ACC_USER_MASK. */
pte_user = pfec & PFERR_RSVD_MASK;
/*
* Only need to check the access which is not an
* instruction fetch and is to a user page.
*/
check_pkey = (!ff && pte_user);
/*
* write access is controlled by PKRU if it is a
* user access or CR0.WP = 1.
*/
check_write = check_pkey && wf && (uf || wp);
/* PKRU.AD stops both read and write access. */
pkey_bits = !!check_pkey;
/* PKRU.WD stops write access. */
pkey_bits |= (!!check_write) << 1;
mmu->pkru_mask |= (pkey_bits & 3) << pfec;
}
}
static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
{
unsigned root_level = mmu->root_level;
mmu->last_nonleaf_level = root_level;
if (root_level == PT32_ROOT_LEVEL && is_pse(vcpu))
mmu->last_nonleaf_level++;
}
static void paging64_init_context_common(struct kvm_vcpu *vcpu,
struct kvm_mmu *context,
int level)
{
context->nx = is_nx(vcpu);
context->root_level = level;
reset_rsvds_bits_mask(vcpu, context);
update_permission_bitmask(vcpu, context, false);
update_pkru_bitmask(vcpu, context, false);
update_last_nonleaf_level(vcpu, context);
MMU_WARN_ON(!is_pae(vcpu));
context->page_fault = paging64_page_fault;
context->gva_to_gpa = paging64_gva_to_gpa;
context->sync_page = paging64_sync_page;
context->invlpg = paging64_invlpg;
context->update_pte = paging64_update_pte;
context->shadow_root_level = level;
context->direct_map = false;
}
static void paging64_init_context(struct kvm_vcpu *vcpu,
struct kvm_mmu *context)
{
int root_level = is_la57_mode(vcpu) ?
PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
paging64_init_context_common(vcpu, context, root_level);
}
static void paging32_init_context(struct kvm_vcpu *vcpu,
struct kvm_mmu *context)
{
context->nx = false;
context->root_level = PT32_ROOT_LEVEL;
reset_rsvds_bits_mask(vcpu, context);
update_permission_bitmask(vcpu, context, false);
update_pkru_bitmask(vcpu, context, false);
update_last_nonleaf_level(vcpu, context);
context->page_fault = paging32_page_fault;
context->gva_to_gpa = paging32_gva_to_gpa;
context->sync_page = paging32_sync_page;
context->invlpg = paging32_invlpg;
context->update_pte = paging32_update_pte;
context->shadow_root_level = PT32E_ROOT_LEVEL;
context->direct_map = false;
}
static void paging32E_init_context(struct kvm_vcpu *vcpu,
struct kvm_mmu *context)
{
paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
}
static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
{
union kvm_mmu_extended_role ext = {0};
ext.cr0_pg = !!is_paging(vcpu);
ext.cr4_pae = !!is_pae(vcpu);
ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
ext.cr4_pse = !!is_pse(vcpu);
ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
ext.maxphyaddr = cpuid_maxphyaddr(vcpu);
ext.valid = 1;
return ext;
}
static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
bool base_only)
{
union kvm_mmu_role role = {0};
role.base.access = ACC_ALL;
role.base.nxe = !!is_nx(vcpu);
role.base.cr0_wp = is_write_protection(vcpu);
role.base.smm = is_smm(vcpu);
role.base.guest_mode = is_guest_mode(vcpu);
if (base_only)
return role;
role.ext = kvm_calc_mmu_role_ext(vcpu);
return role;
}
static union kvm_mmu_role
kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
{
union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
role.base.ad_disabled = (shadow_accessed_mask == 0);
role.base.level = kvm_x86_ops->get_tdp_level(vcpu);
role.base.direct = true;
role.base.gpte_is_8_bytes = true;
return role;
}
static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *context = vcpu->arch.mmu;
union kvm_mmu_role new_role =
kvm_calc_tdp_mmu_root_page_role(vcpu, false);
new_role.base.word &= mmu_base_role_mask.word;
if (new_role.as_u64 == context->mmu_role.as_u64)
return;
context->mmu_role.as_u64 = new_role.as_u64;
context->page_fault = tdp_page_fault;
context->sync_page = nonpaging_sync_page;
context->invlpg = nonpaging_invlpg;
context->update_pte = nonpaging_update_pte;
context->shadow_root_level = kvm_x86_ops->get_tdp_level(vcpu);
context->direct_map = true;
context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
context->get_cr3 = get_cr3;
context->get_pdptr = kvm_pdptr_read;
context->inject_page_fault = kvm_inject_page_fault;
if (!is_paging(vcpu)) {
context->nx = false;
context->gva_to_gpa = nonpaging_gva_to_gpa;
context->root_level = 0;
} else if (is_long_mode(vcpu)) {
context->nx = is_nx(vcpu);
context->root_level = is_la57_mode(vcpu) ?
PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
reset_rsvds_bits_mask(vcpu, context);
context->gva_to_gpa = paging64_gva_to_gpa;
} else if (is_pae(vcpu)) {
context->nx = is_nx(vcpu);
context->root_level = PT32E_ROOT_LEVEL;
reset_rsvds_bits_mask(vcpu, context);
context->gva_to_gpa = paging64_gva_to_gpa;
} else {
context->nx = false;
context->root_level = PT32_ROOT_LEVEL;
reset_rsvds_bits_mask(vcpu, context);
context->gva_to_gpa = paging32_gva_to_gpa;
}
update_permission_bitmask(vcpu, context, false);
update_pkru_bitmask(vcpu, context, false);
update_last_nonleaf_level(vcpu, context);
reset_tdp_shadow_zero_bits_mask(vcpu, context);
}
static union kvm_mmu_role
kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
{
union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
role.base.smep_andnot_wp = role.ext.cr4_smep &&
!is_write_protection(vcpu);
role.base.smap_andnot_wp = role.ext.cr4_smap &&
!is_write_protection(vcpu);
role.base.direct = !is_paging(vcpu);
role.base.gpte_is_8_bytes = !!is_pae(vcpu);
if (!is_long_mode(vcpu))
role.base.level = PT32E_ROOT_LEVEL;
else if (is_la57_mode(vcpu))
role.base.level = PT64_ROOT_5LEVEL;
else
role.base.level = PT64_ROOT_4LEVEL;
return role;
}
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *context = vcpu->arch.mmu;
union kvm_mmu_role new_role =
kvm_calc_shadow_mmu_root_page_role(vcpu, false);
new_role.base.word &= mmu_base_role_mask.word;
if (new_role.as_u64 == context->mmu_role.as_u64)
return;
if (!is_paging(vcpu))
nonpaging_init_context(vcpu, context);
else if (is_long_mode(vcpu))
paging64_init_context(vcpu, context);
else if (is_pae(vcpu))
paging32E_init_context(vcpu, context);
else
paging32_init_context(vcpu, context);
context->mmu_role.as_u64 = new_role.as_u64;
reset_shadow_zero_bits_mask(vcpu, context);
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
static union kvm_mmu_role
kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
bool execonly)
{
union kvm_mmu_role role = {0};
/* SMM flag is inherited from root_mmu */
role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm;
role.base.level = PT64_ROOT_4LEVEL;
role.base.gpte_is_8_bytes = true;
role.base.direct = false;
role.base.ad_disabled = !accessed_dirty;
role.base.guest_mode = true;
role.base.access = ACC_ALL;
/*
* WP=1 and NOT_WP=1 is an impossible combination, use WP and the
* SMAP variation to denote shadow EPT entries.
*/
role.base.cr0_wp = true;
role.base.smap_andnot_wp = true;
role.ext = kvm_calc_mmu_role_ext(vcpu);
role.ext.execonly = execonly;
return role;
}
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
bool accessed_dirty, gpa_t new_eptp)
{
struct kvm_mmu *context = vcpu->arch.mmu;
union kvm_mmu_role new_role =
kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
execonly);
__kvm_mmu_new_cr3(vcpu, new_eptp, new_role.base, false);
new_role.base.word &= mmu_base_role_mask.word;
if (new_role.as_u64 == context->mmu_role.as_u64)
return;
context->shadow_root_level = PT64_ROOT_4LEVEL;
context->nx = true;
context->ept_ad = accessed_dirty;
context->page_fault = ept_page_fault;
context->gva_to_gpa = ept_gva_to_gpa;
context->sync_page = ept_sync_page;
context->invlpg = ept_invlpg;
context->update_pte = ept_update_pte;
context->root_level = PT64_ROOT_4LEVEL;
context->direct_map = false;
context->mmu_role.as_u64 = new_role.as_u64;
update_permission_bitmask(vcpu, context, true);
update_pkru_bitmask(vcpu, context, true);
update_last_nonleaf_level(vcpu, context);
reset_rsvds_bits_mask_ept(vcpu, context, execonly);
reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *context = vcpu->arch.mmu;
kvm_init_shadow_mmu(vcpu);
context->set_cr3 = kvm_x86_ops->set_cr3;
context->get_cr3 = get_cr3;
context->get_pdptr = kvm_pdptr_read;
context->inject_page_fault = kvm_inject_page_fault;
}
static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
{
union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false);
struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
new_role.base.word &= mmu_base_role_mask.word;
if (new_role.as_u64 == g_context->mmu_role.as_u64)
return;
g_context->mmu_role.as_u64 = new_role.as_u64;
g_context->get_cr3 = get_cr3;
g_context->get_pdptr = kvm_pdptr_read;
g_context->inject_page_fault = kvm_inject_page_fault;
/*
* Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
* L1's nested page tables (e.g. EPT12). The nested translation
* of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
* L2's page tables as the first level of translation and L1's
* nested page tables as the second level of translation. Basically
* the gva_to_gpa functions between mmu and nested_mmu are swapped.
*/
if (!is_paging(vcpu)) {
g_context->nx = false;
g_context->root_level = 0;
g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
} else if (is_long_mode(vcpu)) {
g_context->nx = is_nx(vcpu);
g_context->root_level = is_la57_mode(vcpu) ?
PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
reset_rsvds_bits_mask(vcpu, g_context);
g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
} else if (is_pae(vcpu)) {
g_context->nx = is_nx(vcpu);
g_context->root_level = PT32E_ROOT_LEVEL;
reset_rsvds_bits_mask(vcpu, g_context);
g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
} else {
g_context->nx = false;
g_context->root_level = PT32_ROOT_LEVEL;
reset_rsvds_bits_mask(vcpu, g_context);
g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
}
update_permission_bitmask(vcpu, g_context, false);
update_pkru_bitmask(vcpu, g_context, false);
update_last_nonleaf_level(vcpu, g_context);
}
void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots)
{
if (reset_roots) {
uint i;
vcpu->arch.mmu->root_hpa = INVALID_PAGE;
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
vcpu->arch.mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
}
if (mmu_is_nested(vcpu))
init_kvm_nested_mmu(vcpu);
else if (tdp_enabled)
init_kvm_tdp_mmu(vcpu);
else
init_kvm_softmmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_init_mmu);
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
{
union kvm_mmu_role role;
if (tdp_enabled)
role = kvm_calc_tdp_mmu_root_page_role(vcpu, true);
else
role = kvm_calc_shadow_mmu_root_page_role(vcpu, true);
return role.base;
}
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
kvm_mmu_unload(vcpu);
kvm_init_mmu(vcpu, true);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
int r;
r = mmu_topup_memory_caches(vcpu);
if (r)
goto out;
r = mmu_alloc_roots(vcpu);
kvm_mmu_sync_roots(vcpu);
if (r)
goto out;
kvm_mmu_load_cr3(vcpu);
kvm_x86_ops->tlb_flush(vcpu, true);
out:
return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);
void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa));
kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa));
}
EXPORT_SYMBOL_GPL(kvm_mmu_unload);
static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp, u64 *spte,
const void *new)
{
if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
++vcpu->kvm->stat.mmu_pde_zapped;
return;
}
++vcpu->kvm->stat.mmu_pte_updated;
vcpu->arch.mmu->update_pte(vcpu, sp, spte, new);
}
static bool need_remote_flush(u64 old, u64 new)
{
if (!is_shadow_present_pte(old))
return false;
if (!is_shadow_present_pte(new))
return true;
if ((old ^ new) & PT64_BASE_ADDR_MASK)
return true;
old ^= shadow_nx_mask;
new ^= shadow_nx_mask;
return (old & ~new & PT64_PERM_MASK) != 0;
}
static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
int *bytes)
{
u64 gentry = 0;
int r;
/*
* Assume that the pte write on a page table of the same type
* as the current vcpu paging mode since we update the sptes only
* when they have the same mode.
*/
if (is_pae(vcpu) && *bytes == 4) {
/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
*gpa &= ~(gpa_t)7;
*bytes = 8;
}
if (*bytes == 4 || *bytes == 8) {
r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
if (r)
gentry = 0;
}
return gentry;
}
/*
* If we're seeing too many writes to a page, it may no longer be a page table,
* or we may be forking, in which case it is better to unmap the page.
*/
static bool detect_write_flooding(struct kvm_mmu_page *sp)
{
/*
* Skip write-flooding detected for the sp whose level is 1, because
* it can become unsync, then the guest page is not write-protected.
*/
if (sp->role.level == PT_PAGE_TABLE_LEVEL)
return false;
atomic_inc(&sp->write_flooding_count);
return atomic_read(&sp->write_flooding_count) >= 3;
}
/*
* Misaligned accesses are too much trouble to fix up; also, they usually
* indicate a page is not used as a page table.
*/
static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
int bytes)
{
unsigned offset, pte_size, misaligned;
pgprintk("misaligned: gpa %llx bytes %d role %x\n",
gpa, bytes, sp->role.word);
offset = offset_in_page(gpa);
pte_size = sp->role.gpte_is_8_bytes ? 8 : 4;
/*
 * Sometimes, the OS only writes the last one byte to update status
 * bits, for example, in Linux, the andb instruction is used in clear_bit().
*/
if (!(offset & (pte_size - 1)) && bytes == 1)
return false;
misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
misaligned |= bytes < 4;
return misaligned;
}
static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
{
unsigned page_offset, quadrant;
u64 *spte;
int level;
page_offset = offset_in_page(gpa);
level = sp->role.level;
*nspte = 1;
if (!sp->role.gpte_is_8_bytes) {
page_offset <<= 1; /* 32->64 */
/*
* A 32-bit pde maps 4MB while the shadow pdes map
* only 2MB. So we need to double the offset again
* and zap two pdes instead of one.
*/
if (level == PT32_ROOT_LEVEL) {
page_offset &= ~7; /* kill rounding error */
page_offset <<= 1;
*nspte = 2;
}
quadrant = page_offset >> PAGE_SHIFT;
page_offset &= ~PAGE_MASK;
if (quadrant != sp->role.quadrant)
return NULL;
}
spte = &sp->spt[page_offset / sizeof(*spte)];
return spte;
}
static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
const u8 *new, int bytes,
struct kvm_page_track_notifier_node *node)
{
gfn_t gfn = gpa >> PAGE_SHIFT;
struct kvm_mmu_page *sp;
LIST_HEAD(invalid_list);
u64 entry, gentry, *spte;
int npte;
bool remote_flush, local_flush;
/*
* If we don't have indirect shadow pages, it means no page is
* write-protected, so we can exit simply.
*/
if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
return;
remote_flush = local_flush = false;
pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
/*
 * No need to care whether the memory allocation is successful
 * or not, since pte prefetch is skipped if there are not
 * enough objects in the cache.
*/
mmu_topup_memory_caches(vcpu);
spin_lock(&vcpu->kvm->mmu_lock);
gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
++vcpu->kvm->stat.mmu_pte_write;
kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
if (detect_write_misaligned(sp, gpa, bytes) ||
detect_write_flooding(sp)) {
kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
++vcpu->kvm->stat.mmu_flooded;
continue;
}
spte = get_written_sptes(sp, gpa, &npte);
if (!spte)
continue;
local_flush = true;
while (npte--) {
u32 base_role = vcpu->arch.mmu->mmu_role.base.word;
entry = *spte;
mmu_page_zap_pte(vcpu->kvm, sp, spte);
if (gentry &&
!((sp->role.word ^ base_role)
& mmu_base_role_mask.word) && rmap_can_add(vcpu))
mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
if (need_remote_flush(entry, *spte))
remote_flush = true;
++spte;
}
}
kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush);
kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
spin_unlock(&vcpu->kvm->mmu_lock);
}
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
gpa_t gpa;
int r;
if (vcpu->arch.mmu->direct_map)
return 0;
gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
{
LIST_HEAD(invalid_list);
if (likely(kvm_mmu_available_pages(vcpu->kvm) >= KVM_MIN_FREE_MMU_PAGES))
return 0;
while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) {
if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list))
break;
++vcpu->kvm->stat.mmu_recycled;
}
kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
if (!kvm_mmu_available_pages(vcpu->kvm))
return -ENOSPC;
return 0;
}
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
void *insn, int insn_len)
{
int r, emulation_type = 0;
enum emulation_result er;
bool direct = vcpu->arch.mmu->direct_map;
/* With shadow page tables, fault_address contains a GVA or nGPA. */
if (vcpu->arch.mmu->direct_map) {
vcpu->arch.gpa_available = true;
vcpu->arch.gpa_val = cr2;
}
r = RET_PF_INVALID;
if (unlikely(error_code & PFERR_RSVD_MASK)) {
r = handle_mmio_page_fault(vcpu, cr2, direct);
if (r == RET_PF_EMULATE)
goto emulate;
}
if (r == RET_PF_INVALID) {
r = vcpu->arch.mmu->page_fault(vcpu, cr2,
lower_32_bits(error_code),
false);
WARN_ON(r == RET_PF_INVALID);
}
if (r == RET_PF_RETRY)
return 1;
if (r < 0)
return r;
/*
* Before emulating the instruction, check if the error code
* was due to a RO violation while translating the guest page.
* This can occur when using nested virtualization with nested
* paging in both guests. If true, we simply unprotect the page
* and resume the guest.
*/
if (vcpu->arch.mmu->direct_map &&
(error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2));
return 1;
}
/*
* vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
* optimistically try to just unprotect the page and let the processor
* re-execute the instruction that caused the page fault. Do not allow
* retrying MMIO emulation, as it's not only pointless but could also
* cause us to enter an infinite loop because the processor will keep
* faulting on the non-existent MMIO address. Retrying an instruction
* from a nested guest is also pointless and dangerous as we are only
* explicitly shadowing L1's page tables, i.e. unprotecting something
* for L1 isn't going to magically fix whatever issue cause L2 to fail.
*/
if (!mmio_info_in_cache(vcpu, cr2, direct) && !is_guest_mode(vcpu))
emulation_type = EMULTYPE_ALLOW_RETRY;
emulate:
/*
* On AMD platforms, under certain conditions insn_len may be zero on #NPF.
* This can happen if a guest gets a page-fault on data access but the HW
* table walker is not able to read the instruction page (e.g instruction
* page is not present in memory). In those cases we simply restart the
* guest, with the exception of AMD Erratum 1096 which is unrecoverable.
*/
if (unlikely(insn && !insn_len)) {
if (!kvm_x86_ops->need_emulation_on_page_fault(vcpu))
return 1;
}
er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
switch (er) {
case EMULATE_DONE:
return 1;
case EMULATE_USER_EXIT:
++vcpu->stat.mmio_exits;
/* fall through */
case EMULATE_FAIL:
return 0;
default:
BUG();
}
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
struct kvm_mmu *mmu = vcpu->arch.mmu;
int i;
/* INVLPG on a * non-canonical address is a NOP according to the SDM. */
if (is_noncanonical_address(gva, vcpu))
return;
mmu->invlpg(vcpu, gva, mmu->root_hpa);
/*
* INVLPG is required to invalidate any global mappings for the VA,
 * irrespective of PCID. Since it would take us roughly the same amount
 * of work to determine whether any of the prev_root mappings of the VA
 * is marked global as to just sync it blindly, we might as well
 * just always sync it.
*
* Mappings not reachable via the current cr3 or the prev_roots will be
* synced when switching to that cr3, so nothing needs to be done here
* for them.
*/
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
if (VALID_PAGE(mmu->prev_roots[i].hpa))
mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
kvm_x86_ops->tlb_flush_gva(vcpu, gva);
++vcpu->stat.invlpg;
}
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
{
struct kvm_mmu *mmu = vcpu->arch.mmu;
bool tlb_flush = false;
uint i;
if (pcid == kvm_get_active_pcid(vcpu)) {
mmu->invlpg(vcpu, gva, mmu->root_hpa);
tlb_flush = true;
}
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].cr3)) {
mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
tlb_flush = true;
}
}
if (tlb_flush)
kvm_x86_ops->tlb_flush_gva(vcpu, gva);
++vcpu->stat.invlpg;
/*
* Mappings not reachable via the current cr3 or the prev_roots will be
* synced when switching to that cr3, so nothing needs to be done here
* for them.
*/
}
EXPORT_SYMBOL_GPL(kvm_mmu_invpcid_gva);
void kvm_enable_tdp(void)
{
tdp_enabled = true;
}
EXPORT_SYMBOL_GPL(kvm_enable_tdp);
void kvm_disable_tdp(void)
{
tdp_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_tdp);
/* The return value indicates if tlb flush on all vcpus is needed. */
typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
/* The caller should hold mmu-lock before calling this function. */
static __always_inline bool
slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
slot_level_handler fn, int start_level, int end_level,
gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
{
struct slot_rmap_walk_iterator iterator;
bool flush = false;
for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
end_gfn, &iterator) {
if (iterator.rmap)
flush |= fn(kvm, iterator.rmap);
if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
if (flush && lock_flush_tlb) {
kvm_flush_remote_tlbs_with_address(kvm,
start_gfn,
iterator.gfn - start_gfn + 1);
flush = false;
}
cond_resched_lock(&kvm->mmu_lock);
}
}
if (flush && lock_flush_tlb) {
kvm_flush_remote_tlbs_with_address(kvm, start_gfn,
end_gfn - start_gfn + 1);
flush = false;
}
return flush;
}
static __always_inline bool
slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
slot_level_handler fn, int start_level, int end_level,
bool lock_flush_tlb)
{
return slot_handle_level_range(kvm, memslot, fn, start_level,
end_level, memslot->base_gfn,
memslot->base_gfn + memslot->npages - 1,
lock_flush_tlb);
}
static __always_inline bool
slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
slot_level_handler fn, bool lock_flush_tlb)
{
return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
}
static __always_inline bool
slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
slot_level_handler fn, bool lock_flush_tlb)
{
return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL + 1,
PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
}
static __always_inline bool
slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
slot_level_handler fn, bool lock_flush_tlb)
{
return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
PT_PAGE_TABLE_LEVEL, lock_flush_tlb);
}
static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
free_page((unsigned long)vcpu->arch.mmu->pae_root);
free_page((unsigned long)vcpu->arch.mmu->lm_root);
}
static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
struct page *page;
int i;
/*
* When using PAE paging, the four PDPTEs are treated as 'root' pages,
* while the PDP table is a per-vCPU construct that's allocated at MMU
* creation. When emulating 32-bit mode, cr3 is only 32 bits even on
* x86_64. Therefore we need to allocate the PDP table in the first
* 4GB of memory, which happens to fit the DMA32 zone. Except for
* SVM's 32-bit NPT support, TDP paging doesn't use PAE paging and can
* skip allocating the PDP table.
*/
if (tdp_enabled && kvm_x86_ops->get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
return 0;
page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
if (!page)
return -ENOMEM;
vcpu->arch.mmu->pae_root = page_address(page);
for (i = 0; i < 4; ++i)
vcpu->arch.mmu->pae_root[i] = INVALID_PAGE;
return 0;
}
int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
uint i;
vcpu->arch.mmu = &vcpu->arch.root_mmu;
vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
vcpu->arch.root_mmu.root_hpa = INVALID_PAGE;
vcpu->arch.root_mmu.root_cr3 = 0;
vcpu->arch.root_mmu.translate_gpa = translate_gpa;
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE;
vcpu->arch.guest_mmu.root_cr3 = 0;
vcpu->arch.guest_mmu.translate_gpa = translate_gpa;
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
return alloc_mmu_pages(vcpu);
}
static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot,
struct kvm_page_track_notifier_node *node)
{
struct kvm_mmu_page *sp;
LIST_HEAD(invalid_list);
unsigned long i;
bool flush;
gfn_t gfn;
spin_lock(&kvm->mmu_lock);
if (list_empty(&kvm->arch.active_mmu_pages))
goto out_unlock;
flush = slot_handle_all_level(kvm, slot, kvm_zap_rmapp, false);
for (i = 0; i < slot->npages; i++) {
gfn = slot->base_gfn + i;
for_each_valid_sp(kvm, sp, gfn) {
if (sp->gfn != gfn)
continue;
kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
}
if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
flush = false;
cond_resched_lock(&kvm->mmu_lock);
}
}
kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
out_unlock:
spin_unlock(&kvm->mmu_lock);
}
void kvm_mmu_init_vm(struct kvm *kvm)
{
struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
node->track_write = kvm_mmu_pte_write;
node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
kvm_page_track_register_notifier(kvm, node);
}
void kvm_mmu_uninit_vm(struct kvm *kvm)
{
struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
kvm_page_track_unregister_notifier(kvm, node);
}
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
int i;
spin_lock(&kvm->mmu_lock);
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
slots = __kvm_memslots(kvm, i);
kvm_for_each_memslot(memslot, slots) {
gfn_t start, end;
start = max(gfn_start, memslot->base_gfn);
end = min(gfn_end, memslot->base_gfn + memslot->npages);
if (start >= end)
continue;
slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
start, end - 1, true);
}
}
spin_unlock(&kvm->mmu_lock);
}
static bool slot_rmap_write_protect(struct kvm *kvm,
struct kvm_rmap_head *rmap_head)
{
return __rmap_write_protect(kvm, rmap_head, false);
}
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
struct kvm_memory_slot *memslot)
{
bool flush;
spin_lock(&kvm->mmu_lock);
flush = slot_handle_all_level(kvm, memslot, slot_rmap_write_protect,
false);
spin_unlock(&kvm->mmu_lock);
/*
* kvm_mmu_slot_remove_write_access() and kvm_vm_ioctl_get_dirty_log()
* which do tlb flush out of mmu-lock should be serialized by
* kvm->slots_lock otherwise tlb flush would be missed.
*/
lockdep_assert_held(&kvm->slots_lock);
/*
* We can flush all the TLBs out of the mmu lock without TLB
* corruption since we just change the spte from writable to
* readonly so that we only need to care the case of changing
* spte from present to present (changing the spte from present
* to nonpresent will flush all the TLBs immediately), in other
* words, the only case we care is mmu_spte_update() where we
* have checked SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE
* instead of PT_WRITABLE_MASK, that means it does not depend
* on PT_WRITABLE_MASK anymore.
*/
if (flush)
kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
memslot->npages);
}
static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
struct kvm_rmap_head *rmap_head)
{
u64 *sptep;
struct rmap_iterator iter;
int need_tlb_flush = 0;
kvm_pfn_t pfn;
struct kvm_mmu_page *sp;
restart:
for_each_rmap_spte(rmap_head, &iter, sptep) {
sp = page_header(__pa(sptep));
pfn = spte_to_pfn(*sptep);
/*
* We cannot do huge page mapping for indirect shadow pages,
* which are found on the last rmap (level = 1) when not using
* tdp; such shadow pages are synced with the page table in
* the guest, and the guest page table is using 4K page size
* mapping if the indirect sp has level = 1.
*/
if (sp->role.direct &&
!kvm_is_reserved_pfn(pfn) &&
PageTransCompoundMap(pfn_to_page(pfn))) {
pte_list_remove(rmap_head, sptep);
if (kvm_available_flush_tlb_with_range())
kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
KVM_PAGES_PER_HPAGE(sp->role.level));
else
need_tlb_flush = 1;
goto restart;
}
}
return need_tlb_flush;
}
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
const struct kvm_memory_slot *memslot)
{
/* FIXME: const-ify all uses of struct kvm_memory_slot. */
spin_lock(&kvm->mmu_lock);
slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot,
kvm_mmu_zap_collapsible_spte, true);
spin_unlock(&kvm->mmu_lock);
}
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
struct kvm_memory_slot *memslot)
{
bool flush;
spin_lock(&kvm->mmu_lock);
flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
spin_unlock(&kvm->mmu_lock);
lockdep_assert_held(&kvm->slots_lock);
/*
* It's also safe to flush TLBs out of mmu lock here as currently this
* function is only used for dirty logging, in which case flushing TLB
* out of mmu lock also guarantees no dirty pages will be lost in
* dirty_bitmap.
*/
if (flush)
kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
memslot->npages);
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);
void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
struct kvm_memory_slot *memslot)
{
bool flush;
spin_lock(&kvm->mmu_lock);
flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect,
false);
spin_unlock(&kvm->mmu_lock);
/* see kvm_mmu_slot_remove_write_access */
lockdep_assert_held(&kvm->slots_lock);
if (flush)
kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
memslot->npages);
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);
void kvm_mmu_slot_set_dirty(struct kvm *kvm,
struct kvm_memory_slot *memslot)
{
bool flush;
spin_lock(&kvm->mmu_lock);
flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false);
spin_unlock(&kvm->mmu_lock);
lockdep_assert_held(&kvm->slots_lock);
/* see kvm_mmu_slot_leaf_clear_dirty */
if (flush)
kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
memslot->npages);
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
static void __kvm_mmu_zap_all(struct kvm *kvm, bool mmio_only)
{
struct kvm_mmu_page *sp, *node;
LIST_HEAD(invalid_list);
int ign;
spin_lock(&kvm->mmu_lock);
restart:
list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
if (mmio_only && !sp->mmio_cached)
continue;
if (sp->role.invalid && sp->root_count)
continue;
if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) {
WARN_ON_ONCE(mmio_only);
goto restart;
}
if (cond_resched_lock(&kvm->mmu_lock))
goto restart;
}
kvm_mmu_commit_zap_page(kvm, &invalid_list);
spin_unlock(&kvm->mmu_lock);
}
void kvm_mmu_zap_all(struct kvm *kvm)
{
return __kvm_mmu_zap_all(kvm, false);
}
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
{
WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
gen &= MMIO_SPTE_GEN_MASK;
/*
* Generation numbers are incremented in multiples of the number of
* address spaces in order to provide unique generations across all
* address spaces. Strip what is effectively the address space
* modifier prior to checking for a wrap of the MMIO generation so
* that a wrap in any address space is detected.
*/
gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);
/*
* The very rare case: if the MMIO generation number has wrapped,
* zap all shadow pages.
*/
if (unlikely(gen == 0)) {
kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
__kvm_mmu_zap_all(kvm, true);
}
}
static unsigned long
mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
struct kvm *kvm;
int nr_to_scan = sc->nr_to_scan;
unsigned long freed = 0;
mutex_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list) {
int idx;
LIST_HEAD(invalid_list);
/*
* Never scan more than sc->nr_to_scan VM instances.
* Will not hit this condition practically since we do not try
* to shrink more than one VM and it is very unlikely to see
* !n_used_mmu_pages so many times.
*/
if (!nr_to_scan--)
break;
/*
* n_used_mmu_pages is accessed without holding kvm->mmu_lock
 * here. We may skip a VM instance erroneously, but we do not
* want to shrink a VM that only started to populate its MMU
* anyway.
*/
if (!kvm->arch.n_used_mmu_pages)
continue;
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
if (prepare_zap_oldest_mmu_page(kvm, &invalid_list))
freed++;
kvm_mmu_commit_zap_page(kvm, &invalid_list);
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
/*
* unfair on small ones
* per-vm shrinkers cry out
* sadness comes quickly
*/
list_move_tail(&kvm->vm_list, &vm_list);
break;
}
mutex_unlock(&kvm_lock);
return freed;
}
static unsigned long
mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
}
static struct shrinker mmu_shrinker = {
.count_objects = mmu_shrink_count,
.scan_objects = mmu_shrink_scan,
.seeks = DEFAULT_SEEKS * 10,
};
static void mmu_destroy_caches(void)
{
kmem_cache_destroy(pte_list_desc_cache);
kmem_cache_destroy(mmu_page_header_cache);
}
static void kvm_set_mmio_spte_mask(void)
{
u64 mask;
/*
 * Set the reserved bits and the present bit of a paging-structure
* entry to generate page fault with PFER.RSV = 1.
*/
/*
* Mask the uppermost physical address bit, which would be reserved as
* long as the supported physical address width is less than 52.
*/
mask = 1ull << 51;
/* Set the present bit. */
mask |= 1ull;
/*
* If reserved bit is not supported, clear the present bit to disable
* mmio page fault.
*/
if (IS_ENABLED(CONFIG_X86_64) && shadow_phys_bits == 52)
mask &= ~1ull;
kvm_mmu_set_mmio_spte_mask(mask, mask);
}
int kvm_mmu_module_init(void)
{
int ret = -ENOMEM;
/*
* MMU roles use union aliasing which is, generally speaking, an
* undefined behavior. However, we supposedly know how compilers behave
* and the current status quo is unlikely to change. Guardians below are
* supposed to let us know if the assumption becomes false.
*/
BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64));
kvm_mmu_reset_all_pte_masks();
kvm_set_mmio_spte_mask();
pte_list_desc_cache = kmem_cache_create("pte_list_desc",
sizeof(struct pte_list_desc),
0, SLAB_ACCOUNT, NULL);
if (!pte_list_desc_cache)
goto out;
mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
sizeof(struct kvm_mmu_page),
0, SLAB_ACCOUNT, NULL);
if (!mmu_page_header_cache)
goto out;
if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
goto out;
ret = register_shrinker(&mmu_shrinker);
if (ret)
goto out;
return 0;
out:
mmu_destroy_caches();
return ret;
}
/*
* Calculate mmu pages needed for kvm.
*/
unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
{
unsigned long nr_mmu_pages;
unsigned long nr_pages = 0;
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
int i;
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
slots = __kvm_memslots(kvm, i);
kvm_for_each_memslot(memslot, slots)
nr_pages += memslot->npages;
}
nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
return nr_mmu_pages;
}
void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
kvm_mmu_unload(vcpu);
free_mmu_pages(vcpu);
mmu_free_memory_caches(vcpu);
}
void kvm_mmu_module_exit(void)
{
mmu_destroy_caches();
percpu_counter_destroy(&kvm_total_used_mmu_pages);
unregister_shrinker(&mmu_shrinker);
mmu_audit_disable();
}
Editing presentation shapes
Moving shapes
AnyLogic supports two ways of moving presentation shapes. First, you can specify the shape's coordinates in pixels in the shape's properties view. There is also a more visual way: moving shapes in the graphical editor using the mouse.
As a rule, when you draw your presentation, it is more convenient to position shapes directly in the editor. Shape properties are commonly used when you need to define how a shape should move during the model simulation: this behavior can be easily defined using the corresponding dynamic properties of the shape, as in the sketch below.
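For illustration only, a dynamic coordinate is just a Java expression that AnyLogic re-evaluates at runtime and that must return the value in pixels. A minimal sketch, assuming the X property is switched to dynamic mode (the numbers are arbitrary):

100 + 10 * time()   // start at x = 100 px and drift right by 10 px per model time unit

Here time() is the standard AnyLogic function returning the current model time.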
To move a shape graphically
1. Drag the shape using the mouse, or
Select the shape in the graphical editor by clicking it and move it by pressing the arrow keys on your keyboard.
To define shape coordinates in pixels
1. Select the shape in the graphical editor by clicking on it.
2. In the Position and size section of the Properties view, enter the shape's x- and y-coordinates in the X and Y edit boxes, respectively.
Using modifier keys
When moving presentation shapes, you can use the Ctrl, Alt, and Shift modifier keys to perform additional actions. Modifier keys can be combined: for example, Ctrl + Alt + drag creates a copy of the element and moves it with snapping to grid temporarily disabled.
To create a copy of the shape
1. Press and hold the Ctrl key (Mac OS: Cmd key) and drag the shape using the mouse.
To move a shape without snapping it to grid
By default, shapes snap to the graphical editor grid. You can temporarily disable snapping to grid when moving a shape.
1. Press and hold the Alt key (Mac OS: Option key) and drag the shape using the mouse, or
Press and hold the Alt key and move the shape by pressing the arrow keys on your keyboard.
To shift a shape horizontally or vertically
1. Press and hold the Shift key and drag the shape using the mouse. The shape will only move horizontally or vertically, depending on the dragging direction, thus maintaining vertical or horizontal center alignment.
Resizing shapes
AnyLogic supports two ways of resizing presentation shapes. First, you can specify the shape's width and height in pixels in the shape's properties view. There is also a more visual way: resizing shapes directly in the graphical editor.
As a rule, when you draw your presentation, it is more convenient to resize shapes directly in the editor. Shape properties are commonly used when you need to define how a shape should change its size during the model simulation: this behavior can be easily defined using the corresponding dynamic properties of the shape.
To resize a shape graphically
1. Select the shape in the graphical editor by clicking it.
2. You will see handles, that is, several blue rectangles displayed on the shape’s border.
3. Click the handle lying on the shape border you want to move (or the corner of the border, in case you want to move two borders at once) with the left mouse button and, while holding the left mouse button down, move the mouse in the required resize direction.
4. Release the mouse button when finished.
To define shape size in pixels
1. Select the shape in the graphical editor by clicking it.
2. In the Position and size section of the Properties view, enter the shape's height and width in the Height and Width edit boxes, respectively.
You cannot resize a text shape since its size is defined by the text font.
Rotating shapes
You can rotate presentation shapes either in the graphical editor or by specifying the required rotation angle in the shape properties.
You cannot rotate GIS maps and view areas.
Rotating shapes in the graphical editor
Presentation shapes can be rotated directly in the graphical editor by dragging the shape’s rotation point.
To show the rotation point of the shape
1. In case of rectangle, rounded rectangle, oval, arc, text, image, CAD drawing or non-empty group of shapes, select the shape in the graphical editor by clicking it.
In case of polyline or curve, right-click (Mac OS: Ctrl + click) the shape in the graphical editor and choose Edit shape from the context menu.
2. You will see a small circle near or inside the shape: this is the rotation point for the selected shape. If you place the mouse pointer over this point, the cursor changes to the rotation cursor.
You can restrict the rotation angle depending on how you drag the rotation point:
To rotate the shape by angles restricted to multiples of 15, 5, or 1 degrees
1. Click the rotation point with the left mouse button and, while holding the button down, move the mouse in the required rotation direction.
This is the most frequently used type of rotation. Dragging right near the rotation point restricts the rotation angles to multiples of 15 degrees (15, 30, …). Moving the mouse aside from the rotation point while dragging restricts angles to multiples of 5 degrees (5, 10, …); moving a little bit more aside restricts angles to integer values (1, 2, …).
To rotate the shape by any angle
1. Press Alt, then click the rotation point and, while holding both Alt and the left mouse button down, move the mouse in the required rotation direction.
This type of rotation lets you rotate shapes by any angle (not only by integer values as in the case described above), for example by 5.79 degrees.
To rotate the shape by an angle that is a multiple of 90 degrees
1. Press Shift, then click the rotation point and, while holding both Shift and the left mouse button down, move the mouse in the required rotation direction.
The shape gets rotated by angles that are multiples of 90 degrees (90, 180, or 270 degrees).
Changing the shape’s rotation point
When you rotate a shape, it rotates around its rotation point. For the majority of shapes (rectangle, image, etc.) it is the shape’s top-left corner.
You may need to rotate a shape around its center, which requires shifting the shape’s rotation point. It can be achieved by including the shape in a group: in this case, the group’s origin will be used as a rotation point.
To change the shape’s rotation point
1. Right-click the shape and select Grouping > Create a Group from the context menu.
A new group containing the shape will be created. The group's rotation point is located in the center of the shape, so you can rotate the group, and thus the shape, around its center.
2. To further reposition the rotation point, right-click the group and select Select Group Contents from the context menu.
3. The group contents, i.e. the rectangle, will be selected. Move the shape so that the rotation point appears in the required position relative to the shape.
Specifying the rotation angle in the shape properties
To define shape’s rotation angle in degrees
1. Select the shape by clicking it in the graphical editor or in the Projects view.
2. In the Position and size section of the Properties view, enter the shape’s rotation angle in the Rotation edit box.
To rotate a shape dynamically at the model runtime, specify an expression that will be re-evaluated at the model runtime and return the shape’s rotation angle. This can be done in the Rotation property located in the Position and size section of the shape properties.
While in the static Rotation property you define angles in degrees, the expression specified in the dynamic Rotation field returns not degrees, but radians.
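For example, if the angle you want to apply is stored in degrees, the dynamic expression has to convert it on the fly. A minimal sketch, assuming a hypothetical double variable angleDeg defined in the agent (the conversion itself is plain Java):
angleDeg * Math.PI / 180.0 // value returned to the dynamic Rotation field, in radians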
Changing shape’s color
If your shape covers some area of the presentation diagram, you may paint it over with any color you wish. The color used to paint a shape over is called shape’s fill color. You may choose any color or even a texture as a fill color of your shape.
To change the shape’s fill color
1. Select the shape in the graphical editor by clicking it.
2. Open the Appearance section of the shape’s Properties.
3. Click the arrow in the Fill color control and choose the color you wish. You can choose No fill value if you do not want the shape to be filled.
4. You can change the transparency of the chosen color. To change the shape transparency, click Other Colors... in the opened dialog box. This opens the Colors dialog. Set up the transparency level you like using the Transparency slider. Transparency is defined numerically in the range (0,255): 255 denotes fully opaque color and 0 denotes fully transparent color.
Changing shape’s line appearance
To change the shape’s line color
1. Select the shape in the graphical editor by clicking it.
2. In the Appearance section of the Properties view, choose the color using the Line color control. Choose No Line, if you do not want the line to be drawn.
To change the shape’s line width
1. Select the shape in the graphical editor by clicking it.
2. In the Appearance section of the Properties view, choose the line width from the Line width drop-down list, or
Enter the line width value in pixels in the edit box pt. to the right.
Changing style of shape's outline
By default, every presentation shape appears with an outline drawn as a solid line.
However, you can make this line dotted or dashed:
To change the shape’s outline style
1. Select the shape in the graphical editor or in the Projects view.
2. In the Appearance section of the Properties view, choose the required line style from the Line style drop-down list.
Controlling visibility of shapes
You can control visibility of any presentation shape, chart, or control at runtime using its dynamic property Visible.
To define whether a shape (control, chart) should be visible at runtime
1. Select the element in the graphical editor, or in the Projects view.
2. Go to the shape’s Properties.
3. Switch the Visible control to the dynamic mode.
4. Type a boolean expression specifying the visibility of the element in the Visible edit box. The shape will be visible when the specified expression evaluates to true, and not visible otherwise. You can call your own boolean function that returns true or false depending on some object’s state at runtime.
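For instance, a minimal sketch of such an expression, assuming a hypothetical collection named waitingClients exists in the model:
waitingClients.size() > 0 // the shape is shown only while there are waiting clients
Any Java expression that evaluates to a boolean will work here.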
Changing shape’s position in stacking order
Although the 2D presentation is flat, shapes are drawn in a certain order and shown one above the other. This order is called the Z-order because there is a virtual Z axis perpendicular to the surface of the presentation and extending towards the viewer.
There are four common commands that allow you to change the Z-order of a shape:
Bring To Front: places the shape on top of all other shapes.
Bring Forward: moves the shape one step up (swaps it with the shape directly in front).
Send Backward: moves the shape one step down (sends the shape backward one level).
Send To Back: places the shape below all other shapes.
To change the Z-order of a shape
1. Select the shape in the graphical editor by clicking it.
2. Click the corresponding [Command] toolbar button, or
Right-click (Mac OS: Ctrl + click) the shape in the graphical editor and choose Order > [Command] from the context menu, or
Choose Draw > Order > [Command] from the main menu.
The figure below illustrates these commands. Here we change the Z-order of the red star that is initially placed below two shapes (cyan rounded rectangle and white oval) and on top of two other ones (circle and rectangle). You can see that by changing the Z-order of a shape, you move it in the Presentation tree up or down. The elements of the Presentation tree are actually displayed according to their Z-order — the shape that is placed below all other shapes is the topmost item of the tree, and so on.
Sometimes shapes get hidden below other shapes. You can still select those shapes either by clicking their location several times in the graphical editor or by using the Presentation tree.
To select a shape that is hidden below other shapes in the graphical editor
1. Click in the position where the hidden shape is located. The top level shape gets selected.
2. Continue clicking in the same position until you select the shape you are looking for. With each click, the next shape in the stacking order will be selected. In the Properties view, you can see which shape is currently selected.
Changing the shape’s Z-order at model runtime
You can change the shape’s Z-order dynamically at model runtime by calling presentation.remove() and presentation.insert() functions.
The insert() function takes two arguments. The first one is the integer index of the new position of the shape in the presentation’s Z-order. The second argument is the shape itself.
The shape should be removed from all the groups where it is contained prior to removing this shape from the agent’s presentation.
In the table below we provide you with the code snippets that change the Z-order of the shape named rectangle.
Action Code
Bring to front
presentation.remove(rectangle);
presentation.insert(presentation.size()-1, rectangle);
Send to back
presentation.remove(rectangle);
presentation.insert(0, rectangle);
Bring forward
int shapeIndex = presentation.indexOf(rectangle);
if ( shapeIndex < presentation.size() - 1 )
{
presentation.remove(rectangle);
presentation.insert(shapeIndex + 1, rectangle);
}
Send backward
int shapeIndex = presentation.indexOf(rectangle);
if ( shapeIndex > 0 )
{
presentation.remove(rectangle);
presentation.insert(shapeIndex - 1, rectangle);
}
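If you change the Z-order of shapes from several places in your model, you can wrap the snippets above in a small Java helper. This is only a sketch: the method name moveShapeForward is illustrative (not an AnyLogic API), and the parameter type Shape stands for the base presentation class of your AnyLogic version; the presentation.indexOf(), remove(), insert() and size() calls are the same ones used in the table above.
void moveShapeForward(Shape shape) {
    int shapeIndex = presentation.indexOf(shape);   // current position in the Z-order
    if (shapeIndex < presentation.size() - 1) {      // if not already on top
        presentation.remove(shape);
        presentation.insert(shapeIndex + 1, shape);  // re-insert one level higher
    }
}
Remember that a shape must first be removed from any group it belongs to before it is removed from the agent's presentation, as noted above.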
You can also play with visibility of the shapes to create the required dynamic presentation.
Locking shapes
Sometimes you may need to lock a presentation shape so that it could not be selected in the graphical editor by a mouse click, and model developers would not have a chance to move it, resize, or modify its properties anyhow.
This is frequently needed when you have a background image on the presentation (e.g. a layout of a factory or a hospital department) used as a base for your animation. When editing a shape lying over the layout, you may accidentally edit the layout itself (e.g. move it aside), because it is sometimes hard to click exactly on the shape border and you may select the background image instead of the shape you need.
AnyLogic allows users to lock presentation shapes. Locked shapes do not react to mouse clicks, so it is impossible to select them in the graphical editor until you unlock them. In the example described above, locking the background image significantly simplifies editing the animation, since an inaccurate mouse click can no longer select the layout.
To lock a shape
1. Select the shape in the graphical editor or in the Projects view.
2. Select the Lock checkbox in the Properties of this shape.
To unlock a shape
1. Select the shape in the Projects view.
2. Clear the Lock checkbox in Properties of this shape.
To unlock all shapes
1. Right-click (Mac OS: Ctrl + click) the empty space in the graphical editor and choose Locking > Unlock all shapes from the popup menu.
This will unlock all shapes located on the currently active presentation diagram.
- let ratios = [ '1/ 3', '1/ 2', '9/ 16', '3/ 5', '2/ 3', '3/ 4', '4/ 5', '1/ 1', '5/ 4', '4/ 3', '3/ 2', '5/ 3', '16/ 10', '16/ 9', '1.85/ 1', '2/ 1', '2.35/ 1', '3/ 1', '4/ 1' ];
- let n = ratios.length;
- let ratio = '1/ 1';
- let val = Math.max(0, ratios.indexOf(ratio));
- let palettes = [
- '#ffb528, #ff8c40, #f86759, #e14b71, #bb3e85, #883d90',
- '#95ffdc, #47fff1, #00e1fa, #00c1f6, #419fe3, #717dbf',
- '#b5ce4f, #eed54e, #fbb64b, #ff9854, #fc7c64, #eb6575',
- '#885789, #cf6c84, #f7966f, #f6d169, #8bbd69, #139d7f',
- '#00aab7, #2889ac, #48678f, #8478ac, #c486b8, #fd98b3',
- '#ffca81, #f29e85, #cc7d8e, #94688c, #595678, #4c7ba3',
- '#f9ffce, #e7cab2, #b8a09f, #91ffe2, #00edff, #00b3ff',
- '#92ffe1, #00ecff, #5baeff, #fdffcd, #ff82a3, #a06eb2'
- ];
- let m = palettes.length;
- let u = palettes[0].split(', ')
- let p = u.length, q = Math.ceil(.5*p);
- let e = .5;
body(style=`--ratio: ${ratio}; --val: ${val}`)
style
| .card {
| --sl0: #{u.slice(0, q).map((c, i) => `var(--c${q - i - 1}) 0% ${(i + 1)*45/q}deg`)};
| --sl1: #{u.slice(q).map((c, i) => `var(--c${q + i}) 0% ${(i + 1)*45/q}deg`)}
| }
form(style=`--max: ${n - 1}`)
label(for='val') Control card aspect ratio
input#val(type='range' value=val max=n - 1 list='l')
output(for='val') #{ratio}
datalist#l
- for(let i = 0; i < n; i++)
option(value=i label=ratios[i] style=`--idx: ${i}`)
section
- for(let i = 0; i < m; i++)
.card(style=`${palettes[i].split(', ').map((c, j) => `--c${j}: ${c}`).join('; ')}`)
$g: #000 #1b1d22 #22242a #323741 #484d5a #7b849c #c4c4c4 #fff;
$c: #bf79f0 #fb678c #fb8c6c #eaa753;
$u: 1.25;
$t: .3s;
$label-w: 24ch;
$value-w: 9ch;
$value-b: 1ch;
$value-r: 5px;
$value-a: 70deg;
$value-e: 5deg;
$thumb-d: 2ch;
$thumb-r: .5*$thumb-d;
$thumb-f: .5;
$thumb-l: 3px;
$track-s: .5*$value-w - $thumb-r;
$track-h: 6px;
$track-r: .5*$track-h;
$ruler-h: .375em;
$card-r: .25em;
@mixin track() {
border: none;
width: 100%; height: $track-h;
border-radius: $track-h;
background:
radial-gradient(circle at var(--pos),
transparent calc(#{$thumb-f}*#{$thumb-r} + #{$thumb-l}),
nth($g, 5) calc(#{$thumb-f}*#{$thumb-r} + #{$thumb-l} + 1px))
}
@mixin thumb($f: 0) {
margin-top: calc(#{$f}*(#{$track-r} - #{$thumb-r}));
border: none;
width: $thumb-d; height: $thumb-d;
border-radius: 50%;
transform: scale(calc(var(--not-hl)*#{$thumb-f} + var(--hl)));
background: nth($c, 4);
transition: transform $t;
cursor: ew-resize
}
* { box-sizing: inherit; margin: 0; border: none; padding: 0; background: transparent; color: inherit; font: inherit }
html {
overflow-x: hidden;
background: nth($g, 3);
color: nth($g, 6);
font: clamp(.625em, 6.25vw, #{$u*1em})
ubuntu mono, consolas, monaco, monospace;
}
body, form, section, datalist, option, div { display: grid }
body {
box-sizing: border-box;
grid-gap: .5*$track-s;
padding: .5*$track-s
}
form, section { max-width: 100% }
form {
--i: var(--narr, 1);
--not-i: calc(1 - var(--i));
--j: var(--wide, 0);
--not-j: calc(1 - var(--j));
--hl: 0;
--not-hl: calc(1 - var(--hl));
--pos: calc(#{$thumb-r} + var(--val)/var(--max)*(100% - #{$thumb-d}));
--col: calc(100% + var(--j)*#{-1*$label-w} + var(--not-i)*#{-1*$value-w});
overflow-x: hidden;
padding: .5*$track-s $track-s;
border-radius: $card-r;
grid-template-columns:
calc(var(--j)*#{$label-w}) var(--col) calc(var(--not-i)*#{$value-w});
box-shadow: 0 0 1px 1px nth($g, 5);
background: nth($g, 2);
filter: Saturate(var(--hl));
&:focus-within { --hl: 1 }
@media (min-width: 1005px) { --narr: 0 }
@media (min-width: 1600px) { --wide: 1 }
}
label {
align-self: center;
grid-column: 1/ calc(2 + var(--not-j));
font: 1rem / 1.25 raleway,
trebuchet ms, verdana, arial, sans-serif
}
input, output {
grid-row: calc(1 + var(--not-j))
}
input {
&, &::-webkit-slider-runnable-track,
&::-webkit-slider-thumb { -webkit-appearance: none }
grid-column: 2;
padding: $ruler-h 0;
background:
repeating-linear-gradient(90deg,
nth($g, 5) 0 2px,
transparent 0 calc((100% - #{$thumb-d})/var(--max)))
calc(#{$thumb-r} - 1px) 100%/ 100% #{$ruler-h} no-repeat;
&:focus { outline: none }
&::-webkit-slider-runnable-track { @include track }
&::-moz-range-track { @include track }
&::-webkit-slider-thumb { @include thumb(1) }
&::-moz-range-thumb { @include thumb }
}
output {
--xy: calc(var(--i)*50%) calc((1 + var(--i))*50%);
grid-column: calc(2 + var(--not-i));
place-self: center start;
position: relative;
left: calc(var(--i)*var(--pos));
border: solid $value-b transparent;
padding: .25em 0;
width: $value-w;
border-radius: calc(#{$value-b} + #{$value-r});
transform:
translate(calc(var(--i)*-50%), calc(var(--i)*(-50% - #{$thumb-r})))
scale(calc(var(--not-i) + var(--i)*var(--hl)));
background: nth($c, 4) padding-box;
color: nth($g, 2);
text-align: center;
transition: transform $t;
&::before {
position: absolute;
top: -$value-b; right: -$value-b; bottom: -$value-b; left: -$value-b;
z-index: -1;
background:
conic-gradient(
from calc((1 - var(--i))*90deg - .5*#{$value-a})
at var(--xy),
transparent,
nth($c, 4) $value-e $value-a - $value-e,
transparent $value-a)
var(--xy)/ 50% 50% no-repeat;
content: ''
}
}
output, datalist { word-spacing: -1ch }
datalist {
--ruler-w: calc((var(--max) + 1)*(100% - #{$thumb-d})/var(--max));
grid-area: calc(2 + var(--not-j))/ 2;
grid-template-columns: repeat(calc(var(--max) + 1), calc(100%/(var(--max) + 1)));
place-self: center;
width: var(--ruler-w)
}
option {
place-content: center;
place-self: center;
@media (max-width: 1250px) { font-size: .8em }
@media (max-width: 900px) and (min-width: 641px) {
&:not(:nth-child(2n + 1)) { transform: scale(0) }
}
@media (max-width: 640px) {
&:not(:nth-child(3n + 1)) { transform: scale(0) }
}
}
section {
grid-gap: $card-r;
grid-template-columns: repeat(auto-fit, Min(100%, calc(5em*(1 + var(--ratio)))));
place-content: center
}
.card {
aspect-ratio: var(--ratio);
position: relative;
color: nth($g, 8);
font: 600 1.5em parisienne, z003, segoe script, comic sans ms, cursive;
text-align: center;
text-shadow: 1px 1px 1px nth($g, 2);
&::before {
position: absolute;
z-index: -1;
padding: 50%;
border-radius: #{$card-r}/ calc(#{$card-r}*(var(--ratio)));
transform-origin: 50% 0;
transform: scaley(calc(1/(var(--ratio))));
background:
conic-gradient(from 45deg at 0 100%,
var(--sl0), transparent 0%),
conic-gradient(from 225deg at 100% 0,
var(--sl1), transparent 0%)
var(--c0);
content: ''
}
&::after {
display: grid;
place-content: center;
padding: $card-r;
border-radius: $card-r;
background: rgba(nth($g, 6), .1);
backdrop-filter: blur(1px);
content: 'Hello, Gorgeous!'
}
}
addEventListener('input', e => {
let _t = e.target,
val = +_t.value,
ratio = document.querySelector(`option[value='${val}']`).label;
document.body.style.setProperty('--val', val);
document.body.style.setProperty('--ratio', _t.nextElementSibling.textContent = ratio)
})
/*
Context: created for my Variable Aspect Ratio Card With Conic Gradients Meeting Along the Diagonal (https://css-tricks.com/variable-aspect-ratio-card-with-conic-gradients-meeting-along-the-diagonal/) article on CSS-Tricks.
*/
001package algs24;
002import stdlib.*;
003
004/* ***********************************************************************
005 * Compilation: javac IndexMaxPQ.java
006 * Execution: java IndexMaxPQ
007 *
008 * Maximum-oriented indexed PQ implementation using a binary heap.
009 *
010 *********************************************************************/
011
012import java.util.Iterator;
013import java.util.NoSuchElementException;
014
015/**
016 * The {@code IndexMaxPQ} class represents an indexed priority queue of generic keys.
017 * It supports the usual <em>insert</em> and <em>delete-the-maximum</em>
018 * operations, along with <em>delete</em> and <em>change-the-key</em>
019 * methods. In order to let the client refer to items on the priority queue,
020 * an integer between {@code 0} and {@code NMAX-1} is associated with each key—the client
021 * uses this integer to specify which key to delete or change.
022 * It also supports methods for peeking at the maximum key,
023 * testing if the priority queue is empty, and iterating through
024 * the keys.
025 * <p>
026 * The <em>insert</em>, <em>delete-the-maximum</em>, <em>delete</em>,
027 * <em>change-key</em>, <em>decrease-key</em>, and <em>increase-key</em>
028 * operations take logarithmic time.
029 * The <em>is-empty</em>, <em>size</em>, <em>max-index</em>, <em>max-key</em>, and <em>key-of</em>
030 * operations take constant time.
031 * Construction takes time proportional to the specified capacity.
032 * <p>
033 * This implementation uses a binary heap along with an array to associate
034 * keys with integers in the given range.
035 * <p>
036 * For additional documentation, see <a href="http://algs4.cs.princeton.edu/24pq">Section 2.4</a> of
037 * <i>Algorithms, 4th Edition</i> by Robert Sedgewick and Kevin Wayne.
038 */
039public class IndexMaxPQ<K extends Comparable<? super K>> implements Iterable<Integer> {
040 private int N; // number of elements on PQ
041 private int[] pq; // binary heap using 1-based indexing
042 private int[] qp; // inverse of pq - qp[pq[i]] = pq[qp[i]] = i
043 private K[] keys; // keys[i] = priority of i
044
045 /**
046 * Create an empty indexed priority queue with indices between {@code 0} and {@code NMAX-1}.
047 * @throws java.lang.IllegalArgumentException if {@code NMAX < 0}
048 */
049 @SuppressWarnings("unchecked")
050 public IndexMaxPQ(int NMAX) {
051 keys = (K[]) new Comparable[NMAX + 1]; // make this of length NMAX??
052 pq = new int[NMAX + 1];
053 qp = new int[NMAX + 1]; // make this of length NMAX??
054 for (int i = 0; i <= NMAX; i++) qp[i] = -1;
055 }
056
057 /**
058 * Is the priority queue empty?
059 */
060 public boolean isEmpty() { return N == 0; }
061
062 /**
063 * Is i an index on the priority queue?
064 * @throws java.lang.IndexOutOfBoundsException unless {@code (0 <= i < NMAX)}
065 */
066 public boolean contains(int i) {
067 return qp[i] != -1;
068 }
069
070
071 /**
072 * Return the number of keys on the priority queue.
073 */
074 public int size() {
075 return N;
076 }
077
078 /**
079 * Associate key with index i.
080 * @throws java.lang.IndexOutOfBoundsException unless {@code 0 <= i < NMAX}
081 * @throws java.lang.IllegalArgumentException if there already is an item associated with index i.
082 */
083 public void insert(int i, K key) {
084 if (contains(i)) throw new IllegalArgumentException("index is already in the priority queue");
085 N++;
086 qp[i] = N;
087 pq[N] = i;
088 keys[i] = key;
089 swim(N);
090 }
091
092 /**
093 * Return the index associated with a maximal key.
094 * @throws java.util.NoSuchElementException if priority queue is empty.
095 */
096 public int maxIndex() {
097 if (N == 0) throw new NoSuchElementException("Priority queue underflow");
098 return pq[1];
099 }
100
101 /**
102 * Return a maximal key.
103 * @throws java.util.NoSuchElementException if priority queue is empty.
104 */
105 public K maxKey() {
106 if (N == 0) throw new NoSuchElementException("Priority queue underflow");
107 return keys[pq[1]];
108 }
109
110 /**
111 * Delete a maximal key and return its associated index.
112 * @throws java.util.NoSuchElementException if priority queue is empty.
113 */
114 public int delMax() {
115 if (N == 0) throw new NoSuchElementException("Priority queue underflow");
116 int min = pq[1];
117 exch(1, N--);
118 sink(1);
119 qp[min] = -1; // delete
120 keys[pq[N+1]] = null; // to help with garbage collection
121 pq[N+1] = -1; // not needed
122 return min;
123 }
124
125 /**
126 * Return the key associated with index i.
127 * @throws java.lang.IndexOutOfBoundsException unless {@code 0 <= i < NMAX}
128 * @throws java.util.NoSuchElementException no key is associated with index i
129 */
130 public K keyOf(int i) {
131 if (!contains(i)) throw new NoSuchElementException("index is not in the priority queue");
132 else return keys[i];
133 }
134
135
136 /**
137 * Change the key associated with index i to the specified value.
138 * @throws java.lang.IndexOutOfBoundsException unless {@code 0 <= i < NMAX}
139 * @throws java.util.NoSuchElementException no key is associated with index i
140 * @deprecated Replaced by changeKey()
141 */
142 @Deprecated public void change(int i, K key) {
143 changeKey(i, key);
144 }
145
146 /**
147 * Change the key associated with index i to the specified value.
148 * @throws java.lang.IndexOutOfBoundsException unless {@code 0 <= i < NMAX}
149 * @throws java.util.NoSuchElementException no key is associated with index i
150 */
151 public void changeKey(int i, K key) {
152 if (!contains(i)) throw new NoSuchElementException("index is not in the priority queue");
153 keys[i] = key;
154 swim(qp[i]);
155 sink(qp[i]);
156 }
157
158 /**
159 * Increase the key associated with index i to the specified value.
160 * @throws java.lang.IndexOutOfBoundsException unless {@code 0 <= i < NMAX}
161 * @throws java.lang.IllegalArgumentException if key ≤ key associated with index i
162 * @throws java.util.NoSuchElementException no key is associated with index i
163 */
164 public void increaseKey(int i, K key) {
165 if (!contains(i)) throw new NoSuchElementException("index is not in the priority queue");
166 if (keys[i].compareTo(key) >= 0) throw new IllegalArgumentException("Calling increaseKey() with given argument would not strictly increase the key");
167
168
169 keys[i] = key;
170 swim(qp[i]);
171 }
172
173
174 /**
175 * Decrease the key associated with index i to the specified value.
176 * @throws java.lang.IndexOutOfBoundsException unless {@code 0 <= i < NMAX}
177 * @throws java.lang.IllegalArgumentException if key ≥ key associated with index i
178 * @throws java.util.NoSuchElementException no key is associated with index i
179 */
180 public void decreaseKey(int i, K key) {
181 if (!contains(i)) throw new NoSuchElementException("index is not in the priority queue");
182 if (keys[i].compareTo(key) <= 0) throw new IllegalArgumentException("Calling decreaseKey() with given argument would not strictly decrease the key");
183
184 keys[i] = key;
185 sink(qp[i]);
186 }
187
188 /**
189 * Delete the key associated with index i.
190 * @throws java.lang.IndexOutOfBoundsException unless {@code 0 <= i < NMAX}
191 * @throws java.util.NoSuchElementException no key is associated with index i
192 */
193 public void delete(int i) {
194 if (!contains(i)) throw new NoSuchElementException("index is not in the priority queue");
195 int index = qp[i];
196 exch(index, N--);
197 swim(index);
198 sink(index);
199 keys[i] = null;
200 qp[i] = -1;
201 }
202
203
204 /* ************************************************************
205 * General helper functions
206 **************************************************************/
207 private boolean less(int i, int j) {
208 return keys[pq[i]].compareTo(keys[pq[j]]) < 0;
209 }
210
211 private void exch(int i, int j) {
212 int swap = pq[i]; pq[i] = pq[j]; pq[j] = swap;
213 qp[pq[i]] = i; qp[pq[j]] = j;
214 }
215
216
217 /* ************************************************************
218 * Heap helper functions
219 **************************************************************/
220 private void swim(int k) {
221 while (k > 1 && less(k/2, k)) {
222 exch(k, k/2);
223 k = k/2;
224 }
225 }
226
227 private void sink(int k) {
228 while (2*k <= N) {
229 int j = 2*k;
230 if (j < N && less(j, j+1)) j++;
231 if (!less(k, j)) break;
232 exch(k, j);
233 k = j;
234 }
235 }
236
237
238 /* *********************************************************************
239 * Iterators
240 **********************************************************************/
241
242 /**
243 * Return an iterator that iterates over all of the elements on the
244 * priority queue in descending order.
245 * <p>
246 * The iterator doesn't implement {@code remove()} since it's optional.
247 */
248 public Iterator<Integer> iterator() { return new HeapIterator(); }
249
250 private class HeapIterator implements Iterator<Integer> {
251 // create a new pq
252 private IndexMaxPQ<K> copy;
253
254 // add all elements to copy of heap
255 // takes linear time since already in heap order so no keys move
256 public HeapIterator() {
257 copy = new IndexMaxPQ<>(pq.length - 1);
258 for (int i = 1; i <= N; i++)
259 copy.insert(pq[i], keys[pq[i]]);
260 }
261
262 public boolean hasNext() { return !copy.isEmpty(); }
263 public void remove() { throw new UnsupportedOperationException(); }
264
265 public Integer next() {
266 if (!hasNext()) throw new NoSuchElementException();
267 return copy.delMax();
268 }
269 }
270
271
272 public static void main(String[] args) {
273 // insert a bunch of strings
274 String[] strings = { "it", "was", "the", "best", "of", "times", "it", "was", "the", "worst" };
275
276 IndexMaxPQ<String> pq = new IndexMaxPQ<>(strings.length);
277 for (int i = 0; i < strings.length; i++) {
278 pq.insert(i, strings[i]);
279 }
280
281 // print each key using the iterator
282 for (int i : pq) {
283 StdOut.println(i + " " + strings[i]);
284 }
285
286 StdOut.println();
287
288 // increase or decrease the key
289 for (int i = 0; i < strings.length; i++) {
290 if (StdRandom.uniform() < 0.5)
291 pq.increaseKey(i, strings[i] + strings[i]);
292 else
293 pq.decreaseKey(i, strings[i].substring(0, 1));
294 }
295
296 // delete and print each key
297 while (!pq.isEmpty()) {
298 String key = pq.maxKey();
299 int i = pq.delMax();
300 StdOut.println(i + " " + key);
301 }
302 StdOut.println();
303
304 // reinsert the same strings
305 for (int i = 0; i < strings.length; i++) {
306 pq.insert(i, strings[i]);
307 }
308
309 // delete them in random order
310 int[] perm = new int[strings.length];
311 for (int i = 0; i < strings.length; i++)
312 perm[i] = i;
313 StdRandom.shuffle(perm);
314 for (int i = 0; i < perm.length; i++) {
315 String key = pq.keyOf(perm[i]);
316 pq.delete(perm[i]);
317 StdOut.println(perm[i] + " " + key);
318 }
319
320 }
321}
|
__label__pos
| 0.949721 |
9
So I wrote a script where you can enter a number and the program will find the highest prime number in that range. The problem is that in PHP, this calculation is really slow with larger numbers, as compared to my JavaScript version, which is the exact same thing but much faster.
//Here Is the PHP code:
<form>
<input type="text" name="input">
</form>
<?php
$input = $_GET['input'];
function Prime($num)
{
if($num < 2)
return false;
for ($i = 2; $i < $num; $i++)
{
if($num % $i == 0)
return false;
}
return true;
}
for($i = $input; $i > 0; $i--)
{
if(Prime($i))
echo $i;
if(Prime($i))
exit();
}
}
Here is the JavaScript variant:
<html>
<script>
var input = prompt("Enter The Number");
function Prime(num) {
for (var i = 2; i < num; i++) {
if(num % i == 0) {
return false;
}
}
return true;
}
for(var i = input; i > 0; i--){
if(Prime(i)){
document.write(i);
}
if(Prime(i)){
exit();
p.thisbreaksthecode();
}
}
</script>
</html>
For the JavaScript code, finding the highest prime in 99999999 takes 1.5 seconds. However, in PHP it takes a whopping 20 seconds. Considering that, apart from syntax, the two programs are exactly identical, this tells me something is wrong. What could be the reason for this slow calculation speed? Is it because of the way PHP works? How can I fix it?
10
• 3
The first question: Why do you calculate the prime twice? The second question: Have you read about "profiling"?
– Sven
Jun 19, 2015 at 23:22
• You are comparing different runtimes and server/client programming. I guess that some kind of just in time compilation kicks in when your javascript engine executes.
– collapsar
Jun 19, 2015 at 23:22
• Takes less than 2 seconds to execute on 3v4l.org if you don't calculate the prime twice; and that's without optimising the code in any way - 3v4l.org/hdXNM/perf#tabs
– Mark Baker
Jun 19, 2015 at 23:24
• Calculating the prime twice? Can you specify which line? Jun 19, 2015 at 23:26
• 2
First line: if(Prime(i)){document.write(i)} Second line: if(Prime(i)){ exit(); and similarly in the PHP
– Mark Baker
Jun 19, 2015 at 23:29
2 Answers 2
5
What could be the reason for this slow calculation speed? Is it because of the way PHP works?
Probably; PHP doesn't (currently) do JIT optimisations, so running tight loops like that will be very painful.
How can I fix it?
By picking a better algorithm:
// https://en.wikipedia.org/wiki/Primality_test#PHP_implementation
function isPrime($n)
{
if ($n <= 3) {
return $n > 1;
} else if ($n % 2 === 0 || $n % 3 === 0) {
return false;
} else {
for ($i = 5; $i * $i <= $n; $i += 6) {
if ($n % $i === 0 || $n % ($i + 2) === 0) {
return false;
}
}
return true;
}
}
For your current input it runs 500x faster.
1
• Wow, algorithm is everything. Thanks, I didn't expect that much increase in speed. Jun 21, 2015 at 5:24
0
You are clearly doing something wrong in the way you are running it.
I executed it (php -f calc.php) and it took very little:
<?php
$input = 9999999;
function Prime($num) {
if($num < 2) return false;
for ($i = 2; $i < $num; $i++) {
if($num%$i==0)
return false;
}
return true;
}
$start = microtime(true);
for($i = $input; $i > 0; $i--){
if (Prime($i)){
echo $i . PHP_EOL;
echo (microtime(true) - $start) . PHP_EOL;
exit;
}
}
Takes less than a second to execute: 0.94304203987122
Now if you change $i++ to ++$i it goes down to: 0.67830395698547 (Pre-increment is faster than post-increment in PHP)
2
• 9999999 is equal to 9999999, I think what you meant was 99999999
– Mark B
Jun 20, 2015 at 8:48
• Yes, 9 with eight nines Jun 21, 2015 at 5:20
Identify whether they have One Solution, Infinite Solutions or No Solutions :-
3y+4x=6 , 12y+16x=30
Answer: No Solution (3/12 = 4/16 but 6/30 differs, so the lines are parallel)
Identify whether they have One Solution, Infinite Solutions or No Solutions :- y+2x=10 , 3y+6x=30
Answer: Infinite Solutions (multiplying the first equation by 3 gives the second)
Identify whether they have One Solution, Infinite Solutions or No Solutions :- 7y−11x=53 ,19y−17x=456
Answer: One Solution (7/19 ≠ 11/17, so the lines intersect at a single point)
Identify whether they have One Solution, Infinite Solutions or No Solutions :- y=7,x=−2
Answer: One Solution
Identify whether they have One Solution, Infinite Solutions or No Solutions :- ay+bx=a−b , by−ax=a+b
Answer: One Solution
Identify whether they have One Solution, Infinite Solutions or No Solutions :- 2y+3x=0, 124y+13x=0
Answer: One Solution
Identify whether they have One Solution, Infinite Solutions or No Solutions :- x=11 ,x=−11
Answer: No Solution (x = 11 and x = −11 are distinct parallel lines)
Identify whether they have One Solution, Infinite Solutions or No Solutions :- 152y−378x=−74 , −378y+152x=−604
Answer: One Solution (the coefficient ratios differ, so the lines intersect)
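All of the classifications above follow from comparing the coefficient ratios of the pair a1x + b1y + c1 = 0 and a2x + b2y + c2 = 0:
a1/a2 ≠ b1/b2 ⇒ one (unique) solution;
a1/a2 = b1/b2 = c1/c2 ⇒ infinitely many solutions;
a1/a2 = b1/b2 ≠ c1/c2 ⇒ no solution.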
Using substitution method solve the equation :- x−2y+300=0, 6x−y−70=0
Answer: From the first equation, x = 2y − 300 ----(1)
6x − y − 70 = 0 ----(2)
Substituting (1) into (2):
6(2y − 300) − y − 70 = 0
⇒ 11y − 1870 = 0 ⇒ y = 170
Putting this in (1): x = 2(170) − 300 = 40
Using substitution method solve the equation :- 5x−y=5, 3x−y=3
Answer: 5x − y = 5 ----(1)
3x − y = 3 ----(2)
From (1), y = 5x − 5. Substituting into (2):
3x + 5 -5x = 3
-> x=1
Putting this in (1)
-> y=0
Using elimination method solve the equation :- x+y−40=0 , 7x+3y=180
Answer: x+y−40=0 ---(A)
7x+3y=180 ---(B)
Multiplying equation (A) by 7
7x+7y−280=0 ---(C)
Subtracting equation (B) from equation (C)
We get
4y=100 => y=25
Substituting this in (A) ,we get x=15
Identify whether statement True or False :- 2x+3y=10 & 9x+11y=12 are consistent pair
Answer: True
Identify whether statement True or False :- 51x+68y=110 & 3x+4y=99 are consistent pair
Answer: False
Identify whether statement True or False :- x−3y=3 & 3x−9y=2 are consistent pair
Answer: False
Identify whether statement True or False :- x+6y=12 and 4x+24y=64 are inconsistent pair of equation
Answer: True
Identify whether statement True or False :- 2x+6y=12 and 8x+24y=65 are consistent pair of equation
Answer: False
Identify whether statement True or False :- Lines represented by x−y=0 and x+y=0 are perpendicular to each other
Answer: True
Identify whether statement True or False :- x=0 ,y=0 has one unique solution
Answer: True
Identify whether statement True or False :- There are infinite solution for equation 17x+12y=30
Answer: True
Identify whether statement True or False :- Pair of lines 117x+14y=30 , 65x+11y=19 are consistent and have a unique solution
Answer: True
Identify whether statement True or False :- Line 4x+5y=0 and 11x+17y=0 both passes through origin
Answer: True
find the value of K for which the linear pair has infinite solution
12x+14y=0
36x+Ky=0
Answer: K = 42 (for infinitely many solutions the lines must coincide: 12/36 = 14/K, so K = 42)
There are 10 students in XII class. Some are maths and some bio student. The no of bio students are 4 more then math’s students. Find the no of math’s and bio students
Answer: Let x be the number of maths students
y be bio students
Then
x+y=10
y=x+4
Solving these linear pair through any method we get
x=3 and y=7
There are two numbers. Two conditions are there for them
(i) Sum of these two numbers are 100
(ii) One number is four time another number.
What are these numbers?
Answer: Let the two numbers be x and y.
x + y = 100
y = 4x
Solving them we get x=20 and y=80
If 2x + 3y = 12 and 3x - 2y = 5 then solve the equation
Answer: x = 3, y = 2
If 4x+6y=3xy and 8x+9y=5xy then solve the equation
Answer: x = 3, y = 4
If 29x+37y=103 and 37x+29y=95 then solve the equation
Answer: x = 1, y = 2 (adding the two equations gives x + y = 3; subtracting gives x − y = −1)
If 2^(x + y) = 2^(x – y) = √8, then solve the equation
Answer: x = 3/2, y = 0 (since √8 = 2^(3/2), both exponents equal 3/2, so x + y = x – y = 3/2)
The system kx - y = 2 and 6x - 2y = 3 has a unique solution only when k ≠ ____
Answer: 3
The system x - 2y = 3 and 3x + ky = 1 has a unique solution only when k ≠ ____
Answer: -6
The system x+2y=3 and 5x+ky+7=0 has no solution, when k = _____
Answer: 10
If the lines given by 3x + 2ky = 2 and 2x + 5y + 1 = 0 are parallel then the value of k is ____
Answer: 15/4
For what value of k do the equations kx - 2y = 3 and 3x + y = 5 represent two lines intersecting at a unique point?
Answer: k ≠ -6
The pair of equations x + 2y + 5 = 0 and -3x - 6y + 1 = 0 has _____ solution
Answer: no
The pair of equations 2x + 3y = 5 and 4x + 6y = 15 has _____ solution
Answer: no
Find the value of k for which the system of equations 3x+y=1 and kx+2y=5 has no solution.
Answer: k = 6
Find the value of k for which the system of equations 3x+y=1 and kx+2y=5 has a unique solution.
Answer: k ≠ 6 (a unique solution requires 3/k ≠ 1/2, i.e. k ≠ 6)
If x = -y and y > 0, state whether the statement is correct or wrong :- 1/x – 1/y = 0
Answer: Wrong
If x = -y and y > 0, state whether the statement is correct or wrong :- xy < 0
Answer: Correct
If x = -y and y > 0, state whether the statement is correct or wrong :- x + y = 0
Answer: Correct
If x = -y and y > 0, state whether the statement is correct or wrong :- x²y > 0
Answer: Correct
The graphic representation of the equations x+2y=3 and 2x+4y+7=0 gives a pair of ________ lines.
Answer: parallel
The graphs of the equations 2x+3y-2=0 and x-2y-8=0 are two lines which are
Answer: intersecting exactly at one point
The line of the equations 6x - 2y + 9 = 0 and 3x - y + 12 = 0 are ________
Answer: parallel
The sum of the digits of a two-digit number is 15. The number obtained by interchanging the digits exceeds the given number by 9. The number is
Answer: 78
In a cyclic quadrilateral ABCD, it is being given that
∠A = (x + y + 10) °, ∠B = (y + 20) °,
∠C = (x + y - 30)° and ∠D = (x + y)°. Then, ∠B = ?
Answer: 80°
In a ΔABC, ∠C = 3 ∠B = 2 (∠A + ∠B), then ∠B = ?
Answer: 40° (from 3∠B = 2(∠A + ∠B) we get ∠A = ∠B/2; then ∠A + ∠B + ∠C = 180° gives ∠B = 40°, ∠A = 20°, ∠C = 120°)
If a pair of linear equations is inconsistent then their graph lines will be ______
Answer: parallel
If a pair of linear equations is consistent then their graph lines will be _____
Answer: intersecting or coincident
A two digit number is seven times the sum of its digits. The number formed by reversing the digits is 18 less than the given number. Find the given number. (2013)
Answer: Let the unit's place digit be x and the ten's place digit be y.
Then original number = x + 10y
and reversed number = 10x + y
According to the Question,
x + 10y = 7(x + y)
x + 10y = 7x + 7y
⇒ 10y – 7y = 7x – x
⇒ 3y = 6x ⇒ y = 2x …(A)
(x + 10y) – (10x + y) = 18
x + 10y – 10x – y = 18
⇒ 9y – 9x = 18
⇒ y – x = 2 …[Dividing by 9
⇒ 2x – x = 2 …[From (A)
∴ x = 2
Putting the value of ‘x’ in (A), we get y = 2(2) = 4
∴ Required number = x + 10y
= 2 + 10(4) = 42
The age of the father is twice the sum of the ages of his 2 children. After 20 years, his age will be equal to the sum of the ages of his children. Find the age of the father. (2012)
Answer: Let the present ages of his children be x years and y years.
Then the present age of the father = 2(x + y) …(A)
After 20 years, his children’s ages will be
(x + 20) and (y + 20) years
After 20 years, father’s age will be 2(x + y) + 20
According to the Question,
2(x + y) + 20 = x + 20 + y + 20
2x + 2y + 20 = x + y + 40
2x + 2y – x – y = 40 – 20
x + y = 20 …[From (A)
Present age of father = 2(20) = 40 years
Sita Devi wants to make a rectangular pond on the road side for the purpose of providing drinking water for street animals. The area of the pond will be decreased by 3 square feet if its length is decreased by 2 ft. and breadth is increased by 1 ft. Its area will be increased by 4 square feet if the length is increased by 1 ft. and breadth remains same. Find the dimensions of the pond. (2014)
Answer: Let the length of the rectangular pond = x
breadth of rectangular pond = y
Area of rectangular pond = xy
According to Question,
(x – 2)(y + 1) = xy – 3 ⇒ x – 2y = –1, and (x + 1)y = xy + 4 ⇒ y = 4, so x = 2(4) – 1 = 7
=> Length of rectangular pond = 7 ft.
Breadth of rectangular pond = 4 ft.
On reversing the digits of a two digit number, number obtained is 9 less than three times the original number. If difference of these two numbers is 45, find the original number. (2014)
Answer: Let the unit's place digit be x and the ten's place digit be y.
Original number = x + 10y Reversed number = 10x + y
According to the Question,
10x + y = 3(x + 10y) – 9
10x + y = 3x + 30y – 9
10x + y – 3x – 30y = -9
7x – 29y = -9 …(A)
10x + y – (x + 10y) = 45
9x – 9y = 45
x – y = 5 …[Dividing both sides by 9
x = 5 + y …(B)
Solving (A),
7x – 29y = -9
7(5 + y) – 29y = -9 …[From (B)
35+ 7y – 29y = -9
-22y = -9 – 35
-22y = -44
y = 44/22 = 2
Putting the value of y in (B),
x = 5 + 2 = 7
Original number = x + 10y
= 7 + 10(2) = 27
Speed of a boat in still water is 15 km/h. It goes 30 km upstream and returns back at the same point in 4 hours 30 minutes. Find the speed of the stream. 2017D
Answer: Let the speed of the stream = x km/hr
Speed of boat in still water = 15 km/hr
then, the speed of the boat upstream = (15 – x) km/hr
and the speed of the boat downstream = (15 + x) km/hr
30/(15 – x) + 30/(15 + x) = 9/2 ⇒ 900/(225 – x²) = 9/2 ⇒ 225 – x² = 200 ⇒ x² = 25 ⇒ x = 5
=> Speed of stream = 5 km/hr
The owner of a taxi company decides to run all the taxis on CNG fuel instead of petrol/diesel. The taxi charges in the city comprise fixed charges together with the charge for the distance covered. For a journey of 12 km, the charge paid is ₹89 and for a journey of 20 km, the charge paid is ₹145.
What will a person have to pay for travelling a distance of 30 km? (2014)
Answer: Let the fixed charge = ₹x
and the charge per km = ₹y
According to the Question,
x + 12y = 89 …(A) and x + 20y = 145 …(B)
Subtracting (A) from (B): 8y = 56 ⇒ y = 7
Putting the value of y in (A), we get
x + 12(7) = 89
x + 84 = 89 x = 89 – 84 = 5
Total fare for 30 km = x + 30y = 5 + 30(7)
= 5 + 210 = ₹215
A boat takes 4 hours to go 44 km downstream and it can go 20 km upstream in the same time. Find the speed of the stream and that of the boat in still water. (2015)
Answer: Let the speed of the stream = y km/hr
Let the speed of boat in still water = x km/hr
then, the speed of the boat in downstream = (x + y) km/hr
and, the speed of the boat in upstream = (x – y) km/hr
x + y = 44/4 = 11 …(A) and x – y = 20/4 = 5 …(B); subtracting (B) from (A), 2y = 6 ⇒ y = 3
From (A), x = 11 – 3 = 8
Speed of the stream, y =3 km/hr
Speed of the boat in still water, x = 8 km/hr
A man travels 300 km partly by train and partly by car. He takes 4 hours if the travels 60 km by train and the rest by car. If he travels 100 km by train and the remaining by car, he takes 10 minutes longer. Find the speeds of the train and the car separately. (2017D)
Answer: Let the speed of the train = x km/hr
the speed of car = y km/ hr
60/x + 240/y = 4 and 100/x + 200/y = 25/6; solving, x = 60 and y = 80
=> Speed of the train = 60 km/hr
and speed of the car = 80 km/hr
The owner of a taxi company decides to run all the taxis on CNG fuel instead of petrol/diesel. The taxi charges in city comprises of fixed charges together with the charge for the distance covered. For a journey of 13 km, the charge paid is ₹129 and for a journey of 22 km, the charge paid is ₹210.
What will a person have to pay for travelling a distance of 32 km? (2014 )
Answer: Let the fixed charge be ₹x and the charge for the distance be ₹y per km
According to the Question,
For a journey of 13 km,
x + 13y = 129 x = 129 – 13y …(A)
For a journey of 22 km, x + 22y = 210 …(B)
129 – 13y + 22y = 210 …[From (A)
9y = 210 – 129 = 81
9y = 81 y = 9
From (A), x = 129 – 13(9)
= 129 – 117 = 12
Fixed charge, x = ₹12
The charge for the distance, y = ₹9 per km
To pay for travelling a distance of 32 km
= x + 32y = 12 + 32(9) = 12 + 288 = ₹300
Solve the following pair of linear equations graphically:
x + 3y = 6 ; 2x – 3y = 12
Also find the area of the triangle formed by the lines representing the given equations with y-axis. (2012, 2015)
Answer: (Graph of the two lines omitted.)
By plotting the points and joining them, the lines intersect at A(6, 0)
x = 6, y = 0
Line x + 3y = 6 intersects Y-axis at B(0, 2) and Line 2x – 3y = 12 intersects Y-axis at C(0, -4). Therefore, Area of triangle formed by the lines with y-axis.
Area of triangle
= ½ base × corresponding altitude
= ½ × BC × AO = ½ × 6 × 6 = 18 sq. units
Draw the graphs of following equations:
2x – y = 1; x + 2y = 13
Find the solution of the equations from the graph and shade the triangular region formed by the lines and the y-axis. (2013)
Answer: (Graph of the two lines omitted.)
By plotting the points and joining the lines, they intersect at A(3,5).
=> x = 3, y = 5
∆ABC is the required triangle.
Draw the graphs of the equations x – y + 1 = 0 and 3x + 2y – 12 = 0. Determine the coordinates of the vertices of the triangle formed by these lines and x-axis. (2012, 2017D)
Answer: (Graph of the two lines omitted.)
Lines intersect at (2, 3)
=> x = 2, y = 3
A(2, 3), B(-1, 0) and C(4, 0) are Vertices of ∆ABC
Amit bought two pencils and three chocolates for ₹11 and Sumeet bought one pencil and two chocolates for ₹7. Represent this situation in the form of a pair of linear equations. Find the price of one pencil and that of one chocolate graphically. (2017OD)
Answer: Let the price of one pencil = ₹x and the price of one chocolate = ₹y.
As per the Question,
2x + 3y = 11 and x + 2y = 7 (graph of the two lines omitted)
Lines intersect at (1, 3).
=> x = 1, y = 3
Therefore the price of one pencil = ₹1
& price of one chocolate = ₹3
7x – 5y – 4 = 0 is given. Write another linear equation, so that the lines represented by the pair are:
(i) intersecting
(ii) coincident
(iii) parallel (2015 OD)
Answer: 7x – 5y – 4 = 0
(i) Intersecting lines: any equation whose coefficient ratios differ, e.g. x + y – 1 = 0.
(ii) Coincident lines: any multiple of the given equation, e.g. 14x – 10y – 8 = 0.
(iii) Parallel lines: the same ratio of x and y coefficients but a different constant, e.g. 14x – 10y + 5 = 0.
The cost of 2 kg of orange and 1kg of grapes on a day was found to be Rs.160. After a month, the cost of 4 kg of oranges and 2 kg of grapes is Rs.300. Represent the situation algebraically.
Answer: Let the cost of 1 kg of oranges be Rs. x.
And, let the cost of 1 kg of grapes be ‘Rs. y’.
According to the question, the algebraic representation is
2x + y = 160
And 4x + 2y = 300
For, 2x + y = 160 or y = 160 − 2x, the solution table is;
x: 50, 60, 70
y: 60, 40, 20
For 4x + 2y = 300 or y = (300 – 4x)/ 2, the solution table is;
x: 70, 80, 75
y: 10, -10, 0
Half the perimeter of a rectangular garden, whose length is 4 m more than its width, is 36 m. Find the dimensions for f the garden.
Answer: Given, half the perimeter of the rectangular garden = 36 m
so, 2(l + b)/2 = 36
(l + b) = 36 ……….(1)
Given, the length is 4 m more than its width.
Let width = x
And length = x + 4
Substituting this in eq(1), we get;
x + x + 4 = 36
2x + 4 = 36
2x = 32
x = 16
Therefore, the width is 16 m and the length is 16 + 4 = 20 m.
On comparing the ratios a1/a2, b1/b2, and c1/c2, find out whether the following pair of linear equations are consistent, or inconsistent.
(i) 3x + 2y = 5 ; 2x – 3y = 7
(ii) 2x – 3y = 8 ; 4x – 6y = 9
Answer: (i) Given: 3x + 2y = 5, i.e. 3x + 2y – 5 = 0
and 2x – 3y = 7 or 2x – 3y – 7 = 0
Comparing the above equations with a1x + b1y + c1=0
And a2x + b2y + c2 = 0
We get,
a1 = 3, b1 = 2, c1 = -5
a2 = 2, b2 = -3, c2 = -7
a1/a2 = 3/2, b1/b2 = 2/-3, c1/c2 = -5/-7 = 5/7
Since a1/a2 ≠ b1/b2, the lines intersect each other at a point and have only one possible solution.
Hence, the equations are consistent.
(ii) Given 2x – 3y = 8 and 4x – 6y = 9
Therefore,
a1 = 2, b1 = -3, c1 = -8
a2 = 4, b2 = -6, c2 = -9
a1/a2 = 2/4 = 1/2, b1/b2 = -3/-6 = 1/2, c1/c2 = -8/-9 = 8/9
Since, a1/a2=b1/b2≠c1/c2
Therefore, the lines are parallel to each other and they have no possible solution. Hence, the equations are inconsistent.
Solve the following pair of linear equations by the substitution method.
(i) x + y = 14
x – y = 4
(ii) 3x – y = 3
9x – 3y = 9
Answer: (i) Given,
x + y = 14 and x – y = 4 are the two equations.
From 1st equation, we get,
x = 14 – y
Now, put the value of x in second equation to get,
(14 – y) – y = 4
14 – 2y = 4
2y = 10
Or y = 5
By the value of y, we can now find the value of x;
x = 14 – y
x = 14 – 5
Or x = 9
Hence, x = 9 and y = 5.
(ii) Given,
3x – y = 3 and 9x – 3y = 9 are the two equations.
From 1st equation, we get,
x = (3 + y)/3
Now, substitute the value of x in the given second equation to get,
9[(3 + y)/3] – 3y = 9
3(3+y) – 3y = 9
9 + 3y – 3y = 9
9 = 9
Therefore, y has infinite values and since, x = (3 + y)/3, so x also has infinite values.
Solve 2x + 3y = 11 and 2x – 4y = – 24 and hence find the value of ‘l’ for which y = lx + 3.
Answer: 2x + 3y = 11 …………..(i)
2x – 4y = -24………………………… (ii)
From equation (i), we get;
x = (11 – 3y)/2 ……….…………………………..(iii)
Putting the value of x in equation (ii), we get
2[(11 – 3y)/2] – 4y = −24
11 – 3y – 4y = -24
-7y = -35
y = 5……………………………………..(iv)
Putting the value of y in equation (iii), we get;
x = (11 – 15)/2 = -4/2 = −2
Hence, x = -2, y = 5
Also,
y = lx + 3
5 = -2l +3
-2l = 2
l = -1
Therefore, the value of l is -1
The coach of a cricket team buys 7 bats and 6 pair of pads for Rs.3800. Later, she buys 3 bats and 5pair of pads for Rs.1750. Find the cost of each bat and each pair of pad.
Answer: Let the cost of a bat be Rs. x and the cost of a pair of pads be Rs. y.
According to the question,
7x + 6y = 3800 ………………. (i)
3x + 5y = 1750 ………………. (ii)
From (i), we get;
y = (3800 – 7x)/6 …………………… (iii)
Substituting (iii) in (ii). we get,
3x + 5[(3800 – 7x)/6] = 1750
3x + (9500/3) – (35x/6) = 1750
3x – (35x/6) = 1750 – (9500/3)
(18x – 35x)/6 = (5250 – 9500)/3
-17x/6 = -4250/3
-17x = -8500
x = 500
Putting the value of x in (iii), we get;
y = (3800 – 7 × 500)/6 = 300/6 = 50
Hence, the cost of a bat is Rs 500 and cost of pair of pad is Rs 50
Coordinates of points on the line x + y = 0. Match each value of x (1-4) with the corresponding value of y (A-D):
x: (1) 1, (2) 3, (3) 5, (4) 7
y: (A) -7, (B) -1, (C) -3, (D) -5
Answer: 1-B, 2-C, 3-D, 4-A
Coordinates of points on the line x - y = 0. Match each value of x (1-4) with the corresponding value of y (A-D):
x: (1) 1, (2) 3, (3) 5, (4) 7
y: (A) 7, (B) 5, (C) 3, (D) 1
Answer: 1-D, 2-C, 3-B, 4-A
Coordinates of points on the line 2x + y = 0. Match each value in the first list (1-4) with the corresponding value in the second list (A-D):
x: (1) 4, (2) -6, (3) 8, (4) -10
y: (A) -4, (B) -2, (C) 5, (D) 3
Answer: 1-B, 2-D, 3-A, 4-C
Coordinates of points on the line x + 2y = 0. Match each value of x (1-4) with the corresponding value of y (A-D):
x: (1) -8, (2) -2, (3) 6, (4) 4
y: (A) 1, (B) -2, (C) -3, (D) 4
Answer: 1-D, 2-A, 3-C, 4-B
Coordinates of points on the line 2x - y = 0. Match each value of x (1-4) with the corresponding value of y (A-D):
x: (1) 2, (2) 3, (3) 4, (4) 5
y: (A) 6, (B) 10, (C) 4, (D) 8
Answer: 1-C, 2-A, 3-D, 4-B
Coordinates of points on the line 2x = -3y. Match each value of x (1-4) with the corresponding value of y (A-D):
x: (1) -9, (2) -3, (3) -12, (4) -6
y: (A) 2, (B) 4, (C) 6, (D) 8
Answer: 1-C, 2-A, 3-D, 4-B
Coordinates of points on the line x - 3y = 0. Match each value of x (1-4) with the corresponding value of y (A-D):
x: (1) 6, (2) 9, (3) 12, (4) 3
y: (A) 1, (B) 2, (C) 3, (D) 4
Answer: 1-B, 2-C, 3-D, 4-A
Coordinates of points on the line 2x - 3y = 0. Match each value of x (1-4) with the corresponding value of y (A-D):
x: (1) 3, (2) 6, (3) 9, (4) 12
y: (A) 8, (B) 6, (C) 4, (D) 2
Answer: 1-D, 2-C, 3-B, 4-A
Coordinates of points on the line x - 5y = 0. Match each value of x (1-4) with the corresponding value of y (A-D):
x: (1) 40, (2) 25, (3) 35, (4) 30
y: (A) 5, (B) 6, (C) 7, (D) 8
Answer: 1-D, 2-A, 3-C, 4-B
Single chip photon source brings quantum comms closer
Down at the “basic research” level, there's a lot the labs can accomplish with quantum mechanics: entanglement, information teleportation, simple quantum computations and more. Now, an international collaboration believes it's brought exploitation of quantum effects closer to a commercial development. The researchers have …
COMMENTS
This topic is closed for new posts.
Silver badge
As I understand it * ....
... you can't use entanglement to send information between two places instantaneously (in the sense of sending a bit stream representing the letter 'A', for example), but you can use it to send information between two places (at the speed of photons) in such a way that nobody can intercept or listen in to the data transfer without the end parties being aware that this is happening. Hence its potential for use in very secure data networks.
*From snippets of info I read or heard somewhere.
Bronze badge
Yeah, I've heard of quantum teleportation before but I'm pretty sure they specifically said it couldn't be used to transmit information. Where's the author getting this from?
Pivot Panel Overview
The pivot panel at the lower right is used to add, remove, drag, and move the fields of a pivot table. It consists of fields, areas, and a view manager, and can easily be customized.
The pivot panel has the following four areas:
Filters: controls the data range of the pivot table.
Columns: controls the column distribution of the pivot table.
Rows: controls the row distribution of the pivot table.
Values: controls the aggregated data of the pivot table and how it is aggregated.
Create the pivot panel as shown below, and set the width and height of the "panel" element.
The parameters of the GC.Spread.Pivot.PivotPanel constructor are:
name (string): the name of the pivot panel
pivotTable (GC.Spread.Pivot.PivotTable): the associated pivot table
host (HTMLDivElement): the container that hosts the pivot panel
The pivot panel is only a tool for controlling the pivot table, and it is destroyed automatically when the fromJSON method is used.
A pivot table works even without a pivot panel, so the pivot table supports APIs for attaching a pivot panel to the pivot table, detaching the pivot panel from the pivot table, and destroying the pivot panel.
window.onload = function () { var spread = new GC.Spread.Sheets.Workbook(_getElementById('ss'), { sheetCount: 2 }); initSpread(spread); var pivotLayoutSheet = spread.getSheet(0); initPivotTable(pivotLayoutSheet); }; function initSpread(spread) { spread.suspendPaint(); let sheet = spread.getSheet(1); sheet.name("DataSource"); sheet.setRowCount(117); sheet.setColumnWidth(0, 120); sheet.getCell(-1, 0).formatter("YYYY-mm-DD"); sheet.getRange(-1,4,0,2).formatter("$ #,##0"); sheet.setArray(0, 0, pivotSales); let table = sheet.tables.add('tableSales', 0, 0, 117, 6); for(let i=2;i<=117;i++) { sheet.setFormula(i-1,5,'=D'+i+'*E'+i) } table.style(GC.Spread.Sheets.Tables.TableThemes["none"]); let sheet0 = spread.getSheet(0); sheet0.name("PivotLayout"); spread.resumePaint(); } function initPivotTable(sheet) { let myPivotTable = sheet.pivotTables.add("myPivotTable", "tableSales", 1, 1, GC.Spread.Pivot.PivotTableLayoutType.outline, GC.Spread.Pivot.PivotTableThemes.light8); myPivotTable.suspendLayout(); myPivotTable.options.showRowHeader = true; myPivotTable.options.showColumnHeader = true; myPivotTable.add("salesperson", "Salesperson", GC.Spread.Pivot.PivotTableFieldType.rowField); myPivotTable.add("car", "Cars", GC.Spread.Pivot.PivotTableFieldType.rowField); myPivotTable.add("date", "Date", GC.Spread.Pivot.PivotTableFieldType.columnField); let groupInfo = { originFieldName: "date", dateGroups: [{ by: GC.Pivot.DateGroupType.quarters }] }; myPivotTable.group(groupInfo); myPivotTable.add("total", "Totals", GC.Spread.Pivot.PivotTableFieldType.valueField, GC.Pivot.SubtotalType.sum); var panel = new GC.Spread.Pivot.PivotPanel("myPivotPanel", myPivotTable, document.getElementById("panel")); panel.sectionVisibility(GC.Spread.Pivot.PivotPanelSection.fields + GC.Spread.Pivot.PivotPanelSection.area); myPivotTable.resumeLayout(); myPivotTable.autoFitColumn(); } function _getElementById(id) { return document.getElementById(id); }
<!doctype html> <html style="height:100%;font-size:14px;"> <head> <meta name="spreadjs culture" content="ja-jp" /> <meta charset="utf-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <link rel="stylesheet" type="text/css" href="$DEMOROOT$/ja/purejs/node_modules/@grapecity/spread-sheets/styles/gc.spread.sheets.excel2013white.css"> <script src="$DEMOROOT$/ja/purejs/node_modules/@grapecity/spread-sheets/dist/gc.spread.sheets.all.min.js" type="text/javascript"></script> <script src="$DEMOROOT$/ja/purejs/node_modules/@grapecity/spread-sheets-shapes/dist/gc.spread.sheets.shapes.min.js" type="text/javascript"></script> <script src="$DEMOROOT$/ja/purejs/node_modules/@grapecity/spread-sheets-pivot-addon/dist/gc.spread.pivot.pivottables.min.js" type="text/javascript"></script> <script src="$DEMOROOT$/ja/purejs/node_modules/@grapecity/spread-sheets-resources-ja/dist/gc.spread.sheets.resources.ja.min.js" type="text/javascript"></script> <script src="$DEMOROOT$/spread/source/data/pivot-data.js" type="text/javascript"></script> <script src="$DEMOROOT$/spread/source/js/license.js" type="text/javascript"></script> <script src="app.js" type="text/javascript"></script> <link rel="stylesheet" type="text/css" href="styles.css"> </head> <body> <div class="sample-tutorial"> <div id="ss" class="sample-spreadsheets"></div> <div class="sample-panel"> <div id="panel"></div> </div> </div> </body> </html>
.sample-tutorial { position: relative; height: 100%; } .sample-spreadsheets { width: calc(100% - 300px); height: 100%; overflow: hidden; float: left; } body { position: absolute; top: 0; bottom: 0; left: 0; right: 0; overflow: hidden; } .sample-panel { float: right; width: 300px; padding: 12px; height: 100%; box-sizing: border-box; background: #fbfbfb; overflow: auto; } .gc-panel { padding: 10px; background-color: rgb(230, 230, 230); } #panel { position: absolute; right: 0; width: 300px; height: 100%; top: 0; } #app { height: 100%; } .gc-panel .gc-panel-header h2 { font-size: 14px; } .gc-panel-fields-section span{ font-size: 12px; }
Why can't I put an XML template in WordPress? (4 posts)
1. mathias1992
Member
Posted 3 years ago #
When I try to put an XML template in my root directory through FTP, my WordPress site
gives a 404 Not Found error.
Does anyone know why this happens?
Thanks.
2. s_ha_dum
Member
Posted 3 years ago #
Do you get the error when you try to FTP the file in or after you FTP it in and try to visit your site?
3. mathias1992
Member
Posted 3 years ago #
It gives the error when I visit my site,
after I put the XML template on my FTP account (my site).
4. s_ha_dum
Member
Posted 3 years ago #
WordPress generally ignores files that don't conflict with it. What is the name of the file and what is in it? I am assuming you are putting it in your server root?
Do you have a URL?
Topic Closed
This topic has been closed to new replies.
10 Times Table
Learn the 10 times table here: 10 x 1 = 10, 10 x 2 = 20, 10 x 3 = 30, 10 x 4 = 40, 10 x 5 = 50, 10 x 6 = 60, 10 x 7 = 70, 10 x 8 = 80, 10 x 9 = 90, 10 x 10 = 100.
Hello, math enthusiasts! Are you ready to embark on another exciting mathematical adventure? Today, we're diving into the world of the 10 times table.
Whether you're a curious learner or someone looking to strengthen your multiplication skills, get ready for an illuminating journey!
So, let's explore the wonders of the 10 times table together and unlock its hidden treasures.
The Marvel of Multiplying by 10
The 10 times table holds a special place in the world of multiplication. It's where we discover the power of multiplying numbers by 10 and how it influences our understanding of place value.
Get ready to witness the magic and simplicity of this fascinating table!
The Magnificent Zeroes
In the 10 times table, we encounter a delightful pattern when we multiply any number by 10. The result is always a number with a zero at the end.
For instance, 10 times 1 is 10, 10 times 2 is 20, 10 times 3 is 30, and so on. It's like adding a touch of magic to our multiplication, where the zeroes add a sense of order and place value.
Tips and Tricks
Let's equip ourselves with some handy tips and tricks to make working with the 10 times table even more enjoyable!
• The Zero Magic: Just like in previous tables, when we multiply any number by 0, the result is always 0. So, if you come across 10 times 0, remember that the answer is a magical 0. Zero always has a way of surprising us!
• The Power of Place Value: Multiplying a number by 10 simply means moving all the digits one place to the left, adding a zero in the empty space. This is because multiplying by 10 increases the place value of each digit by one. Understanding place value helps us visualize and simplify calculations.
• The Shortcut of Division: Dividing a number by 10 is the same as multiplying it by 0.1. This is because dividing by 10 moves all the digits one place to the right, effectively reducing their place value by one. This shortcut comes in handy when dealing with decimals and understanding the relationship between multiplication and division. Both shortcuts are shown in the quick worked example right after this list.
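Here's a quick worked example of both shortcuts, using the two-digit number 47:
47 x 10 = 470 (the 4 and the 7 each slide one place to the left, and a zero fills the empty ones place)
470 ÷ 10 = 47 (every digit slides one place back to the right)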
Real-Life Connections
Now, let's explore how the 10 times table connects to our everyday lives. From counting objects to solving practical problems, the 10 times table proves its relevance time and time again!
• Counting Money: Imagine you have 4 stacks of 10-dollar bills, with each stack containing 10 bills. By utilizing the 10 times table, you can quickly calculate that you have a total of 40 bills, which amounts to $400. It's like multiplying the number of stacks by the number of bills in each stack to find the total value of your money.
• Time Calculation: Let's say you have a cooking recipe that requires 15 minutes of baking time, and you want to know how long it will take if you bake it for 10 rounds. By using the 10 times table, you can find out that you'll spend a total of 150 minutes baking. Time to savor the delicious results!
Ten Multiplication Table
Read, repeat, and learn the ten times table:
10 x 1 = 10
10 x 2 = 20
10 x 3 = 30
10 x 4 = 40
10 x 5 = 50
10 x 6 = 60
10 x 7 = 70
10 x 8 = 80
10 x 9 = 90
10 x 10 = 100
Math
If two angles of one triangle are congruent to two angles of another triangle, these triangles must be
a) scalene
b) similar
c) congruent
d) isosceles
Don't all triangles have 180 degrees? So if two angles were congruent, what about the third?
I still don't get it. I would guess c)congruent?
b)
asked by Becky
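Here is the reasoning behind the 180-degree hint: the angles of every triangle add up to 180 degrees, so if two angles of one triangle are congruent to two angles of another, the third angles must be congruent as well, because
third angle = 180° − (first angle + second angle)
in both triangles. Having all three angles congruent makes the triangles similar (the AA similarity rule), but it does not force the sides to be the same length, so the triangles are not necessarily congruent. That is why the answer is b) similar.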
Bungie Name Guide
A Bungie Name is a player's identity used across Bungie's games, websites, and services, and is a part of Cross Play for Destiny 2.
All Bungie Names include a hash and numeric ID, and will look similar to the below example:
• "Bungie Name": PlayerName#1234
• "Display Name": PlayerName
• "Hash": #
• "Numeric ID": 1234
Creating a Bungie Name
Bungie Names are auto-created when players first log in to Destiny 2 using a player's platform name. New players can choose their Bungie Name by changing their platform name prior to logging into Destiny 2 for the first time. Numeric IDs are random and cannot be chosen.
Names are processed through Bungie's character filtration and offensive term moderation system. Names that are against Bungie's policies, or any character that can't be displayed in-game or can't be typed into the player search box via console virtual keyboards, will be removed. Players with a platform name that is longer than 26 characters will have their Bungie Name shortened to a maximum of 26 characters.
If an auto-created name becomes empty or becomes offensive due to our filtration process, it will be changed to "Guardian[Random Number]."
Platform account names can still be viewed in-game by hovering over a player in the Roster screen of the Director.
PLEASE NOTE!
Typing in Unicode characters, alternate keyboard characters, or emojis may result in unexpected issues, such as being unable to be located when using Search, as well as display issues. Players should keep in mind that the following are not recommended for use in a Bungie Name due to the above issues:
• Blank characters
• Unicode characters outside game-supported languages
• Emojis
• Punctuation outside game-supported languages
For more information, please view our Text-based Language Filtering FAQ.
Changing a Bungie Name
Players have one (1) Bungie Name change available to them that can be used on Bungie.net by following the steps below:
1. Log in to Bungie.net with a platform account
2. Go to Profile Settings
3. Update the Bungie Name under "Identity Settings"
4. Press the "Save" button
A player's platform account(s) will appear as a recommendation when attempting to change a Bungie Name. These can be chosen as long as they don't violate our policies or Code of Conduct. The Numeric ID in a Bungie Name cannot be manually changed; it only updates when the name is changed, at which point it is automatically set to a new random set of numbers.
Banned Name Policies
Any Bungie Name that violates these policies or our Code of Conduct will be changed to "Guardian[Random Number]" and players may face an in-game ban. Changes performed by Bungie may consume a player's unused name change.
Policies include, but aren't limited to:
• An auto-created name that becomes empty or becomes offensive due to our filtration process
• Mentions of Bungie, BNG, etc. or impersonation of a Bungie employee
• Personal identifying information (i.e. first and last names, email addresses, phone numbers, etc.)
• Hate speech
• Implicit or explicit sexual terms
• Website addresses
• Profanity
Bungie reserves the right to moderate and remove players in our games and websites for any reason at our own discretion.
blob: ad5f6b593df45f01360f3daa8b37d024ee793e9e
/*
* Totally braindamaged mbox splitter program.
*
* It just splits a mbox into a list of files: "0001" "0002" ..
* so you can process them further from there.
*/
#include "cache.h"
#include "builtin.h"
#include "string-list.h"
static const char git_mailsplit_usage[] =
"git mailsplit [-d<prec>] [-f<n>] [-b] -o<directory> [<mbox>|<Maildir>...]";
static int is_from_line(const char *line, int len)
{
const char *colon;
if (len < 20 || memcmp("From ", line, 5))
return 0;
colon = line + len - 2;
line += 5;
for (;;) {
if (colon < line)
return 0;
if (*--colon == ':')
break;
}
if (!isdigit(colon[-4]) ||
!isdigit(colon[-2]) ||
!isdigit(colon[-1]) ||
!isdigit(colon[ 1]) ||
!isdigit(colon[ 2]))
return 0;
/* year */
if (strtol(colon+3, NULL, 10) <= 90)
return 0;
/* Ok, close enough */
return 1;
}
/* Could be as small as 64, enough to hold a Unix "From " line. */
static char buf[4096];
/* We cannot use fgets() because our lines can contain NULs */
int read_line_with_nul(char *buf, int size, FILE *in)
{
int len = 0, c;
for (;;) {
c = getc(in);
if (c == EOF)
break;
buf[len++] = c;
if (c == '\n' || len + 1 >= size)
break;
}
buf[len] = '\0';
return len;
}
/* Called with the first line (potentially partial)
* already in buf[] -- normally that should begin with
* the Unix "From " line. Write it into the specified
* file.
*/
static int split_one(FILE *mbox, const char *name, int allow_bare)
{
FILE *output = NULL;
int len = strlen(buf);
int fd;
int status = 0;
int is_bare = !is_from_line(buf, len);
if (is_bare && !allow_bare)
goto corrupt;
fd = open(name, O_WRONLY | O_CREAT | O_EXCL, 0666);
if (fd < 0)
die_errno("cannot open output file '%s'", name);
output = fdopen(fd, "w");
/* Copy it out, while searching for a line that begins with
* "From " and having something that looks like a date format.
*/
for (;;) {
int is_partial = len && buf[len-1] != '\n';
if (fwrite(buf, 1, len, output) != len)
die_errno("cannot write output");
len = read_line_with_nul(buf, sizeof(buf), mbox);
if (len == 0) {
if (feof(mbox)) {
status = 1;
break;
}
die_errno("cannot read mbox");
}
if (!is_partial && !is_bare && is_from_line(buf, len))
break; /* done with one message */
}
fclose(output);
return status;
corrupt:
if (output)
fclose(output);
unlink(name);
fprintf(stderr, "corrupt mailbox\n");
exit(1);
}
static int populate_maildir_list(struct string_list *list, const char *path)
{
DIR *dir;
struct dirent *dent;
char name[PATH_MAX];
char *subs[] = { "cur", "new", NULL };
char **sub;
for (sub = subs; *sub; ++sub) {
snprintf(name, sizeof(name), "%s/%s", path, *sub);
if ((dir = opendir(name)) == NULL) {
if (errno == ENOENT)
continue;
error("cannot opendir %s (%s)", name, strerror(errno));
return -1;
}
while ((dent = readdir(dir)) != NULL) {
if (dent->d_name[0] == '.')
continue;
snprintf(name, sizeof(name), "%s/%s", *sub, dent->d_name);
string_list_insert(name, list);
}
closedir(dir);
}
return 0;
}
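/* Split every message found in the maildir's "cur" and "new" subdirectories
 * into its own numbered file under 'dir'. Returns the last sequence number
 * used, or -1 on error. */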
static int split_maildir(const char *maildir, const char *dir,
int nr_prec, int skip)
{
char file[PATH_MAX];
char name[PATH_MAX];
int ret = -1;
int i;
struct string_list list = {NULL, 0, 0, 1};
if (populate_maildir_list(&list, maildir) < 0)
goto out;
for (i = 0; i < list.nr; i++) {
FILE *f;
snprintf(file, sizeof(file), "%s/%s", maildir, list.items[i].string);
f = fopen(file, "r");
if (!f) {
error("cannot open mail %s (%s)", file, strerror(errno));
goto out;
}
if (fgets(buf, sizeof(buf), f) == NULL) {
error("cannot read mail %s (%s)", file, strerror(errno));
goto out;
}
sprintf(name, "%s/%0*d", dir, nr_prec, ++skip);
split_one(f, name, 1);
fclose(f);
}
ret = skip;
out:
string_list_clear(&list, 1);
return ret;
}
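/* Split an mbox (or standard input when 'file' is "-") into numbered files
 * under 'dir'. Returns the last sequence number used, or -1 on error. */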
static int split_mbox(const char *file, const char *dir, int allow_bare,
int nr_prec, int skip)
{
char name[PATH_MAX];
int ret = -1;
int peek;
FILE *f = !strcmp(file, "-") ? stdin : fopen(file, "r");
int file_done = 0;
if (!f) {
error("cannot open mbox %s", file);
goto out;
}
do {
peek = fgetc(f);
} while (isspace(peek));
ungetc(peek, f);
if (fgets(buf, sizeof(buf), f) == NULL) {
/* empty stdin is OK */
if (f != stdin) {
error("cannot read mbox %s", file);
goto out;
}
file_done = 1;
}
while (!file_done) {
sprintf(name, "%s/%0*d", dir, nr_prec, ++skip);
file_done = split_one(f, name, allow_bare);
}
if (f != stdin)
fclose(f);
ret = skip;
out:
return ret;
}
int cmd_mailsplit(int argc, const char **argv, const char *prefix)
{
int nr = 0, nr_prec = 4, num = 0;
int allow_bare = 0;
const char *dir = NULL;
const char **argp;
static const char *stdin_only[] = { "-", NULL };
for (argp = argv+1; *argp; argp++) {
const char *arg = *argp;
if (arg[0] != '-')
break;
/* do flags here */
if ( arg[1] == 'd' ) {
nr_prec = strtol(arg+2, NULL, 10);
if (nr_prec < 3 || 10 <= nr_prec)
usage(git_mailsplit_usage);
continue;
} else if ( arg[1] == 'f' ) {
nr = strtol(arg+2, NULL, 10);
} else if ( arg[1] == 'b' && !arg[2] ) {
allow_bare = 1;
} else if ( arg[1] == 'o' && arg[2] ) {
dir = arg+2;
} else if ( arg[1] == '-' && !arg[2] ) {
argp++; /* -- marks end of options */
break;
} else {
die("unknown option: %s", arg);
}
}
if ( !dir ) {
/* Backwards compatibility: if no -o specified, accept
<mbox> <dir> or just <dir> */
switch (argc - (argp-argv)) {
case 1:
dir = argp[0];
argp = stdin_only;
break;
case 2:
stdin_only[0] = argp[0];
dir = argp[1];
argp = stdin_only;
break;
default:
usage(git_mailsplit_usage);
}
} else {
/* New usage: if no more argument, parse stdin */
if ( !*argp )
argp = stdin_only;
}
while (*argp) {
const char *arg = *argp++;
struct stat argstat;
int ret = 0;
if (arg[0] == '-' && arg[1] == 0) {
ret = split_mbox(arg, dir, allow_bare, nr_prec, nr);
if (ret < 0) {
error("cannot split patches from stdin");
return 1;
}
num += (ret - nr);
nr = ret;
continue;
}
if (stat(arg, &argstat) == -1) {
error("cannot stat %s (%s)", arg, strerror(errno));
return 1;
}
if (S_ISDIR(argstat.st_mode))
ret = split_maildir(arg, dir, nr_prec, nr);
else
ret = split_mbox(arg, dir, allow_bare, nr_prec, nr);
if (ret < 0) {
error("cannot split patches from %s", arg);
return 1;
}
num += (ret - nr);
nr = ret;
}
printf("%d\n", num);
return 0;
}
Excel Formula: Match and Return Data using VLOOKUP
Formula for Excel that checks for matches using VLOOKUP and returns data from a specific column
Formula Generator | 9 months ago
In Excel, you can use the IF, AND, and VLOOKUP functions together to check for matches between two columns and return data from a specific column. This formula is useful when you have a dataset and want to find matching values in two columns, and if there is a match, retrieve data from another column. The formula uses the VLOOKUP function to search for values in the specified columns and the IF and AND functions to determine if both conditions are true. If there is a match, the formula returns the corresponding value from the desired column; otherwise, it returns 'No Match'. This guide will explain how to construct and use this formula in Excel.
The requested formula is:
=IF(AND(G4=VLOOKUP(G4,C:C,1,FALSE), H4=VLOOKUP(H4,E:E,1,FALSE)), VLOOKUP(G4,D:D,1,FALSE), "No Match")
Explanation:
This formula uses the IF function along with the AND function and VLOOKUP function to achieve the desired result.
1. The VLOOKUP function is used to search for a value in a specified column and return a corresponding value from another column in the same row.
2. In this formula, the first VLOOKUP function VLOOKUP(G4,C:C,1,FALSE) is used to check if the value in cell G4 exists in column C. The C:C represents the entire column C. The 1 indicates that we want to return the value from the first column of the lookup range. The FALSE parameter ensures an exact match.
3. Similarly, the second VLOOKUP function VLOOKUP(H4,E:E,1,FALSE) is used to check if the value in cell H4 exists in column E.
4. The AND function is used to check if both conditions are true. If both conditions are true, it returns TRUE; otherwise, it returns FALSE.
5. The IF function is used to determine the final result. If the AND function returns TRUE, it means both values exist in the respective columns. In that case, the formula uses another VLOOKUP function VLOOKUP(G4,D:D,1,FALSE) to return the corresponding value from column D. The D:D represents the entire column D. If the AND function returns FALSE, it means either one or both values do not exist, and the formula returns "No Match".
Examples:
Let's consider the following data:
| G | H | D |
|-------|-------|-------|
| 1 | A | 100 |
| 2 | B | 200 |
| 3 | C | 300 |
Example 1: If G4 = 2 and H4 = B, the formula will check if 2 exists in column C and B exists in column E. Since both conditions are true, the formula will return the corresponding value from column D, which is 200.
Example 2: If G4 = 4 and H4 = D, the formula will check if 4 exists in column C and D exists in column E. Since the condition in column C is false, the formula will return "No Match".
Example 3: If G4 = 1 and H4 = A, the formula will check if 1 exists in column C and A exists in column E. Since both conditions are true, the formula will return the corresponding value from column D, which is 100.
This article was generated with AI. AI can make mistakes, consider checking important information.
Can I restart the Concurrency Agent object after it has done its work?
Short answer is No.
If you look at the life cycle described here, you'll see the following:
Agents have a set life cycle. The concurrency::agent_status enumeration defines the various states of an agent. The following illustration is a state diagram that shows how agents progress from one state to another. In this illustration, solid lines represent methods that you call from your application; dotted lines represent methods that are called from the runtime.
(state diagram of the agent life cycle)
This shows clearly that once your agent has entered the done or cancelled state, there's no way back.
Also, if you look at the agent::start documentation, you see this:
Moves an agent from the agent_created state to the agent_runnable state, and schedules it for execution.
and this:
An agent that has been canceled cannot be started.
Although this doesn't mention the done state, I've found from experience that once it's done, it's done. The state sequence diagram shows a one-way trip for all paths.
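In practice, the usual way to "restart" the work is simply to construct a fresh agent object once the old one has finished. A minimal sketch of that pattern (assuming MSVC's Concurrency Runtime and <agents.h>; the worker_agent class and its output are made up for illustration):

#include <agents.h>
#include <iostream>
#include <memory>

// A trivial agent that does its work once and then completes.
class worker_agent : public concurrency::agent
{
protected:
    void run() override
    {
        std::cout << "doing the work once\n";
        done();   // moves this agent into agent_done -- a terminal state
    }
};

int main()
{
    auto a = std::make_unique<worker_agent>();
    a->start();
    concurrency::agent::wait(a.get());    // agent is now agent_done; start() would not run it again

    a = std::make_unique<worker_agent>(); // "restart" = replace it with a new instance
    a->start();
    concurrency::agent::wait(a.get());
}

Since start() only moves an agent from agent_created to agent_runnable, swapping in a new instance is the closest thing to a restart the runtime offers.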
Thank you for answer. – 23W Dec 17 '13 at 8:20
Convert from ISO-8859-1 to UTF-8. See aa812e8.
[tinycc.git] / tccgen.c
blob: 3da1143a2d29d543404218bdd2010d16be60c717
1 /*
2 * TCC - Tiny C Compiler
3 *
4 * Copyright (c) 2001-2004 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 #include "tcc.h"
23 /********************************************************/
24 /* global variables */
26 /* loc : local variable index
27 ind : output code index
28 rsym: return symbol
29 anon_sym: anonymous symbol index
31 ST_DATA int rsym, anon_sym, ind, loc;
33 ST_DATA Sym *sym_free_first;
34 ST_DATA void **sym_pools;
35 ST_DATA int nb_sym_pools;
37 ST_DATA Sym *global_stack;
38 ST_DATA Sym *local_stack;
39 ST_DATA Sym *define_stack;
40 ST_DATA Sym *global_label_stack;
41 ST_DATA Sym *local_label_stack;
42 static int local_scope;
43 static int in_sizeof;
44 static int section_sym;
46 ST_DATA int vlas_in_scope; /* number of VLAs that are currently in scope */
47 ST_DATA int vla_sp_root_loc; /* vla_sp_loc for SP before any VLAs were pushed */
48 ST_DATA int vla_sp_loc; /* Pointer to variable holding location to store stack pointer on the stack when modifying stack pointer */
50 ST_DATA SValue __vstack[1+VSTACK_SIZE], *vtop, *pvtop;
52 ST_DATA int const_wanted; /* true if constant wanted */
53 ST_DATA int nocode_wanted; /* no code generation wanted */
54 #define NODATA_WANTED (nocode_wanted > 0) /* no static data output wanted either */
55 #define STATIC_DATA_WANTED (nocode_wanted & 0xC0000000) /* only static data output */
56 ST_DATA int global_expr; /* true if compound literals must be allocated globally (used during initializers parsing */
57 ST_DATA CType func_vt; /* current function return type (used by return instruction) */
58 ST_DATA int func_var; /* true if current function is variadic (used by return instruction) */
59 ST_DATA int func_vc;
60 ST_DATA int last_line_num, last_ind, func_ind; /* debug last line number and pc */
61 ST_DATA const char *funcname;
62 ST_DATA int g_debug;
64 ST_DATA CType char_pointer_type, func_old_type, int_type, size_type;
66 ST_DATA struct switch_t {
67 struct case_t {
68 int64_t v1, v2;
69 int sym;
70 } **p; int n; /* list of case ranges */
71 int def_sym; /* default symbol */
72 } *cur_switch; /* current switch */
74 /* ------------------------------------------------------------------------- */
76 static void gen_cast(CType *type);
77 static void gen_cast_s(int t);
78 static inline CType *pointed_type(CType *type);
79 static int is_compatible_types(CType *type1, CType *type2);
80 static int parse_btype(CType *type, AttributeDef *ad);
81 static CType *type_decl(CType *type, AttributeDef *ad, int *v, int td);
82 static void parse_expr_type(CType *type);
83 static void init_putv(CType *type, Section *sec, unsigned long c);
84 static void decl_initializer(CType *type, Section *sec, unsigned long c, int first, int size_only);
85 static void block(int *bsym, int *csym, int is_expr);
86 static void decl_initializer_alloc(CType *type, AttributeDef *ad, int r, int has_init, int v, int scope);
87 static void decl(int l);
88 static int decl0(int l, int is_for_loop_init, Sym *);
89 static void expr_eq(void);
90 static void vla_runtime_type_size(CType *type, int *a);
91 static void vla_sp_restore(void);
92 static void vla_sp_restore_root(void);
93 static int is_compatible_unqualified_types(CType *type1, CType *type2);
94 static inline int64_t expr_const64(void);
95 static void vpush64(int ty, unsigned long long v);
96 static void vpush(CType *type);
97 static int gvtst(int inv, int t);
98 static void gen_inline_functions(TCCState *s);
99 static void skip_or_save_block(TokenString **str);
100 static void gv_dup(void);
102 ST_INLN int is_float(int t)
104 int bt;
105 bt = t & VT_BTYPE;
106 return bt == VT_LDOUBLE || bt == VT_DOUBLE || bt == VT_FLOAT || bt == VT_QFLOAT;
109 /* we use our own 'finite' function to avoid potential problems with
110 non standard math libs */
111 /* XXX: endianness dependent */
112 ST_FUNC int ieee_finite(double d)
114 int p[4];
115 memcpy(p, &d, sizeof(double));
116 return ((unsigned)((p[1] | 0x800fffff) + 1)) >> 31;
119 ST_FUNC void test_lvalue(void)
121 if (!(vtop->r & VT_LVAL))
122 expect("lvalue");
125 ST_FUNC void check_vstack(void)
127 if (pvtop != vtop)
128 tcc_error("internal compiler error: vstack leak (%d)", vtop - pvtop);
131 /* ------------------------------------------------------------------------- */
132 /* vstack debugging aid */
134 #if 0
135 void pv (const char *lbl, int a, int b)
137 int i;
138 for (i = a; i < a + b; ++i) {
139 SValue *p = &vtop[-i];
140 printf("%s vtop[-%d] : type.t:%04x r:%04x r2:%04x c.i:%d\n",
141 lbl, i, p->type.t, p->r, p->r2, (int)p->c.i);
144 #endif
146 /* ------------------------------------------------------------------------- */
147 /* start of translation unit info */
148 ST_FUNC void tcc_debug_start(TCCState *s1)
150 if (s1->do_debug) {
151 char buf[512];
153 /* file info: full path + filename */
154 section_sym = put_elf_sym(symtab_section, 0, 0,
155 ELFW(ST_INFO)(STB_LOCAL, STT_SECTION), 0,
156 text_section->sh_num, NULL);
157 getcwd(buf, sizeof(buf));
158 #ifdef _WIN32
159 normalize_slashes(buf);
160 #endif
161 pstrcat(buf, sizeof(buf), "/");
162 put_stabs_r(buf, N_SO, 0, 0,
163 text_section->data_offset, text_section, section_sym);
164 put_stabs_r(file->filename, N_SO, 0, 0,
165 text_section->data_offset, text_section, section_sym);
166 last_ind = 0;
167 last_line_num = 0;
170 /* an elf symbol of type STT_FILE must be put so that STB_LOCAL
171 symbols can be safely used */
172 put_elf_sym(symtab_section, 0, 0,
173 ELFW(ST_INFO)(STB_LOCAL, STT_FILE), 0,
174 SHN_ABS, file->filename);
177 /* put end of translation unit info */
178 ST_FUNC void tcc_debug_end(TCCState *s1)
180 if (!s1->do_debug)
181 return;
182 put_stabs_r(NULL, N_SO, 0, 0,
183 text_section->data_offset, text_section, section_sym);
187 /* generate line number info */
188 ST_FUNC void tcc_debug_line(TCCState *s1)
190 if (!s1->do_debug)
191 return;
192 if ((last_line_num != file->line_num || last_ind != ind)) {
193 put_stabn(N_SLINE, 0, file->line_num, ind - func_ind);
194 last_ind = ind;
195 last_line_num = file->line_num;
199 /* put function symbol */
200 ST_FUNC void tcc_debug_funcstart(TCCState *s1, Sym *sym)
202 char buf[512];
204 if (!s1->do_debug)
205 return;
207 /* stabs info */
208 /* XXX: we put here a dummy type */
209 snprintf(buf, sizeof(buf), "%s:%c1",
210 funcname, sym->type.t & VT_STATIC ? 'f' : 'F');
211 put_stabs_r(buf, N_FUN, 0, file->line_num, 0,
212 cur_text_section, sym->c);
213 /* //gr gdb wants a line at the function */
214 put_stabn(N_SLINE, 0, file->line_num, 0);
216 last_ind = 0;
217 last_line_num = 0;
220 /* put function size */
221 ST_FUNC void tcc_debug_funcend(TCCState *s1, int size)
223 if (!s1->do_debug)
224 return;
225 put_stabn(N_FUN, 0, 0, size);
228 /* ------------------------------------------------------------------------- */
229 ST_FUNC int tccgen_compile(TCCState *s1)
231 cur_text_section = NULL;
232 funcname = "";
233 anon_sym = SYM_FIRST_ANOM;
234 section_sym = 0;
235 const_wanted = 0;
236 nocode_wanted = 0x80000000;
238 /* define some often used types */
239 int_type.t = VT_INT;
240 char_pointer_type.t = VT_BYTE;
241 mk_pointer(&char_pointer_type);
242 #if PTR_SIZE == 4
243 size_type.t = VT_INT;
244 #else
245 size_type.t = VT_LLONG;
246 #endif
247 func_old_type.t = VT_FUNC;
248 func_old_type.ref = sym_push(SYM_FIELD, &int_type, 0, 0);
249 func_old_type.ref->f.func_call = FUNC_CDECL;
250 func_old_type.ref->f.func_type = FUNC_OLD;
252 tcc_debug_start(s1);
254 #ifdef TCC_TARGET_ARM
255 arm_init(s1);
256 #endif
258 #ifdef INC_DEBUG
259 printf("%s: **** new file\n", file->filename);
260 #endif
262 parse_flags = PARSE_FLAG_PREPROCESS | PARSE_FLAG_TOK_NUM | PARSE_FLAG_TOK_STR;
263 next();
264 decl(VT_CONST);
265 if (tok != TOK_EOF)
266 expect("declaration");
268 gen_inline_functions(s1);
269 check_vstack();
270 /* end of translation unit info */
271 tcc_debug_end(s1);
272 return 0;
275 /* ------------------------------------------------------------------------- */
276 /* apply storage attributes to Elf symbol */
278 static void update_storage(Sym *sym)
280 ElfW(Sym) *esym;
281 if (0 == sym->c)
282 return;
283 esym = &((ElfW(Sym) *)symtab_section->data)[sym->c];
284 if (sym->a.visibility)
285 esym->st_other = (esym->st_other & ~ELFW(ST_VISIBILITY)(-1))
286 | sym->a.visibility;
287 if (sym->a.weak)
288 esym->st_info = ELFW(ST_INFO)(STB_WEAK, ELFW(ST_TYPE)(esym->st_info));
289 #ifdef TCC_TARGET_PE
290 if (sym->a.dllimport)
291 esym->st_other |= ST_PE_IMPORT;
292 if (sym->a.dllexport)
293 esym->st_other |= ST_PE_EXPORT;
294 #endif
295 #if 0
296 printf("storage %s: vis=%d weak=%d exp=%d imp=%d\n",
297 get_tok_str(sym->v, NULL),
298 sym->a.visibility,
299 sym->a.weak,
300 sym->a.dllexport,
301 sym->a.dllimport
303 #endif
306 /* ------------------------------------------------------------------------- */
307 /* update sym->c so that it points to an external symbol in section
308 'section' with value 'value' */
310 ST_FUNC void put_extern_sym2(Sym *sym, Section *section,
311 addr_t value, unsigned long size,
312 int can_add_underscore)
314 int sym_type, sym_bind, sh_num, info, other, t;
315 ElfW(Sym) *esym;
316 const char *name;
317 char buf1[256];
318 #ifdef CONFIG_TCC_BCHECK
319 char buf[32];
320 #endif
322 if (section == NULL)
323 sh_num = SHN_UNDEF;
324 else if (section == SECTION_ABS)
325 sh_num = SHN_ABS;
326 else
327 sh_num = section->sh_num;
329 if (!sym->c) {
330 name = get_tok_str(sym->v, NULL);
331 #ifdef CONFIG_TCC_BCHECK
332 if (tcc_state->do_bounds_check) {
333 /* XXX: avoid doing that for statics ? */
334 /* if bound checking is activated, we change some function
335 names by adding the "__bound" prefix */
336 switch(sym->v) {
337 #ifdef TCC_TARGET_PE
338 /* XXX: we rely only on malloc hooks */
339 case TOK_malloc:
340 case TOK_free:
341 case TOK_realloc:
342 case TOK_memalign:
343 case TOK_calloc:
344 #endif
345 case TOK_memcpy:
346 case TOK_memmove:
347 case TOK_memset:
348 case TOK_strlen:
349 case TOK_strcpy:
350 case TOK_alloca:
351 strcpy(buf, "__bound_");
352 strcat(buf, name);
353 name = buf;
354 break;
357 #endif
358 t = sym->type.t;
359 if ((t & VT_BTYPE) == VT_FUNC) {
360 sym_type = STT_FUNC;
361 } else if ((t & VT_BTYPE) == VT_VOID) {
362 sym_type = STT_NOTYPE;
363 } else {
364 sym_type = STT_OBJECT;
366 if (t & VT_STATIC)
367 sym_bind = STB_LOCAL;
368 else
369 sym_bind = STB_GLOBAL;
370 other = 0;
371 #ifdef TCC_TARGET_PE
372 if (sym_type == STT_FUNC && sym->type.ref) {
373 Sym *ref = sym->type.ref;
374 if (ref->f.func_call == FUNC_STDCALL && can_add_underscore) {
375 sprintf(buf1, "_%s@%d", name, ref->f.func_args * PTR_SIZE);
376 name = buf1;
377 other |= ST_PE_STDCALL;
378 can_add_underscore = 0;
381 #endif
382 if (tcc_state->leading_underscore && can_add_underscore) {
383 buf1[0] = '_';
384 pstrcpy(buf1 + 1, sizeof(buf1) - 1, name);
385 name = buf1;
387 if (sym->asm_label)
388 name = get_tok_str(sym->asm_label, NULL);
389 info = ELFW(ST_INFO)(sym_bind, sym_type);
390 sym->c = set_elf_sym(symtab_section, value, size, info, other, sh_num, name);
391 } else {
392 esym = &((ElfW(Sym) *)symtab_section->data)[sym->c];
393 esym->st_value = value;
394 esym->st_size = size;
395 esym->st_shndx = sh_num;
397 update_storage(sym);
400 ST_FUNC void put_extern_sym(Sym *sym, Section *section,
401 addr_t value, unsigned long size)
403 put_extern_sym2(sym, section, value, size, 1);
406 /* add a new relocation entry to symbol 'sym' in section 's' */
407 ST_FUNC void greloca(Section *s, Sym *sym, unsigned long offset, int type,
408 addr_t addend)
410 int c = 0;
412 if (nocode_wanted && s == cur_text_section)
413 return;
415 if (sym) {
416 if (0 == sym->c)
417 put_extern_sym(sym, NULL, 0, 0);
418 c = sym->c;
421 /* now we can add ELF relocation info */
422 put_elf_reloca(symtab_section, s, offset, type, c, addend);
425 #if PTR_SIZE == 4
426 ST_FUNC void greloc(Section *s, Sym *sym, unsigned long offset, int type)
428 greloca(s, sym, offset, type, 0);
430 #endif
432 /* ------------------------------------------------------------------------- */
433 /* symbol allocator */
434 static Sym *__sym_malloc(void)
436 Sym *sym_pool, *sym, *last_sym;
437 int i;
439 sym_pool = tcc_malloc(SYM_POOL_NB * sizeof(Sym));
440 dynarray_add(&sym_pools, &nb_sym_pools, sym_pool);
442 last_sym = sym_free_first;
443 sym = sym_pool;
444 for(i = 0; i < SYM_POOL_NB; i++) {
445 sym->next = last_sym;
446 last_sym = sym;
447 sym++;
449 sym_free_first = last_sym;
450 return last_sym;
453 static inline Sym *sym_malloc(void)
455 Sym *sym;
456 #ifndef SYM_DEBUG
457 sym = sym_free_first;
458 if (!sym)
459 sym = __sym_malloc();
460 sym_free_first = sym->next;
461 return sym;
462 #else
463 sym = tcc_malloc(sizeof(Sym));
464 return sym;
465 #endif
468 ST_INLN void sym_free(Sym *sym)
470 #ifndef SYM_DEBUG
471 sym->next = sym_free_first;
472 sym_free_first = sym;
473 #else
474 tcc_free(sym);
475 #endif
478 /* push, without hashing */
479 ST_FUNC Sym *sym_push2(Sym **ps, int v, int t, int c)
481 Sym *s;
483 s = sym_malloc();
484 memset(s, 0, sizeof *s);
485 s->v = v;
486 s->type.t = t;
487 s->c = c;
488 /* add in stack */
489 s->prev = *ps;
490 *ps = s;
491 return s;
494 /* find a symbol and return its associated structure. 's' is the top
495 of the symbol stack */
496 ST_FUNC Sym *sym_find2(Sym *s, int v)
498 while (s) {
499 if (s->v == v)
500 return s;
501 else if (s->v == -1)
502 return NULL;
503 s = s->prev;
505 return NULL;
508 /* structure lookup */
509 ST_INLN Sym *struct_find(int v)
511 v -= TOK_IDENT;
512 if ((unsigned)v >= (unsigned)(tok_ident - TOK_IDENT))
513 return NULL;
514 return table_ident[v]->sym_struct;
517 /* find an identifier */
518 ST_INLN Sym *sym_find(int v)
520 v -= TOK_IDENT;
521 if ((unsigned)v >= (unsigned)(tok_ident - TOK_IDENT))
522 return NULL;
523 return table_ident[v]->sym_identifier;
526 /* push a given symbol on the symbol stack */
527 ST_FUNC Sym *sym_push(int v, CType *type, int r, int c)
529 Sym *s, **ps;
530 TokenSym *ts;
532 if (local_stack)
533 ps = &local_stack;
534 else
535 ps = &global_stack;
536 s = sym_push2(ps, v, type->t, c);
537 s->type.ref = type->ref;
538 s->r = r;
539 /* don't record fields or anonymous symbols */
540 /* XXX: simplify */
541 if (!(v & SYM_FIELD) && (v & ~SYM_STRUCT) < SYM_FIRST_ANOM) {
542 /* record symbol in token array */
543 ts = table_ident[(v & ~SYM_STRUCT) - TOK_IDENT];
544 if (v & SYM_STRUCT)
545 ps = &ts->sym_struct;
546 else
547 ps = &ts->sym_identifier;
548 s->prev_tok = *ps;
549 *ps = s;
550 s->sym_scope = local_scope;
551 if (s->prev_tok && s->prev_tok->sym_scope == s->sym_scope)
552 tcc_error("redeclaration of '%s'",
553 get_tok_str(v & ~SYM_STRUCT, NULL));
555 return s;
558 /* push a global identifier */
559 ST_FUNC Sym *global_identifier_push(int v, int t, int c)
561 Sym *s, **ps;
562 s = sym_push2(&global_stack, v, t, c);
563 /* don't record anonymous symbol */
564 if (v < SYM_FIRST_ANOM) {
565 ps = &table_ident[v - TOK_IDENT]->sym_identifier;
566 /* modify the top most local identifier, so that
567 sym_identifier will point to 's' when popped */
568 while (*ps != NULL)
569 ps = &(*ps)->prev_tok;
570 s->prev_tok = NULL;
571 *ps = s;
573 return s;
576 /* pop symbols until top reaches 'b'. If KEEP is non-zero don't really
577 pop them yet from the list, but do remove them from the token array. */
578 ST_FUNC void sym_pop(Sym **ptop, Sym *b, int keep)
580 Sym *s, *ss, **ps;
581 TokenSym *ts;
582 int v;
584 s = *ptop;
585 while(s != b) {
586 ss = s->prev;
587 v = s->v;
588 /* remove symbol in token array */
589 /* XXX: simplify */
590 if (!(v & SYM_FIELD) && (v & ~SYM_STRUCT) < SYM_FIRST_ANOM) {
591 ts = table_ident[(v & ~SYM_STRUCT) - TOK_IDENT];
592 if (v & SYM_STRUCT)
593 ps = &ts->sym_struct;
594 else
595 ps = &ts->sym_identifier;
596 *ps = s->prev_tok;
598 if (!keep)
599 sym_free(s);
600 s = ss;
602 if (!keep)
603 *ptop = b;
606 /* ------------------------------------------------------------------------- */
608 static void vsetc(CType *type, int r, CValue *vc)
610 int v;
612 if (vtop >= vstack + (VSTACK_SIZE - 1))
613 tcc_error("memory full (vstack)");
614 /* cannot let cpu flags if other instruction are generated. Also
615 avoid leaving VT_JMP anywhere except on the top of the stack
616 because it would complicate the code generator.
618 Don't do this when nocode_wanted. vtop might come from
619 !nocode_wanted regions (see 88_codeopt.c) and transforming
620 it to a register without actually generating code is wrong
621 as their value might still be used for real. All values
622 we push under nocode_wanted will eventually be popped
623 again, so that the VT_CMP/VT_JMP value will be in vtop
624 when code is unsuppressed again.
626 Same logic below in vswap(); */
627 if (vtop >= vstack && !nocode_wanted) {
628 v = vtop->r & VT_VALMASK;
629 if (v == VT_CMP || (v & ~1) == VT_JMP)
630 gv(RC_INT);
633 vtop++;
634 vtop->type = *type;
635 vtop->r = r;
636 vtop->r2 = VT_CONST;
637 vtop->c = *vc;
638 vtop->sym = NULL;
641 ST_FUNC void vswap(void)
643 SValue tmp;
644 /* cannot vswap cpu flags. See comment at vsetc() above */
645 if (vtop >= vstack && !nocode_wanted) {
646 int v = vtop->r & VT_VALMASK;
647 if (v == VT_CMP || (v & ~1) == VT_JMP)
648 gv(RC_INT);
650 tmp = vtop[0];
651 vtop[0] = vtop[-1];
652 vtop[-1] = tmp;
655 /* pop stack value */
656 ST_FUNC void vpop(void)
658 int v;
659 v = vtop->r & VT_VALMASK;
660 #if defined(TCC_TARGET_I386) || defined(TCC_TARGET_X86_64)
661 /* for x86, we need to pop the FP stack */
662 if (v == TREG_ST0) {
663 o(0xd8dd); /* fstp %st(0) */
664 } else
665 #endif
666 if (v == VT_JMP || v == VT_JMPI) {
667 /* need to put correct jump if && or || without test */
668 gsym(vtop->c.i);
670 vtop--;
673 /* push constant of type "type" with useless value */
674 ST_FUNC void vpush(CType *type)
676 vset(type, VT_CONST, 0);
679 /* push integer constant */
680 ST_FUNC void vpushi(int v)
682 CValue cval;
683 cval.i = v;
684 vsetc(&int_type, VT_CONST, &cval);
687 /* push a pointer sized constant */
688 static void vpushs(addr_t v)
690 CValue cval;
691 cval.i = v;
692 vsetc(&size_type, VT_CONST, &cval);
695 /* push arbitrary 64bit constant */
696 ST_FUNC void vpush64(int ty, unsigned long long v)
698 CValue cval;
699 CType ctype;
700 ctype.t = ty;
701 ctype.ref = NULL;
702 cval.i = v;
703 vsetc(&ctype, VT_CONST, &cval);
706 /* push long long constant */
707 static inline void vpushll(long long v)
709 vpush64(VT_LLONG, v);
712 ST_FUNC void vset(CType *type, int r, int v)
714 CValue cval;
716 cval.i = v;
717 vsetc(type, r, &cval);
720 static void vseti(int r, int v)
722 CType type;
723 type.t = VT_INT;
724 type.ref = NULL;
725 vset(&type, r, v);
728 ST_FUNC void vpushv(SValue *v)
730 if (vtop >= vstack + (VSTACK_SIZE - 1))
731 tcc_error("memory full (vstack)");
732 vtop++;
733 *vtop = *v;
736 static void vdup(void)
738 vpushv(vtop);
741 /* rotate n first stack elements to the bottom
742 I1 ... In -> I2 ... In I1 [top is right]
744 ST_FUNC void vrotb(int n)
746 int i;
747 SValue tmp;
749 tmp = vtop[-n + 1];
750 for(i=-n+1;i!=0;i++)
751 vtop[i] = vtop[i+1];
752 vtop[0] = tmp;
755 /* rotate the n elements before entry e towards the top
756 I1 ... In ... -> In I1 ... I(n-1) ... [top is right]
758 ST_FUNC void vrote(SValue *e, int n)
760 int i;
761 SValue tmp;
763 tmp = *e;
764 for(i = 0;i < n - 1; i++)
765 e[-i] = e[-i - 1];
766 e[-n + 1] = tmp;
769 /* rotate n first stack elements to the top
770 I1 ... In -> In I1 ... I(n-1) [top is right]
772 ST_FUNC void vrott(int n)
774 vrote(vtop, n);
777 /* push a symbol value of TYPE */
778 static inline void vpushsym(CType *type, Sym *sym)
780 CValue cval;
781 cval.i = 0;
782 vsetc(type, VT_CONST | VT_SYM, &cval);
783 vtop->sym = sym;
786 /* Return a static symbol pointing to a section */
787 ST_FUNC Sym *get_sym_ref(CType *type, Section *sec, unsigned long offset, unsigned long size)
789 int v;
790 Sym *sym;
792 v = anon_sym++;
793 sym = global_identifier_push(v, type->t | VT_STATIC, 0);
794 sym->type.ref = type->ref;
795 sym->r = VT_CONST | VT_SYM;
796 put_extern_sym(sym, sec, offset, size);
797 return sym;
800 /* push a reference to a section offset by adding a dummy symbol */
801 static void vpush_ref(CType *type, Section *sec, unsigned long offset, unsigned long size)
803 vpushsym(type, get_sym_ref(type, sec, offset, size));
806 /* define a new external reference to a symbol 'v' of type 'u' */
807 ST_FUNC Sym *external_global_sym(int v, CType *type, int r)
809 Sym *s;
811 s = sym_find(v);
812 if (!s) {
813 /* push forward reference */
814 s = global_identifier_push(v, type->t | VT_EXTERN, 0);
815 s->type.ref = type->ref;
816 s->r = r | VT_CONST | VT_SYM;
818 return s;
821 /* Merge some storage attributes. */
822 static void patch_storage(Sym *sym, AttributeDef *ad, CType *type)
824 if (type && !is_compatible_types(&sym->type, type))
825 tcc_error("incompatible types for redefinition of '%s'",
826 get_tok_str(sym->v, NULL));
827 #ifdef TCC_TARGET_PE
828 if (sym->a.dllimport != ad->a.dllimport)
829 tcc_error("incompatible dll linkage for redefinition of '%s'",
830 get_tok_str(sym->v, NULL));
831 #endif
832 sym->a.dllexport |= ad->a.dllexport;
833 sym->a.weak |= ad->a.weak;
834 if (ad->a.visibility) {
835 int vis = sym->a.visibility;
836 int vis2 = ad->a.visibility;
837 if (vis == STV_DEFAULT)
838 vis = vis2;
839 else if (vis2 != STV_DEFAULT)
840 vis = (vis < vis2) ? vis : vis2;
841 sym->a.visibility = vis;
843 if (ad->a.aligned)
844 sym->a.aligned = ad->a.aligned;
845 if (ad->asm_label)
846 sym->asm_label = ad->asm_label;
847 update_storage(sym);
850 /* define a new external reference to a symbol 'v' */
851 static Sym *external_sym(int v, CType *type, int r, AttributeDef *ad)
853 Sym *s;
854 s = sym_find(v);
855 if (!s) {
856 /* push forward reference */
857 s = sym_push(v, type, r | VT_CONST | VT_SYM, 0);
858 s->type.t |= VT_EXTERN;
859 s->a = ad->a;
860 s->sym_scope = 0;
861 } else {
862 if (s->type.ref == func_old_type.ref) {
863 s->type.ref = type->ref;
864 s->r = r | VT_CONST | VT_SYM;
865 s->type.t |= VT_EXTERN;
867 patch_storage(s, ad, type);
869 return s;
872 /* push a reference to global symbol v */
873 ST_FUNC void vpush_global_sym(CType *type, int v)
875 vpushsym(type, external_global_sym(v, type, 0));
878 /* save registers up to (vtop - n) stack entry */
879 ST_FUNC void save_regs(int n)
881 SValue *p, *p1;
882 for(p = vstack, p1 = vtop - n; p <= p1; p++)
883 save_reg(p->r);
886 /* save r to the memory stack, and mark it as being free */
887 ST_FUNC void save_reg(int r)
889 save_reg_upstack(r, 0);
892 /* save r to the memory stack, and mark it as being free,
893 if seen up to (vtop - n) stack entry */
894 ST_FUNC void save_reg_upstack(int r, int n)
896 int l, saved, size, align;
897 SValue *p, *p1, sv;
898 CType *type;
900 if ((r &= VT_VALMASK) >= VT_CONST)
901 return;
902 if (nocode_wanted)
903 return;
905 /* modify all stack values */
906 saved = 0;
907 l = 0;
908 for(p = vstack, p1 = vtop - n; p <= p1; p++) {
909 if ((p->r & VT_VALMASK) == r ||
910 ((p->type.t & VT_BTYPE) == VT_LLONG && (p->r2 & VT_VALMASK) == r)) {
911 /* must save value on stack if not already done */
912 if (!saved) {
913 /* NOTE: must reload 'r' because r might be equal to r2 */
914 r = p->r & VT_VALMASK;
915 /* store register in the stack */
916 type = &p->type;
917 if ((p->r & VT_LVAL) ||
918 (!is_float(type->t) && (type->t & VT_BTYPE) != VT_LLONG))
919 #if PTR_SIZE == 8
920 type = &char_pointer_type;
921 #else
922 type = &int_type;
923 #endif
924 size = type_size(type, &align);
925 loc = (loc - size) & -align;
926 sv.type.t = type->t;
927 sv.r = VT_LOCAL | VT_LVAL;
928 sv.c.i = loc;
929 store(r, &sv);
930 #if defined(TCC_TARGET_I386) || defined(TCC_TARGET_X86_64)
931 /* x86 specific: need to pop fp register ST0 if saved */
932 if (r == TREG_ST0) {
933 o(0xd8dd); /* fstp %st(0) */
935 #endif
936 #if PTR_SIZE == 4
937 /* special long long case */
938 if ((type->t & VT_BTYPE) == VT_LLONG) {
939 sv.c.i += 4;
940 store(p->r2, &sv);
942 #endif
943 l = loc;
944 saved = 1;
946 /* mark that stack entry as being saved on the stack */
947 if (p->r & VT_LVAL) {
948 /* also clear the bounded flag because the
949 relocation address of the function was stored in
950 p->c.i */
951 p->r = (p->r & ~(VT_VALMASK | VT_BOUNDED)) | VT_LLOCAL;
952 } else {
953 p->r = lvalue_type(p->type.t) | VT_LOCAL;
955 p->r2 = VT_CONST;
956 p->c.i = l;
961 #ifdef TCC_TARGET_ARM
962 /* find a register of class 'rc2' with at most one reference on stack.
963 * If none, call get_reg(rc) */
964 ST_FUNC int get_reg_ex(int rc, int rc2)
966 int r;
967 SValue *p;
969 for(r=0;r<NB_REGS;r++) {
970 if (reg_classes[r] & rc2) {
971 int n;
972 n=0;
973 for(p = vstack; p <= vtop; p++) {
974 if ((p->r & VT_VALMASK) == r ||
975 (p->r2 & VT_VALMASK) == r)
976 n++;
978 if (n <= 1)
979 return r;
982 return get_reg(rc);
984 #endif
986 /* find a free register of class 'rc'. If none, save one register */
987 ST_FUNC int get_reg(int rc)
989 int r;
990 SValue *p;
992 /* find a free register */
993 for(r=0;r<NB_REGS;r++) {
994 if (reg_classes[r] & rc) {
995 if (nocode_wanted)
996 return r;
997 for(p=vstack;p<=vtop;p++) {
998 if ((p->r & VT_VALMASK) == r ||
999 (p->r2 & VT_VALMASK) == r)
1000 goto notfound;
1002 return r;
1004 notfound: ;
1007 /* no register left : free the first one on the stack (VERY
1008 IMPORTANT to start from the bottom to ensure that we don't
1009 spill registers used in gen_opi()) */
1010 for(p=vstack;p<=vtop;p++) {
1011 /* look at second register (if long long) */
1012 r = p->r2 & VT_VALMASK;
1013 if (r < VT_CONST && (reg_classes[r] & rc))
1014 goto save_found;
1015 r = p->r & VT_VALMASK;
1016 if (r < VT_CONST && (reg_classes[r] & rc)) {
1017 save_found:
1018 save_reg(r);
1019 return r;
1022 /* Should never comes here */
1023 return -1;
1026 /* move register 's' (of type 't') to 'r', and flush previous value of r to memory
1027 if needed */
1028 static void move_reg(int r, int s, int t)
1030 SValue sv;
1032 if (r != s) {
1033 save_reg(r);
1034 sv.type.t = t;
1035 sv.type.ref = NULL;
1036 sv.r = s;
1037 sv.c.i = 0;
1038 load(r, &sv);
1042 /* get address of vtop (vtop MUST BE an lvalue) */
1043 ST_FUNC void gaddrof(void)
1045 vtop->r &= ~VT_LVAL;
1046 /* tricky: if saved lvalue, then we can go back to lvalue */
1047 if ((vtop->r & VT_VALMASK) == VT_LLOCAL)
1048 vtop->r = (vtop->r & ~(VT_VALMASK | VT_LVAL_TYPE)) | VT_LOCAL | VT_LVAL;
1053 #ifdef CONFIG_TCC_BCHECK
1054 /* generate lvalue bound code */
1055 static void gbound(void)
1057 int lval_type;
1058 CType type1;
1060 vtop->r &= ~VT_MUSTBOUND;
1061 /* if lvalue, then use checking code before dereferencing */
1062 if (vtop->r & VT_LVAL) {
1063 /* if not VT_BOUNDED value, then make one */
1064 if (!(vtop->r & VT_BOUNDED)) {
1065 lval_type = vtop->r & (VT_LVAL_TYPE | VT_LVAL);
1066 /* must save type because we must set it to int to get pointer */
1067 type1 = vtop->type;
1068 vtop->type.t = VT_PTR;
1069 gaddrof();
1070 vpushi(0);
1071 gen_bounded_ptr_add();
1072 vtop->r |= lval_type;
1073 vtop->type = type1;
1075 /* then check for dereferencing */
1076 gen_bounded_ptr_deref();
1079 #endif
1081 static void incr_bf_adr(int o)
1083 vtop->type = char_pointer_type;
1084 gaddrof();
1085 vpushi(o);
1086 gen_op('+');
1087 vtop->type.t = (vtop->type.t & ~(VT_BTYPE|VT_DEFSIGN))
1088 | (VT_BYTE|VT_UNSIGNED);
1089 vtop->r = (vtop->r & ~VT_LVAL_TYPE)
1090 | (VT_LVAL_BYTE|VT_LVAL_UNSIGNED|VT_LVAL);
1093 /* single-byte load mode for packed or otherwise unaligned bitfields */
1094 static void load_packed_bf(CType *type, int bit_pos, int bit_size)
1096 int n, o, bits;
1097 save_reg_upstack(vtop->r, 1);
1098 vpush64(type->t & VT_BTYPE, 0); // B X
1099 bits = 0, o = bit_pos >> 3, bit_pos &= 7;
1100 do {
1101 vswap(); // X B
1102 incr_bf_adr(o);
1103 vdup(); // X B B
1104 n = 8 - bit_pos;
1105 if (n > bit_size)
1106 n = bit_size;
1107 if (bit_pos)
1108 vpushi(bit_pos), gen_op(TOK_SHR), bit_pos = 0; // X B Y
1109 if (n < 8)
1110 vpushi((1 << n) - 1), gen_op('&');
1111 gen_cast(type);
1112 if (bits)
1113 vpushi(bits), gen_op(TOK_SHL);
1114 vrotb(3); // B Y X
1115 gen_op('|'); // B X
1116 bits += n, bit_size -= n, o = 1;
1117 } while (bit_size);
1118 vswap(), vpop();
1119 if (!(type->t & VT_UNSIGNED)) {
1120 n = ((type->t & VT_BTYPE) == VT_LLONG ? 64 : 32) - bits;
1121 vpushi(n), gen_op(TOK_SHL);
1122 vpushi(n), gen_op(TOK_SAR);
1126 /* single-byte store mode for packed or otherwise unaligned bitfields */
1127 static void store_packed_bf(int bit_pos, int bit_size)
1129 int bits, n, o, m, c;
1131 c = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;
1132 vswap(); // X B
1133 save_reg_upstack(vtop->r, 1);
1134 bits = 0, o = bit_pos >> 3, bit_pos &= 7;
1135 do {
1136 incr_bf_adr(o); // X B
1137 vswap(); //B X
1138 c ? vdup() : gv_dup(); // B V X
1139 vrott(3); // X B V
1140 if (bits)
1141 vpushi(bits), gen_op(TOK_SHR);
1142 if (bit_pos)
1143 vpushi(bit_pos), gen_op(TOK_SHL);
1144 n = 8 - bit_pos;
1145 if (n > bit_size)
1146 n = bit_size;
1147 if (n < 8) {
1148 m = ((1 << n) - 1) << bit_pos;
1149 vpushi(m), gen_op('&'); // X B V1
1150 vpushv(vtop-1); // X B V1 B
1151 vpushi(m & 0x80 ? ~m & 0x7f : ~m);
1152 gen_op('&'); // X B V1 B1
1153 gen_op('|'); // X B V2
1155 vdup(), vtop[-1] = vtop[-2]; // X B B V2
1156 vstore(), vpop(); // X B
1157 bits += n, bit_size -= n, bit_pos = 0, o = 1;
1158 } while (bit_size);
1159 vpop(), vpop();
1162 static int adjust_bf(SValue *sv, int bit_pos, int bit_size)
1164 int t;
1165 if (0 == sv->type.ref)
1166 return 0;
1167 t = sv->type.ref->auxtype;
1168 if (t != -1 && t != VT_STRUCT) {
1169 sv->type.t = (sv->type.t & ~VT_BTYPE) | t;
1170 sv->r = (sv->r & ~VT_LVAL_TYPE) | lvalue_type(sv->type.t);
1172 return t;
1175 /* store vtop a register belonging to class 'rc'. lvalues are
1176 converted to values. Cannot be used if cannot be converted to
1177 register value (such as structures). */
1178 ST_FUNC int gv(int rc)
1180 int r, bit_pos, bit_size, size, align, rc2;
1182 /* NOTE: get_reg can modify vstack[] */
1183 if (vtop->type.t & VT_BITFIELD) {
1184 CType type;
1186 bit_pos = BIT_POS(vtop->type.t);
1187 bit_size = BIT_SIZE(vtop->type.t);
1188 /* remove bit field info to avoid loops */
1189 vtop->type.t &= ~VT_STRUCT_MASK;
1191 type.ref = NULL;
1192 type.t = vtop->type.t & VT_UNSIGNED;
1193 if ((vtop->type.t & VT_BTYPE) == VT_BOOL)
1194 type.t |= VT_UNSIGNED;
1196 r = adjust_bf(vtop, bit_pos, bit_size);
1198 if ((vtop->type.t & VT_BTYPE) == VT_LLONG)
1199 type.t |= VT_LLONG;
1200 else
1201 type.t |= VT_INT;
1203 if (r == VT_STRUCT) {
1204 load_packed_bf(&type, bit_pos, bit_size);
1205 } else {
1206 int bits = (type.t & VT_BTYPE) == VT_LLONG ? 64 : 32;
1207 /* cast to int to propagate signedness in following ops */
1208 gen_cast(&type);
1209 /* generate shifts */
1210 vpushi(bits - (bit_pos + bit_size));
1211 gen_op(TOK_SHL);
1212 vpushi(bits - bit_size);
1213 /* NOTE: transformed to SHR if unsigned */
1214 gen_op(TOK_SAR);
1216 r = gv(rc);
1217 } else {
1218 if (is_float(vtop->type.t) &&
1219 (vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
1220 unsigned long offset;
1221 /* CPUs usually cannot use float constants, so we store them
1222 generically in data segment */
1223 size = type_size(&vtop->type, &align);
1224 if (NODATA_WANTED)
1225 size = 0, align = 1;
1226 offset = section_add(data_section, size, align);
1227 vpush_ref(&vtop->type, data_section, offset, size);
1228 vswap();
1229 init_putv(&vtop->type, data_section, offset);
1230 vtop->r |= VT_LVAL;
1232 #ifdef CONFIG_TCC_BCHECK
1233 if (vtop->r & VT_MUSTBOUND)
1234 gbound();
1235 #endif
1237 r = vtop->r & VT_VALMASK;
1238 rc2 = (rc & RC_FLOAT) ? RC_FLOAT : RC_INT;
1239 #ifndef TCC_TARGET_ARM64
1240 if (rc == RC_IRET)
1241 rc2 = RC_LRET;
1242 #ifdef TCC_TARGET_X86_64
1243 else if (rc == RC_FRET)
1244 rc2 = RC_QRET;
1245 #endif
1246 #endif
1247 /* need to reload if:
1248 - constant
1249 - lvalue (need to dereference pointer)
1250 - already a register, but not in the right class */
1251 if (r >= VT_CONST
1252 || (vtop->r & VT_LVAL)
1253 || !(reg_classes[r] & rc)
1254 #if PTR_SIZE == 8
1255 || ((vtop->type.t & VT_BTYPE) == VT_QLONG && !(reg_classes[vtop->r2] & rc2))
1256 || ((vtop->type.t & VT_BTYPE) == VT_QFLOAT && !(reg_classes[vtop->r2] & rc2))
1257 #else
1258 || ((vtop->type.t & VT_BTYPE) == VT_LLONG && !(reg_classes[vtop->r2] & rc2))
1259 #endif
1262 r = get_reg(rc);
1263 #if PTR_SIZE == 8
1264 if (((vtop->type.t & VT_BTYPE) == VT_QLONG) || ((vtop->type.t & VT_BTYPE) == VT_QFLOAT)) {
1265 int addr_type = VT_LLONG, load_size = 8, load_type = ((vtop->type.t & VT_BTYPE) == VT_QLONG) ? VT_LLONG : VT_DOUBLE;
1266 #else
1267 if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
1268 int addr_type = VT_INT, load_size = 4, load_type = VT_INT;
1269 unsigned long long ll;
1270 #endif
1271 int r2, original_type;
1272 original_type = vtop->type.t;
1273 /* two register type load : expand to two words
1274 temporarily */
1275 #if PTR_SIZE == 4
1276 if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
1277 /* load constant */
1278 ll = vtop->c.i;
1279 vtop->c.i = ll; /* first word */
1280 load(r, vtop);
1281 vtop->r = r; /* save register value */
1282 vpushi(ll >> 32); /* second word */
1283 } else
1284 #endif
1285 if (vtop->r & VT_LVAL) {
1286 /* We do not want to modifier the long long
1287 pointer here, so the safest (and less
1288 efficient) is to save all the other registers
1289 in the stack. XXX: totally inefficient. */
1290 #if 0
1291 save_regs(1);
1292 #else
1293 /* lvalue_save: save only if used further down the stack */
1294 save_reg_upstack(vtop->r, 1);
1295 #endif
1296 /* load from memory */
1297 vtop->type.t = load_type;
1298 load(r, vtop);
1299 vdup();
1300 vtop[-1].r = r; /* save register value */
1301 /* increment pointer to get second word */
1302 vtop->type.t = addr_type;
1303 gaddrof();
1304 vpushi(load_size);
1305 gen_op('+');
1306 vtop->r |= VT_LVAL;
1307 vtop->type.t = load_type;
1308 } else {
1309 /* move registers */
1310 load(r, vtop);
1311 vdup();
1312 vtop[-1].r = r; /* save register value */
1313 vtop->r = vtop[-1].r2;
1315 /* Allocate second register. Here we rely on the fact that
1316 get_reg() tries first to free r2 of an SValue. */
1317 r2 = get_reg(rc2);
1318 load(r2, vtop);
1319 vpop();
1320 /* write second register */
1321 vtop->r2 = r2;
1322 vtop->type.t = original_type;
1323 } else if ((vtop->r & VT_LVAL) && !is_float(vtop->type.t)) {
1324 int t1, t;
1325 /* lvalue of scalar type : need to use lvalue type
1326 because of possible cast */
1327 t = vtop->type.t;
1328 t1 = t;
1329 /* compute memory access type */
1330 if (vtop->r & VT_LVAL_BYTE)
1331 t = VT_BYTE;
1332 else if (vtop->r & VT_LVAL_SHORT)
1333 t = VT_SHORT;
1334 if (vtop->r & VT_LVAL_UNSIGNED)
1335 t |= VT_UNSIGNED;
1336 vtop->type.t = t;
1337 load(r, vtop);
1338 /* restore wanted type */
1339 vtop->type.t = t1;
1340 } else {
1341 /* one register type load */
1342 load(r, vtop);
1345 vtop->r = r;
1346 #ifdef TCC_TARGET_C67
1347 /* uses register pairs for doubles */
1348 if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
1349 vtop->r2 = r+1;
1350 #endif
1352 return r;
1355 /* generate vtop[-1] and vtop[0] in resp. classes rc1 and rc2 */
1356 ST_FUNC void gv2(int rc1, int rc2)
1358 int v;
1360 /* generate more generic register first. But VT_JMP or VT_CMP
1361 values must be generated first in all cases to avoid possible
1362 reload errors */
1363 v = vtop[0].r & VT_VALMASK;
1364 if (v != VT_CMP && (v & ~1) != VT_JMP && rc1 <= rc2) {
1365 vswap();
1366 gv(rc1);
1367 vswap();
1368 gv(rc2);
1369 /* test if reload is needed for first register */
1370 if ((vtop[-1].r & VT_VALMASK) >= VT_CONST) {
1371 vswap();
1372 gv(rc1);
1373 vswap();
1375 } else {
1376 gv(rc2);
1377 vswap();
1378 gv(rc1);
1379 vswap();
1380 /* test if reload is needed for first register */
1381 if ((vtop[0].r & VT_VALMASK) >= VT_CONST) {
1382 gv(rc2);
1387 #ifndef TCC_TARGET_ARM64
1388 /* wrapper around RC_FRET to return a register by type */
1389 static int rc_fret(int t)
1391 #ifdef TCC_TARGET_X86_64
1392 if (t == VT_LDOUBLE) {
1393 return RC_ST0;
1395 #endif
1396 return RC_FRET;
1398 #endif
1400 /* wrapper around REG_FRET to return a register by type */
1401 static int reg_fret(int t)
1403 #ifdef TCC_TARGET_X86_64
1404 if (t == VT_LDOUBLE) {
1405 return TREG_ST0;
1407 #endif
1408 return REG_FRET;
1411 #if PTR_SIZE == 4
1412 /* expand 64bit on stack in two ints */
1413 static void lexpand(void)
1415 int u, v;
1416 u = vtop->type.t & (VT_DEFSIGN | VT_UNSIGNED);
1417 v = vtop->r & (VT_VALMASK | VT_LVAL);
1418 if (v == VT_CONST) {
1419 vdup();
1420 vtop[0].c.i >>= 32;
1421 } else if (v == (VT_LVAL|VT_CONST) || v == (VT_LVAL|VT_LOCAL)) {
1422 vdup();
1423 vtop[0].c.i += 4;
1424 } else {
1425 gv(RC_INT);
1426 vdup();
1427 vtop[0].r = vtop[-1].r2;
1428 vtop[0].r2 = vtop[-1].r2 = VT_CONST;
1430 vtop[0].type.t = vtop[-1].type.t = VT_INT | u;
1432 #endif
1434 #ifdef TCC_TARGET_ARM
1435 /* expand long long on stack */
1436 ST_FUNC void lexpand_nr(void)
1438 int u,v;
1440 u = vtop->type.t & (VT_DEFSIGN | VT_UNSIGNED);
1441 vdup();
1442 vtop->r2 = VT_CONST;
1443 vtop->type.t = VT_INT | u;
1444 v=vtop[-1].r & (VT_VALMASK | VT_LVAL);
1445 if (v == VT_CONST) {
1446 vtop[-1].c.i = vtop->c.i;
1447 vtop->c.i = vtop->c.i >> 32;
1448 vtop->r = VT_CONST;
1449 } else if (v == (VT_LVAL|VT_CONST) || v == (VT_LVAL|VT_LOCAL)) {
1450 vtop->c.i += 4;
1451 vtop->r = vtop[-1].r;
1452 } else if (v > VT_CONST) {
1453 vtop--;
1454 lexpand();
1455 } else
1456 vtop->r = vtop[-1].r2;
1457 vtop[-1].r2 = VT_CONST;
1458 vtop[-1].type.t = VT_INT | u;
1460 #endif
1462 #if PTR_SIZE == 4
1463 /* build a long long from two ints */
1464 static void lbuild(int t)
1466 gv2(RC_INT, RC_INT);
1467 vtop[-1].r2 = vtop[0].r;
1468 vtop[-1].type.t = t;
1469 vpop();
1471 #endif
1473 /* convert stack entry to register and duplicate its value in another
1474 register */
1475 static void gv_dup(void)
1477 int rc, t, r, r1;
1478 SValue sv;
1480 t = vtop->type.t;
1481 #if PTR_SIZE == 4
1482 if ((t & VT_BTYPE) == VT_LLONG) {
1483 if (t & VT_BITFIELD) {
1484 gv(RC_INT);
1485 t = vtop->type.t;
1487 lexpand();
1488 gv_dup();
1489 vswap();
1490 vrotb(3);
1491 gv_dup();
1492 vrotb(4);
1493 /* stack: H L L1 H1 */
1494 lbuild(t);
1495 vrotb(3);
1496 vrotb(3);
1497 vswap();
1498 lbuild(t);
1499 vswap();
1500 } else
1501 #endif
1503 /* duplicate value */
1504 rc = RC_INT;
1505 sv.type.t = VT_INT;
1506 if (is_float(t)) {
1507 rc = RC_FLOAT;
1508 #ifdef TCC_TARGET_X86_64
1509 if ((t & VT_BTYPE) == VT_LDOUBLE) {
1510 rc = RC_ST0;
1512 #endif
1513 sv.type.t = t;
1515 r = gv(rc);
1516 r1 = get_reg(rc);
1517 sv.r = r;
1518 sv.c.i = 0;
1519 load(r1, &sv); /* move r to r1 */
1520 vdup();
1521 /* duplicates value */
1522 if (r != r1)
1523 vtop->r = r1;
1527 /* Generate value test
1529 * Generate a test for any value (jump, comparison and integers) */
1530 ST_FUNC int gvtst(int inv, int t)
1532 int v = vtop->r & VT_VALMASK;
1533 if (v != VT_CMP && v != VT_JMP && v != VT_JMPI) {
1534 vpushi(0);
1535 gen_op(TOK_NE);
1537 if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
1538 /* constant jmp optimization */
1539 if ((vtop->c.i != 0) != inv)
1540 t = gjmp(t);
1541 vtop--;
1542 return t;
1544 return gtst(inv, t);
1547 #if PTR_SIZE == 4
1548 /* generate CPU independent (unsigned) long long operations */
1549 static void gen_opl(int op)
1551 int t, a, b, op1, c, i;
1552 int func;
1553 unsigned short reg_iret = REG_IRET;
1554 unsigned short reg_lret = REG_LRET;
1555 SValue tmp;
1557 switch(op) {
1558 case '/':
1559 case TOK_PDIV:
1560 func = TOK___divdi3;
1561 goto gen_func;
1562 case TOK_UDIV:
1563 func = TOK___udivdi3;
1564 goto gen_func;
1565 case '%':
1566 func = TOK___moddi3;
1567 goto gen_mod_func;
1568 case TOK_UMOD:
1569 func = TOK___umoddi3;
1570 gen_mod_func:
1571 #ifdef TCC_ARM_EABI
1572 reg_iret = TREG_R2;
1573 reg_lret = TREG_R3;
1574 #endif
1575 gen_func:
1576 /* call generic long long function */
1577 vpush_global_sym(&func_old_type, func);
1578 vrott(3);
1579 gfunc_call(2);
1580 vpushi(0);
1581 vtop->r = reg_iret;
1582 vtop->r2 = reg_lret;
1583 break;
1584 case '^':
1585 case '&':
1586 case '|':
1587 case '*':
1588 case '+':
1589 case '-':
1590 //pv("gen_opl A",0,2);
1591 t = vtop->type.t;
1592 vswap();
1593 lexpand();
1594 vrotb(3);
1595 lexpand();
1596 /* stack: L1 H1 L2 H2 */
1597 tmp = vtop[0];
1598 vtop[0] = vtop[-3];
1599 vtop[-3] = tmp;
1600 tmp = vtop[-2];
1601 vtop[-2] = vtop[-3];
1602 vtop[-3] = tmp;
1603 vswap();
1604 /* stack: H1 H2 L1 L2 */
1605 //pv("gen_opl B",0,4);
1606 if (op == '*') {
1607 vpushv(vtop - 1);
1608 vpushv(vtop - 1);
1609 gen_op(TOK_UMULL);
1610 lexpand();
1611 /* stack: H1 H2 L1 L2 ML MH */
1612 for(i=0;i<4;i++)
1613 vrotb(6);
1614 /* stack: ML MH H1 H2 L1 L2 */
1615 tmp = vtop[0];
1616 vtop[0] = vtop[-2];
1617 vtop[-2] = tmp;
1618 /* stack: ML MH H1 L2 H2 L1 */
1619 gen_op('*');
1620 vrotb(3);
1621 vrotb(3);
1622 gen_op('*');
1623 /* stack: ML MH M1 M2 */
1624 gen_op('+');
1625 gen_op('+');
1626 } else if (op == '+' || op == '-') {
1627 /* XXX: add non carry method too (for MIPS or alpha) */
1628 if (op == '+')
1629 op1 = TOK_ADDC1;
1630 else
1631 op1 = TOK_SUBC1;
1632 gen_op(op1);
1633 /* stack: H1 H2 (L1 op L2) */
1634 vrotb(3);
1635 vrotb(3);
1636 gen_op(op1 + 1); /* TOK_xxxC2 */
1637 } else {
1638 gen_op(op);
1639 /* stack: H1 H2 (L1 op L2) */
1640 vrotb(3);
1641 vrotb(3);
1642 /* stack: (L1 op L2) H1 H2 */
1643 gen_op(op);
1644 /* stack: (L1 op L2) (H1 op H2) */
1646 /* stack: L H */
1647 lbuild(t);
1648 break;
1649 case TOK_SAR:
1650 case TOK_SHR:
1651 case TOK_SHL:
1652 if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
1653 t = vtop[-1].type.t;
1654 vswap();
1655 lexpand();
1656 vrotb(3);
1657 /* stack: L H shift */
1658 c = (int)vtop->c.i;
1659 /* constant: simpler */
1660 /* NOTE: all comments are for SHL; the other cases are
1661 done by swapping words */
1662 vpop();
1663 if (op != TOK_SHL)
1664 vswap();
1665 if (c >= 32) {
1666 /* stack: L H */
1667 vpop();
1668 if (c > 32) {
1669 vpushi(c - 32);
1670 gen_op(op);
1672 if (op != TOK_SAR) {
1673 vpushi(0);
1674 } else {
1675 gv_dup();
1676 vpushi(31);
1677 gen_op(TOK_SAR);
1679 vswap();
1680 } else {
1681 vswap();
1682 gv_dup();
1683 /* stack: H L L */
1684 vpushi(c);
1685 gen_op(op);
1686 vswap();
1687 vpushi(32 - c);
1688 if (op == TOK_SHL)
1689 gen_op(TOK_SHR);
1690 else
1691 gen_op(TOK_SHL);
1692 vrotb(3);
1693 /* stack: L L H */
1694 vpushi(c);
1695 if (op == TOK_SHL)
1696 gen_op(TOK_SHL);
1697 else
1698 gen_op(TOK_SHR);
1699 gen_op('|');
1701 if (op != TOK_SHL)
1702 vswap();
1703 lbuild(t);
1704 } else {
1705 /* XXX: should provide a faster fallback on x86 ? */
1706 switch(op) {
1707 case TOK_SAR:
1708 func = TOK___ashrdi3;
1709 goto gen_func;
1710 case TOK_SHR:
1711 func = TOK___lshrdi3;
1712 goto gen_func;
1713 case TOK_SHL:
1714 func = TOK___ashldi3;
1715 goto gen_func;
1718 break;
1719 default:
1720 /* compare operations */
1721 t = vtop->type.t;
1722 vswap();
1723 lexpand();
1724 vrotb(3);
1725 lexpand();
1726 /* stack: L1 H1 L2 H2 */
1727 tmp = vtop[-1];
1728 vtop[-1] = vtop[-2];
1729 vtop[-2] = tmp;
1730 /* stack: L1 L2 H1 H2 */
1731 /* compare high */
1732 op1 = op;
1733 /* when values are equal, we need to compare low words. Since
1734 the jump is inverted, we invert the test too. */
1735 if (op1 == TOK_LT)
1736 op1 = TOK_LE;
1737 else if (op1 == TOK_GT)
1738 op1 = TOK_GE;
1739 else if (op1 == TOK_ULT)
1740 op1 = TOK_ULE;
1741 else if (op1 == TOK_UGT)
1742 op1 = TOK_UGE;
1743 a = 0;
1744 b = 0;
1745 gen_op(op1);
1746 if (op == TOK_NE) {
1747 b = gvtst(0, 0);
1748 } else {
1749 a = gvtst(1, 0);
1750 if (op != TOK_EQ) {
1751 /* generate non equal test */
1752 vpushi(TOK_NE);
1753 vtop->r = VT_CMP;
1754 b = gvtst(0, 0);
1757 /* compare low. Always unsigned */
1758 op1 = op;
1759 if (op1 == TOK_LT)
1760 op1 = TOK_ULT;
1761 else if (op1 == TOK_LE)
1762 op1 = TOK_ULE;
1763 else if (op1 == TOK_GT)
1764 op1 = TOK_UGT;
1765 else if (op1 == TOK_GE)
1766 op1 = TOK_UGE;
1767 gen_op(op1);
1768 a = gvtst(1, a);
1769 gsym(b);
1770 vseti(VT_JMPI, a);
1771 break;
1774 #endif
1776 static uint64_t gen_opic_sdiv(uint64_t a, uint64_t b)
1778 uint64_t x = (a >> 63 ? -a : a) / (b >> 63 ? -b : b);
1779 return (a ^ b) >> 63 ? -x : x;
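/* signed less-than on uint64_t operands: XOR-ing the sign bit into each value
   maps signed order onto unsigned order, so an ordinary unsigned compare of
   the biased values gives the signed result */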
1782 static int gen_opic_lt(uint64_t a, uint64_t b)
1784 return (a ^ (uint64_t)1 << 63) < (b ^ (uint64_t)1 << 63);
1787 /* handle integer constant optimizations and various machine
1788 independent opt */
1789 static void gen_opic(int op)
1791 SValue *v1 = vtop - 1;
1792 SValue *v2 = vtop;
1793 int t1 = v1->type.t & VT_BTYPE;
1794 int t2 = v2->type.t & VT_BTYPE;
1795 int c1 = (v1->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;
1796 int c2 = (v2->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;
1797 uint64_t l1 = c1 ? v1->c.i : 0;
1798 uint64_t l2 = c2 ? v2->c.i : 0;
1799 int shm = (t1 == VT_LLONG) ? 63 : 31;
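/* normalize 32-bit constants to canonical 64-bit values: keep the low 32 bits
   and sign-extend unless the operand is unsigned, so the constant folding
   below works uniformly */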
1801 if (t1 != VT_LLONG && (PTR_SIZE != 8 || t1 != VT_PTR))
1802 l1 = ((uint32_t)l1 |
1803 (v1->type.t & VT_UNSIGNED ? 0 : -(l1 & 0x80000000)));
1804 if (t2 != VT_LLONG && (PTR_SIZE != 8 || t2 != VT_PTR))
1805 l2 = ((uint32_t)l2 |
1806 (v2->type.t & VT_UNSIGNED ? 0 : -(l2 & 0x80000000)));
1808 if (c1 && c2) {
1809 switch(op) {
1810 case '+': l1 += l2; break;
1811 case '-': l1 -= l2; break;
1812 case '&': l1 &= l2; break;
1813 case '^': l1 ^= l2; break;
1814 case '|': l1 |= l2; break;
1815 case '*': l1 *= l2; break;
1817 case TOK_PDIV:
1818 case '/':
1819 case '%':
1820 case TOK_UDIV:
1821 case TOK_UMOD:
1822 /* if division by zero, generate explicit division */
1823 if (l2 == 0) {
1824 if (const_wanted)
1825 tcc_error("division by zero in constant");
1826 goto general_case;
1828 switch(op) {
1829 default: l1 = gen_opic_sdiv(l1, l2); break;
1830 case '%': l1 = l1 - l2 * gen_opic_sdiv(l1, l2); break;
1831 case TOK_UDIV: l1 = l1 / l2; break;
1832 case TOK_UMOD: l1 = l1 % l2; break;
1834 break;
1835 case TOK_SHL: l1 <<= (l2 & shm); break;
1836 case TOK_SHR: l1 >>= (l2 & shm); break;
1837 case TOK_SAR:
1838 l1 = (l1 >> 63) ? ~(~l1 >> (l2 & shm)) : l1 >> (l2 & shm);
1839 break;
1840 /* tests */
1841 case TOK_ULT: l1 = l1 < l2; break;
1842 case TOK_UGE: l1 = l1 >= l2; break;
1843 case TOK_EQ: l1 = l1 == l2; break;
1844 case TOK_NE: l1 = l1 != l2; break;
1845 case TOK_ULE: l1 = l1 <= l2; break;
1846 case TOK_UGT: l1 = l1 > l2; break;
1847 case TOK_LT: l1 = gen_opic_lt(l1, l2); break;
1848 case TOK_GE: l1 = !gen_opic_lt(l1, l2); break;
1849 case TOK_LE: l1 = !gen_opic_lt(l2, l1); break;
1850 case TOK_GT: l1 = gen_opic_lt(l2, l1); break;
1851 /* logical */
1852 case TOK_LAND: l1 = l1 && l2; break;
1853 case TOK_LOR: l1 = l1 || l2; break;
1854 default:
1855 goto general_case;
1857 if (t1 != VT_LLONG && (PTR_SIZE != 8 || t1 != VT_PTR))
1858 l1 = ((uint32_t)l1 |
1859 (v1->type.t & VT_UNSIGNED ? 0 : -(l1 & 0x80000000)));
1860 v1->c.i = l1;
1861 vtop--;
1862 } else {
1863 /* if commutative ops, put c2 as constant */
1864 if (c1 && (op == '+' || op == '&' || op == '^' ||
1865 op == '|' || op == '*')) {
1866 vswap();
1867 c2 = c1; //c = c1, c1 = c2, c2 = c;
1868 l2 = l1; //l = l1, l1 = l2, l2 = l;
1870 if (!const_wanted &&
1871 c1 && ((l1 == 0 &&
1872 (op == TOK_SHL || op == TOK_SHR || op == TOK_SAR)) ||
1873 (l1 == -1 && op == TOK_SAR))) {
1874 /* treat (0 << x), (0 >> x) and (-1 >> x) as constant */
1875 vtop--;
1876 } else if (!const_wanted &&
1877 c2 && ((l2 == 0 && (op == '&' || op == '*')) ||
1878 (op == '|' &&
1879 (l2 == -1 || (l2 == 0xFFFFFFFF && t2 != VT_LLONG))) ||
1880 (l2 == 1 && (op == '%' || op == TOK_UMOD)))) {
1881 /* treat (x & 0), (x * 0), (x | -1) and (x % 1) as constant */
1882 if (l2 == 1)
1883 vtop->c.i = 0;
1884 vswap();
1885 vtop--;
1886 } else if (c2 && (((op == '*' || op == '/' || op == TOK_UDIV ||
1887 op == TOK_PDIV) &&
1888 l2 == 1) ||
1889 ((op == '+' || op == '-' || op == '|' || op == '^' ||
1890 op == TOK_SHL || op == TOK_SHR || op == TOK_SAR) &&
1891 l2 == 0) ||
1892 (op == '&' &&
1893 (l2 == -1 || (l2 == 0xFFFFFFFF && t2 != VT_LLONG))))) {
1894 /* filter out NOP operations like x*1, x-0, x&-1... */
1895 vtop--;
1896 } else if (c2 && (op == '*' || op == TOK_PDIV || op == TOK_UDIV)) {
1897 /* try to use shifts instead of muls or divs */
1898 if (l2 > 0 && (l2 & (l2 - 1)) == 0) {
1899 int n = -1;
1900 while (l2) {
1901 l2 >>= 1;
1902 n++;
1904 vtop->c.i = n;
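/* n is now log2 of the original l2, e.g. 3 for l2 == 8, so x * 8 becomes
   x << 3 and a division by 8 becomes a right shift by 3 */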
1905 if (op == '*')
1906 op = TOK_SHL;
1907 else if (op == TOK_PDIV)
1908 op = TOK_SAR;
1909 else
1910 op = TOK_SHR;
1912 goto general_case;
1913 } else if (c2 && (op == '+' || op == '-') &&
1914 (((vtop[-1].r & (VT_VALMASK | VT_LVAL | VT_SYM)) == (VT_CONST | VT_SYM))
1915 || (vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_LOCAL)) {
1916 /* symbol + constant case */
1917 if (op == '-')
1918 l2 = -l2;
1919 l2 += vtop[-1].c.i;
1920 /* The backends can't always deal with addends to symbols
1921 larger than +-1<<31. Don't construct such. */
1922 if ((int)l2 != l2)
1923 goto general_case;
1924 vtop--;
1925 vtop->c.i = l2;
1926 } else {
1927 general_case:
1928 /* call low level op generator */
1929 if (t1 == VT_LLONG || t2 == VT_LLONG ||
1930 (PTR_SIZE == 8 && (t1 == VT_PTR || t2 == VT_PTR)))
1931 gen_opl(op);
1932 else
1933 gen_opi(op);
1938 /* generate a floating point operation with constant propagation */
1939 static void gen_opif(int op)
1941 int c1, c2;
1942 SValue *v1, *v2;
1943 #if defined _MSC_VER && defined _AMD64_
1944 /* avoid bad optimization with f1 -= f2 for f1:-0.0, f2:0.0 */
1945 volatile
1946 #endif
1947 long double f1, f2;
1949 v1 = vtop - 1;
1950 v2 = vtop;
1951 /* currently, we cannot do computations with forward symbols */
1952 c1 = (v1->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;
1953 c2 = (v2->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;
1954 if (c1 && c2) {
1955 if (v1->type.t == VT_FLOAT) {
1956 f1 = v1->c.f;
1957 f2 = v2->c.f;
1958 } else if (v1->type.t == VT_DOUBLE) {
1959 f1 = v1->c.d;
1960 f2 = v2->c.d;
1961 } else {
1962 f1 = v1->c.ld;
1963 f2 = v2->c.ld;
1966 /* NOTE: we only do constant propagation if finite number (not
1967 NaN or infinity) (ANSI spec) */
1968 if (!ieee_finite(f1) || !ieee_finite(f2))
1969 goto general_case;
1971 switch(op) {
1972 case '+': f1 += f2; break;
1973 case '-': f1 -= f2; break;
1974 case '*': f1 *= f2; break;
1975 case '/':
1976 if (f2 == 0.0) {
1977 if (const_wanted)
1978 tcc_error("division by zero in constant");
1979 goto general_case;
1981 f1 /= f2;
1982 break;
1983 /* XXX: also handles tests ? */
1984 default:
1985 goto general_case;
1987 /* XXX: overflow test ? */
1988 if (v1->type.t == VT_FLOAT) {
1989 v1->c.f = f1;
1990 } else if (v1->type.t == VT_DOUBLE) {
1991 v1->c.d = f1;
1992 } else {
1993 v1->c.ld = f1;
1995 vtop--;
1996 } else {
1997 general_case:
1998 gen_opf(op);
2002 static int pointed_size(CType *type)
2004 int align;
2005 return type_size(pointed_type(type), &align);
2008 static void vla_runtime_pointed_size(CType *type)
2010 int align;
2011 vla_runtime_type_size(pointed_type(type), &align);
2014 static inline int is_null_pointer(SValue *p)
2016 if ((p->r & (VT_VALMASK | VT_LVAL | VT_SYM)) != VT_CONST)
2017 return 0;
2018 return ((p->type.t & VT_BTYPE) == VT_INT && (uint32_t)p->c.i == 0) ||
2019 ((p->type.t & VT_BTYPE) == VT_LLONG && p->c.i == 0) ||
2020 ((p->type.t & VT_BTYPE) == VT_PTR &&
2021 (PTR_SIZE == 4 ? (uint32_t)p->c.i == 0 : p->c.i == 0));
2024 static inline int is_integer_btype(int bt)
2026 return (bt == VT_BYTE || bt == VT_SHORT ||
2027 bt == VT_INT || bt == VT_LLONG);
2030 /* check types for comparison or subtraction of pointers */
2031 static void check_comparison_pointer_types(SValue *p1, SValue *p2, int op)
2033 CType *type1, *type2, tmp_type1, tmp_type2;
2034 int bt1, bt2;
2036 /* null pointers are accepted for all comparisons as gcc */
2037 if (is_null_pointer(p1) || is_null_pointer(p2))
2038 return;
2039 type1 = &p1->type;
2040 type2 = &p2->type;
2041 bt1 = type1->t & VT_BTYPE;
2042 bt2 = type2->t & VT_BTYPE;
2043 /* accept comparison between pointer and integer with a warning */
2044 if ((is_integer_btype(bt1) || is_integer_btype(bt2)) && op != '-') {
2045 if (op != TOK_LOR && op != TOK_LAND )
2046 tcc_warning("comparison between pointer and integer");
2047 return;
2050 /* both must be pointers or implicit function pointers */
2051 if (bt1 == VT_PTR) {
2052 type1 = pointed_type(type1);
2053 } else if (bt1 != VT_FUNC)
2054 goto invalid_operands;
2056 if (bt2 == VT_PTR) {
2057 type2 = pointed_type(type2);
2058 } else if (bt2 != VT_FUNC) {
2059 invalid_operands:
2060 tcc_error("invalid operands to binary %s", get_tok_str(op, NULL));
2062 if ((type1->t & VT_BTYPE) == VT_VOID ||
2063 (type2->t & VT_BTYPE) == VT_VOID)
2064 return;
2065 tmp_type1 = *type1;
2066 tmp_type2 = *type2;
2067 tmp_type1.t &= ~(VT_DEFSIGN | VT_UNSIGNED | VT_CONSTANT | VT_VOLATILE);
2068 tmp_type2.t &= ~(VT_DEFSIGN | VT_UNSIGNED | VT_CONSTANT | VT_VOLATILE);
2069 if (!is_compatible_types(&tmp_type1, &tmp_type2)) {
2070 /* gcc-like error if '-' is used */
2071 if (op == '-')
2072 goto invalid_operands;
2073 else
2074 tcc_warning("comparison of distinct pointer types lacks a cast");
2078 /* generic gen_op: handles types problems */
2079 ST_FUNC void gen_op(int op)
2081 int u, t1, t2, bt1, bt2, t;
2082 CType type1;
2084 redo:
2085 t1 = vtop[-1].type.t;
2086 t2 = vtop[0].type.t;
2087 bt1 = t1 & VT_BTYPE;
2088 bt2 = t2 & VT_BTYPE;
2090 if (bt1 == VT_STRUCT || bt2 == VT_STRUCT) {
2091 tcc_error("operation on a struct");
2092 } else if (bt1 == VT_FUNC || bt2 == VT_FUNC) {
2093 if (bt2 == VT_FUNC) {
2094 mk_pointer(&vtop->type);
2095 gaddrof();
2097 if (bt1 == VT_FUNC) {
2098 vswap();
2099 mk_pointer(&vtop->type);
2100 gaddrof();
2101 vswap();
2103 goto redo;
2104 } else if (bt1 == VT_PTR || bt2 == VT_PTR) {
2105 /* at least one operand is a pointer */
2106 /* relational op: must be both pointers */
2107 if (op >= TOK_ULT && op <= TOK_LOR) {
2108 check_comparison_pointer_types(vtop - 1, vtop, op);
2109 /* pointers are handled as unsigned */
2110 #if PTR_SIZE == 8
2111 t = VT_LLONG | VT_UNSIGNED;
2112 #else
2113 t = VT_INT | VT_UNSIGNED;
2114 #endif
2115 goto std_op;
2117 /* if both pointers, then it must be the '-' op */
2118 if (bt1 == VT_PTR && bt2 == VT_PTR) {
2119 if (op != '-')
2120 tcc_error("cannot use pointers here");
2121 check_comparison_pointer_types(vtop - 1, vtop, op);
2122 /* XXX: check that types are compatible */
2123 if (vtop[-1].type.t & VT_VLA) {
2124 vla_runtime_pointed_size(&vtop[-1].type);
2125 } else {
2126 vpushi(pointed_size(&vtop[-1].type));
2128 vrott(3);
2129 gen_opic(op);
2130 /* set to integer type */
2131 #if PTR_SIZE == 8
2132 vtop->type.t = VT_LLONG;
2133 #else
2134 vtop->type.t = VT_INT;
2135 #endif
2136 vswap();
2137 gen_op(TOK_PDIV);
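/* the byte difference computed above is divided by the element size,
   yielding the element count required for pointer subtraction */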
2138 } else {
2139 /* exactly one pointer : must be '+' or '-'. */
2140 if (op != '-' && op != '+')
2141 tcc_error("cannot use pointers here");
2142 /* Put pointer as first operand */
2143 if (bt2 == VT_PTR) {
2144 vswap();
2145 t = t1, t1 = t2, t2 = t;
2147 #if PTR_SIZE == 4
2148 if ((vtop[0].type.t & VT_BTYPE) == VT_LLONG)
2149 /* XXX: truncate here because gen_opl can't handle ptr + long long */
2150 gen_cast_s(VT_INT);
2151 #endif
2152 type1 = vtop[-1].type;
2153 type1.t &= ~VT_ARRAY;
2154 if (vtop[-1].type.t & VT_VLA)
2155 vla_runtime_pointed_size(&vtop[-1].type);
2156 else {
2157 u = pointed_size(&vtop[-1].type);
2158 if (u < 0)
2159 tcc_error("unknown array element size");
2160 #if PTR_SIZE == 8
2161 vpushll(u);
2162 #else
2163 /* XXX: cast to int ? (long long case) */
2164 vpushi(u);
2165 #endif
2167 gen_op('*');
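/* the integer operand is now scaled by the element size, e.g. p + 3 on an
   int pointer advances by 3 * sizeof(int) bytes */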
2168 #if 0
2169 /* #ifdef CONFIG_TCC_BCHECK
2170 The main reason for removing this code:
2171 #include <stdio.h>
2172 int main ()
2173 {
2174 int v[10];
2175 int i = 10;
2176 int j = 9;
2177 fprintf(stderr, "v+i-j = %p\n", v+i-j);
2178 fprintf(stderr, "v+(i-j) = %p\n", v+(i-j));
2179 }
2180 When this code is enabled, the output looks like
2181 v+i-j = 0xfffffffe
2182 v+(i-j) = 0xbff84000
2184 /* if evaluating constant expression, no code should be
2185 generated, so no bound check */
2186 if (tcc_state->do_bounds_check && !const_wanted) {
2187 /* if bounded pointers, we generate a special code to
2188 test bounds */
2189 if (op == '-') {
2190 vpushi(0);
2191 vswap();
2192 gen_op('-');
2194 gen_bounded_ptr_add();
2195 } else
2196 #endif
2198 gen_opic(op);
2200 /* restore the type in case gen_opic() swapped the operands */
2201 vtop->type = type1;
2203 } else if (is_float(bt1) || is_float(bt2)) {
2204 /* compute bigger type and do implicit casts */
2205 if (bt1 == VT_LDOUBLE || bt2 == VT_LDOUBLE) {
2206 t = VT_LDOUBLE;
2207 } else if (bt1 == VT_DOUBLE || bt2 == VT_DOUBLE) {
2208 t = VT_DOUBLE;
2209 } else {
2210 t = VT_FLOAT;
2212 /* floats can only be used for a few operations */
2213 if (op != '+' && op != '-' && op != '*' && op != '/' &&
2214 (op < TOK_ULT || op > TOK_GT))
2215 tcc_error("invalid operands for binary operation");
2216 goto std_op;
2217 } else if (op == TOK_SHR || op == TOK_SAR || op == TOK_SHL) {
2218 t = bt1 == VT_LLONG ? VT_LLONG : VT_INT;
2219 if ((t1 & (VT_BTYPE | VT_UNSIGNED | VT_BITFIELD)) == (t | VT_UNSIGNED))
2220 t |= VT_UNSIGNED;
2221 goto std_op;
2222 } else if (bt1 == VT_LLONG || bt2 == VT_LLONG) {
2223 /* cast to biggest op */
2224 t = VT_LLONG;
2225 /* check if we need to keep type as long or as long long */
2226 if ((t1 & VT_LONG && (t2 & (VT_BTYPE | VT_LONG)) != VT_LLONG) ||
2227 (t2 & VT_LONG && (t1 & (VT_BTYPE | VT_LONG)) != VT_LLONG))
2228 t |= VT_LONG;
2229 /* convert to unsigned if it does not fit in a long long */
2230 if ((t1 & (VT_BTYPE | VT_UNSIGNED | VT_BITFIELD)) == (VT_LLONG | VT_UNSIGNED) ||
2231 (t2 & (VT_BTYPE | VT_UNSIGNED | VT_BITFIELD)) == (VT_LLONG | VT_UNSIGNED))
2232 t |= VT_UNSIGNED;
2233 goto std_op;
2234 } else {
2235 /* integer operations */
2236 t = VT_INT;
2238 if ((t1 & VT_LONG) || (t2 & VT_LONG))
2239 t |= VT_LONG;
2241 /* convert to unsigned if it does not fit in an integer */
2242 if ((t1 & (VT_BTYPE | VT_UNSIGNED | VT_BITFIELD)) == (VT_INT | VT_UNSIGNED) ||
2243 (t2 & (VT_BTYPE | VT_UNSIGNED | VT_BITFIELD)) == (VT_INT | VT_UNSIGNED))
2244 t |= VT_UNSIGNED;
2245 std_op:
2246 /* XXX: currently, some unsigned operations are explicit, so
2247 we modify them here */
2248 if (t & VT_UNSIGNED) {
2249 if (op == TOK_SAR)
2250 op = TOK_SHR;
2251 else if (op == '/')
2252 op = TOK_UDIV;
2253 else if (op == '%')
2254 op = TOK_UMOD;
2255 else if (op == TOK_LT)
2256 op = TOK_ULT;
2257 else if (op == TOK_GT)
2258 op = TOK_UGT;
2259 else if (op == TOK_LE)
2260 op = TOK_ULE;
2261 else if (op == TOK_GE)
2262 op = TOK_UGE;
2264 vswap();
2265 type1.t = t;
2266 type1.ref = NULL;
2267 gen_cast(&type1);
2268 vswap();
2269 /* special case for shifts and long long: we keep the shift as
2270 an integer */
2271 if (op == TOK_SHR || op == TOK_SAR || op == TOK_SHL)
2272 type1.t = VT_INT;
2273 gen_cast(&type1);
2274 if (is_float(t))
2275 gen_opif(op);
2276 else
2277 gen_opic(op);
2278 if (op >= TOK_ULT && op <= TOK_GT) {
2279 /* relational op: the result is an int */
2280 vtop->type.t = VT_INT;
2281 } else {
2282 vtop->type.t = t;
2285 // Make sure that we have converted to an rvalue:
2286 if (vtop->r & VT_LVAL)
2287 gv(is_float(vtop->type.t & VT_BTYPE) ? RC_FLOAT : RC_INT);
2290 #ifndef TCC_TARGET_ARM
2291 /* generic itof for unsigned long long case */
2292 static void gen_cvt_itof1(int t)
2294 #ifdef TCC_TARGET_ARM64
2295 gen_cvt_itof(t);
2296 #else
2297 if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
2298 (VT_LLONG | VT_UNSIGNED)) {
2300 if (t == VT_FLOAT)
2301 vpush_global_sym(&func_old_type, TOK___floatundisf);
2302 #if LDOUBLE_SIZE != 8
2303 else if (t == VT_LDOUBLE)
2304 vpush_global_sym(&func_old_type, TOK___floatundixf);
2305 #endif
2306 else
2307 vpush_global_sym(&func_old_type, TOK___floatundidf);
2308 vrott(2);
2309 gfunc_call(1);
2310 vpushi(0);
2311 vtop->r = reg_fret(t);
2312 } else {
2313 gen_cvt_itof(t);
2315 #endif
2317 #endif
2319 /* generic ftoi for unsigned long long case */
2320 static void gen_cvt_ftoi1(int t)
2322 #ifdef TCC_TARGET_ARM64
2323 gen_cvt_ftoi(t);
2324 #else
2325 int st;
2327 if (t == (VT_LLONG | VT_UNSIGNED)) {
2328 /* not handled natively */
2329 st = vtop->type.t & VT_BTYPE;
2330 if (st == VT_FLOAT)
2331 vpush_global_sym(&func_old_type, TOK___fixunssfdi);
2332 #if LDOUBLE_SIZE != 8
2333 else if (st == VT_LDOUBLE)
2334 vpush_global_sym(&func_old_type, TOK___fixunsxfdi);
2335 #endif
2336 else
2337 vpush_global_sym(&func_old_type, TOK___fixunsdfdi);
2338 vrott(2);
2339 gfunc_call(1);
2340 vpushi(0);
2341 vtop->r = REG_IRET;
2342 vtop->r2 = REG_LRET;
2343 } else {
2344 gen_cvt_ftoi(t);
2346 #endif
2349 /* force char or short cast */
2350 static void force_charshort_cast(int t)
2352 int bits, dbt;
2354 /* cannot cast static initializers */
2355 if (STATIC_DATA_WANTED)
2356 return;
2358 dbt = t & VT_BTYPE;
2359 /* XXX: add optimization if lvalue : just change type and offset */
2360 if (dbt == VT_BYTE)
2361 bits = 8;
2362 else
2363 bits = 16;
2364 if (t & VT_UNSIGNED) {
2365 vpushi((1 << bits) - 1);
2366 gen_op('&');
2367 } else {
2368 if ((vtop->type.t & VT_BTYPE) == VT_LLONG)
2369 bits = 64 - bits;
2370 else
2371 bits = 32 - bits;
2372 vpushi(bits);
2373 gen_op(TOK_SHL);
2374 /* the result must be signed, otherwise gen_op() turns the SAR
2375 below into a logical SHR. This was not the case when "t" was
2376 a signed short and the last value on the stack was an unsigned int */
2377 vtop->type.t &= ~VT_UNSIGNED;
2378 vpushi(bits);
2379 gen_op(TOK_SAR);
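/* e.g. truncating a 32-bit value to signed char: shift left by 24, then
   arithmetic shift right by 24 to sign-extend the low byte */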
2383 /* cast 'vtop' to 'type'. Casting to bitfields is forbidden. */
2384 static void gen_cast_s(int t)
2386 CType type;
2387 type.t = t;
2388 type.ref = NULL;
2389 gen_cast(&type);
2392 static void gen_cast(CType *type)
2394 int sbt, dbt, sf, df, c, p;
2396 /* special delayed cast for char/short */
2397 /* XXX: in some cases (multiple cascaded casts), it may still
2398 be incorrect */
2399 if (vtop->r & VT_MUSTCAST) {
2400 vtop->r &= ~VT_MUSTCAST;
2401 force_charshort_cast(vtop->type.t);
2404 /* bitfields first get cast to ints */
2405 if (vtop->type.t & VT_BITFIELD) {
2406 gv(RC_INT);
2409 dbt = type->t & (VT_BTYPE | VT_UNSIGNED);
2410 sbt = vtop->type.t & (VT_BTYPE | VT_UNSIGNED);
2412 if (sbt != dbt) {
2413 sf = is_float(sbt);
2414 df = is_float(dbt);
2415 c = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;
2416 p = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == (VT_CONST | VT_SYM);
2417 if (c) {
2418 /* constant case: we can do it now */
2419 /* XXX: in ISOC, cannot do it if error in convert */
2420 if (sbt == VT_FLOAT)
2421 vtop->c.ld = vtop->c.f;
2422 else if (sbt == VT_DOUBLE)
2423 vtop->c.ld = vtop->c.d;
2425 if (df) {
2426 if ((sbt & VT_BTYPE) == VT_LLONG) {
2427 if ((sbt & VT_UNSIGNED) || !(vtop->c.i >> 63))
2428 vtop->c.ld = vtop->c.i;
2429 else
2430 vtop->c.ld = -(long double)-vtop->c.i;
2431 } else if(!sf) {
2432 if ((sbt & VT_UNSIGNED) || !(vtop->c.i >> 31))
2433 vtop->c.ld = (uint32_t)vtop->c.i;
2434 else
2435 vtop->c.ld = -(long double)-(uint32_t)vtop->c.i;
2438 if (dbt == VT_FLOAT)
2439 vtop->c.f = (float)vtop->c.ld;
2440 else if (dbt == VT_DOUBLE)
2441 vtop->c.d = (double)vtop->c.ld;
2442 } else if (sf && dbt == (VT_LLONG|VT_UNSIGNED)) {
2443 vtop->c.i = vtop->c.ld;
2444 } else if (sf && dbt == VT_BOOL) {
2445 vtop->c.i = (vtop->c.ld != 0);
2446 } else {
2447 if(sf)
2448 vtop->c.i = vtop->c.ld;
2449 else if (sbt == (VT_LLONG|VT_UNSIGNED))
2450 ;
2451 else if (sbt & VT_UNSIGNED)
2452 vtop->c.i = (uint32_t)vtop->c.i;
2453 #if PTR_SIZE == 8
2454 else if (sbt == VT_PTR)
2455 ;
2456 #endif
2457 else if (sbt != VT_LLONG)
2458 vtop->c.i = ((uint32_t)vtop->c.i |
2459 -(vtop->c.i & 0x80000000));
2461 if (dbt == (VT_LLONG|VT_UNSIGNED))
2462 ;
2463 else if (dbt == VT_BOOL)
2464 vtop->c.i = (vtop->c.i != 0);
2465 #if PTR_SIZE == 8
2466 else if (dbt == VT_PTR)
2467 ;
2468 #endif
2469 else if (dbt != VT_LLONG) {
2470 uint32_t m = ((dbt & VT_BTYPE) == VT_BYTE ? 0xff :
2471 (dbt & VT_BTYPE) == VT_SHORT ? 0xffff :
2472 0xffffffff);
2473 vtop->c.i &= m;
2474 if (!(dbt & VT_UNSIGNED))
2475 vtop->c.i |= -(vtop->c.i & ((m >> 1) + 1));
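/* (m >> 1) + 1 is the sign bit of the destination width; when that bit is
   set, OR-ing in its negation fills the upper bits, i.e. sign-extends the
   masked constant */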
2478 } else if (p && dbt == VT_BOOL) {
2479 vtop->r = VT_CONST;
2480 vtop->c.i = 1;
2481 } else {
2482 /* non constant case: generate code */
2483 if (sf && df) {
2484 /* convert from fp to fp */
2485 gen_cvt_ftof(dbt);
2486 } else if (df) {
2487 /* convert int to fp */
2488 gen_cvt_itof1(dbt);
2489 } else if (sf) {
2490 /* convert fp to int */
2491 if (dbt == VT_BOOL) {
2492 vpushi(0);
2493 gen_op(TOK_NE);
2494 } else {
2495 /* we handle char/short/etc... with generic code */
2496 if (dbt != (VT_INT | VT_UNSIGNED) &&
2497 dbt != (VT_LLONG | VT_UNSIGNED) &&
2498 dbt != VT_LLONG)
2499 dbt = VT_INT;
2500 gen_cvt_ftoi1(dbt);
2501 if (dbt == VT_INT && (type->t & (VT_BTYPE | VT_UNSIGNED)) != dbt) {
2502 /* additional cast for char/short... */
2503 vtop->type.t = dbt;
2504 gen_cast(type);
2507 #if PTR_SIZE == 4
2508 } else if ((dbt & VT_BTYPE) == VT_LLONG) {
2509 if ((sbt & VT_BTYPE) != VT_LLONG) {
2510 /* scalar to long long */
2511 /* machine independent conversion */
2512 gv(RC_INT);
2513 /* generate high word */
2514 if (sbt == (VT_INT | VT_UNSIGNED)) {
2515 vpushi(0);
2516 gv(RC_INT);
2517 } else {
2518 if (sbt == VT_PTR) {
2519 /* cast from pointer to int before we apply
2520 shift operation, which pointers don't support */
2521 gen_cast_s(VT_INT);
2523 gv_dup();
2524 vpushi(31);
2525 gen_op(TOK_SAR);
2527 /* patch second register */
2528 vtop[-1].r2 = vtop->r;
2529 vpop();
2531 #else
2532 } else if ((dbt & VT_BTYPE) == VT_LLONG ||
2533 (dbt & VT_BTYPE) == VT_PTR ||
2534 (dbt & VT_BTYPE) == VT_FUNC) {
2535 if ((sbt & VT_BTYPE) != VT_LLONG &&
2536 (sbt & VT_BTYPE) != VT_PTR &&
2537 (sbt & VT_BTYPE) != VT_FUNC) {
2538 /* need to convert from 32bit to 64bit */
2539 gv(RC_INT);
2540 if (sbt != (VT_INT | VT_UNSIGNED)) {
2541 #if defined(TCC_TARGET_ARM64)
2542 gen_cvt_sxtw();
2543 #elif defined(TCC_TARGET_X86_64)
2544 int r = gv(RC_INT);
2545 /* x86_64 specific: movslq */
2546 o(0x6348);
2547 o(0xc0 + (REG_VALUE(r) << 3) + REG_VALUE(r));
2548 #else
2549 #error
2550 #endif
2553 #endif
2554 } else if (dbt == VT_BOOL) {
2555 /* scalar to bool */
2556 vpushi(0);
2557 gen_op(TOK_NE);
2558 } else if ((dbt & VT_BTYPE) == VT_BYTE ||
2559 (dbt & VT_BTYPE) == VT_SHORT) {
2560 if (sbt == VT_PTR) {
2561 vtop->type.t = VT_INT;
2562 tcc_warning("nonportable conversion from pointer to char/short");
2564 force_charshort_cast(dbt);
2565 #if PTR_SIZE == 4
2566 } else if ((dbt & VT_BTYPE) == VT_INT) {
2567 /* scalar to int */
2568 if ((sbt & VT_BTYPE) == VT_LLONG) {
2569 /* from long long: just take low order word */
2570 lexpand();
2571 vpop();
2573 /* if lvalue and single word type, nothing to do because
2574 the lvalue already contains the real type size (see
2575 VT_LVAL_xxx constants) */
2576 #endif
2579 } else if ((dbt & VT_BTYPE) == VT_PTR && !(vtop->r & VT_LVAL)) {
2580 /* if we are casting between pointer types,
2581 we must update the VT_LVAL_xxx size */
2582 vtop->r = (vtop->r & ~VT_LVAL_TYPE)
2583 | (lvalue_type(type->ref->type.t) & VT_LVAL_TYPE);
2585 vtop->type = *type;
2588 /* return type size as known at compile time. Put alignment at 'a' */
2589 ST_FUNC int type_size(CType *type, int *a)
2591 Sym *s;
2592 int bt;
2594 bt = type->t & VT_BTYPE;
2595 if (bt == VT_STRUCT) {
2596 /* struct/union */
2597 s = type->ref;
2598 *a = s->r;
2599 return s->c;
2600 } else if (bt == VT_PTR) {
2601 if (type->t & VT_ARRAY) {
2602 int ts;
2604 s = type->ref;
2605 ts = type_size(&s->type, a);
2607 if (ts < 0 && s->c < 0)
2608 ts = -ts;
2610 return ts * s->c;
2611 } else {
2612 *a = PTR_SIZE;
2613 return PTR_SIZE;
2615 } else if (IS_ENUM(type->t) && type->ref->c == -1) {
2616 return -1; /* incomplete enum */
2617 } else if (bt == VT_LDOUBLE) {
2618 *a = LDOUBLE_ALIGN;
2619 return LDOUBLE_SIZE;
2620 } else if (bt == VT_DOUBLE || bt == VT_LLONG) {
2621 #ifdef TCC_TARGET_I386
2622 #ifdef TCC_TARGET_PE
2623 *a = 8;
2624 #else
2625 *a = 4;
2626 #endif
2627 #elif defined(TCC_TARGET_ARM)
2628 #ifdef TCC_ARM_EABI
2629 *a = 8;
2630 #else
2631 *a = 4;
2632 #endif
2633 #else
2634 *a = 8;
2635 #endif
2636 return 8;
2637 } else if (bt == VT_INT || bt == VT_FLOAT) {
2638 *a = 4;
2639 return 4;
2640 } else if (bt == VT_SHORT) {
2641 *a = 2;
2642 return 2;
2643 } else if (bt == VT_QLONG || bt == VT_QFLOAT) {
2644 *a = 8;
2645 return 16;
2646 } else {
2647 /* char, void, function, _Bool */
2648 *a = 1;
2649 return 1;
2653 /* push type size as known at run time on top of value stack. Put
2654 alignment at 'a' */
2655 ST_FUNC void vla_runtime_type_size(CType *type, int *a)
2657 if (type->t & VT_VLA) {
2658 type_size(&type->ref->type, a);
2659 vset(&int_type, VT_LOCAL|VT_LVAL, type->ref->c);
2660 } else {
2661 vpushi(type_size(type, a));
2665 static void vla_sp_restore(void) {
2666 if (vlas_in_scope) {
2667 gen_vla_sp_restore(vla_sp_loc);
2671 static void vla_sp_restore_root(void) {
2672 if (vlas_in_scope) {
2673 gen_vla_sp_restore(vla_sp_root_loc);
2677 /* return the pointed type of t */
2678 static inline CType *pointed_type(CType *type)
2680 return &type->ref->type;
2683 /* modify type so that it is a pointer to the given type. */
2684 ST_FUNC void mk_pointer(CType *type)
2686 Sym *s;
2687 s = sym_push(SYM_FIELD, type, 0, -1);
2688 type->t = VT_PTR | (type->t & VT_STORAGE);
2689 type->ref = s;
2692 /* compare function types. OLD functions match any new functions */
2693 static int is_compatible_func(CType *type1, CType *type2)
2695 Sym *s1, *s2;
2697 s1 = type1->ref;
2698 s2 = type2->ref;
2699 if (!is_compatible_types(&s1->type, &s2->type))
2700 return 0;
2701 /* check func_call */
2702 if (s1->f.func_call != s2->f.func_call)
2703 return 0;
2704 /* XXX: not complete */
2705 if (s1->f.func_type == FUNC_OLD || s2->f.func_type == FUNC_OLD)
2706 return 1;
2707 if (s1->f.func_type != s2->f.func_type)
2708 return 0;
2709 while (s1 != NULL) {
2710 if (s2 == NULL)
2711 return 0;
2712 if (!is_compatible_unqualified_types(&s1->type, &s2->type))
2713 return 0;
2714 s1 = s1->next;
2715 s2 = s2->next;
2717 if (s2)
2718 return 0;
2719 return 1;
2722 /* return true if type1 and type2 are the same. If unqualified is
2723 true, qualifiers on the types are ignored.
2725 - enums are not checked as gcc __builtin_types_compatible_p ()
2727 static int compare_types(CType *type1, CType *type2, int unqualified)
2729 int bt1, t1, t2;
2731 t1 = type1->t & VT_TYPE;
2732 t2 = type2->t & VT_TYPE;
2733 if (unqualified) {
2734 /* strip qualifiers before comparing */
2735 t1 &= ~(VT_CONSTANT | VT_VOLATILE);
2736 t2 &= ~(VT_CONSTANT | VT_VOLATILE);
2739 /* default vs. explicit signedness only matters for char */
2740 if ((t1 & VT_BTYPE) != VT_BYTE) {
2741 t1 &= ~VT_DEFSIGN;
2742 t2 &= ~VT_DEFSIGN;
2745 /* XXX: bitfields ? */
2746 if (t1 != t2)
2747 return 0;
2748 /* test more complicated cases */
2749 bt1 = t1 & VT_BTYPE;
2750 if (bt1 == VT_PTR) {
2751 type1 = pointed_type(type1);
2752 type2 = pointed_type(type2);
2753 return is_compatible_types(type1, type2);
2754 } else if (bt1 == VT_STRUCT) {
2755 return (type1->ref == type2->ref);
2756 } else if (bt1 == VT_FUNC) {
2757 return is_compatible_func(type1, type2);
2758 } else {
2759 return 1;
2763 /* return true if type1 and type2 are exactly the same (including
2764 qualifiers).
2766 static int is_compatible_types(CType *type1, CType *type2)
2768 return compare_types(type1,type2,0);
2771 /* return true if type1 and type2 are the same (ignoring qualifiers).
2773 static int is_compatible_unqualified_types(CType *type1, CType *type2)
2775 return compare_types(type1,type2,1);
2778 /* print a type. If 'varstr' is not NULL, then the variable is also
2779 printed in the type */
2780 /* XXX: union */
2781 /* XXX: add array and function pointers */
2782 static void type_to_str(char *buf, int buf_size,
2783 CType *type, const char *varstr)
2785 int bt, v, t;
2786 Sym *s, *sa;
2787 char buf1[256];
2788 const char *tstr;
2790 t = type->t;
2791 bt = t & VT_BTYPE;
2792 buf[0] = '\0';
2793 if (t & VT_CONSTANT)
2794 pstrcat(buf, buf_size, "const ");
2795 if (t & VT_VOLATILE)
2796 pstrcat(buf, buf_size, "volatile ");
2797 if ((t & (VT_DEFSIGN | VT_UNSIGNED)) == (VT_DEFSIGN | VT_UNSIGNED))
2798 pstrcat(buf, buf_size, "unsigned ");
2799 else if (t & VT_DEFSIGN)
2800 pstrcat(buf, buf_size, "signed ");
2801 if (t & VT_EXTERN)
2802 pstrcat(buf, buf_size, "extern ");
2803 if (t & VT_STATIC)
2804 pstrcat(buf, buf_size, "static ");
2805 if (t & VT_TYPEDEF)
2806 pstrcat(buf, buf_size, "typedef ");
2807 if (t & VT_INLINE)
2808 pstrcat(buf, buf_size, "inline ");
2809 buf_size -= strlen(buf);
2810 buf += strlen(buf);
2811 if (IS_ENUM(t)) {
2812 tstr = "enum ";
2813 goto tstruct;
2816 if (!bt && VT_LONG & t) {
2817 tstr = "long";
2818 goto add_tstr;
2821 switch(bt) {
2822 case VT_VOID:
2823 tstr = "void";
2824 goto add_tstr;
2825 case VT_BOOL:
2826 tstr = "_Bool";
2827 goto add_tstr;
2828 case VT_BYTE:
2829 tstr = "char";
2830 goto add_tstr;
2831 case VT_SHORT:
2832 tstr = "short";
2833 goto add_tstr;
2834 case VT_INT:
2835 tstr = "int";
2836 goto add_tstr;
2837 case VT_LLONG:
2838 tstr = "long long";
2839 goto add_tstr;
2840 case VT_FLOAT:
2841 tstr = "float";
2842 goto add_tstr;
2843 case VT_DOUBLE:
2844 tstr = "double";
2845 goto add_tstr;
2846 case VT_LDOUBLE:
2847 tstr = "long double";
2848 add_tstr:
2849 pstrcat(buf, buf_size, tstr);
2850 break;
2851 case VT_STRUCT:
2852 tstr = "struct ";
2853 if (IS_UNION(t))
2854 tstr = "union ";
2855 tstruct:
2856 pstrcat(buf, buf_size, tstr);
2857 v = type->ref->v & ~SYM_STRUCT;
2858 if (v >= SYM_FIRST_ANOM)
2859 pstrcat(buf, buf_size, "<anonymous>");
2860 else
2861 pstrcat(buf, buf_size, get_tok_str(v, NULL));
2862 break;
2863 case VT_FUNC:
2864 s = type->ref;
2865 type_to_str(buf, buf_size, &s->type, varstr);
2866 pstrcat(buf, buf_size, "(");
2867 sa = s->next;
2868 while (sa != NULL) {
2869 type_to_str(buf1, sizeof(buf1), &sa->type, NULL);
2870 pstrcat(buf, buf_size, buf1);
2871 sa = sa->next;
2872 if (sa)
2873 pstrcat(buf, buf_size, ", ");
2875 pstrcat(buf, buf_size, ")");
2876 goto no_var;
2877 case VT_PTR:
2878 s = type->ref;
2879 if (t & VT_ARRAY) {
2880 snprintf(buf1, sizeof(buf1), "%s[%d]", varstr ? varstr : "", s->c);
2881 type_to_str(buf, buf_size, &s->type, buf1);
2882 goto no_var;
2884 pstrcpy(buf1, sizeof(buf1), "*");
2885 if (t & VT_CONSTANT)
2886 pstrcat(buf1, sizeof(buf1), "const ");
2887 if (t & VT_VOLATILE)
2888 pstrcat(buf1, sizeof(buf1), "volatile ");
2889 if (varstr)
2890 pstrcat(buf1, sizeof(buf1), varstr);
2891 type_to_str(buf, buf_size, &s->type, buf1);
2892 goto no_var;
2894 if (varstr) {
2895 pstrcat(buf, buf_size, " ");
2896 pstrcat(buf, buf_size, varstr);
2898 no_var: ;
2901 /* verify type compatibility to store vtop in 'dt' type, and generate
2902 casts if needed. */
2903 static void gen_assign_cast(CType *dt)
2905 CType *st, *type1, *type2, tmp_type1, tmp_type2;
2906 char buf1[256], buf2[256];
2907 int dbt, sbt;
2909 st = &vtop->type; /* source type */
2910 dbt = dt->t & VT_BTYPE;
2911 sbt = st->t & VT_BTYPE;
2912 if (sbt == VT_VOID || dbt == VT_VOID) {
2913 if (sbt == VT_VOID && dbt == VT_VOID)
2914 ; /*
2915 It is OK if both are void
2916 A test program:
2917 void func1() {}
2918 void func2() {
2919 return func1();
2920 }
2921 gcc accepts this program
2922 */
2923 else
2924 tcc_error("cannot cast from/to void");
2926 if (dt->t & VT_CONSTANT)
2927 tcc_warning("assignment of read-only location");
2928 switch(dbt) {
2929 case VT_PTR:
2930 /* special cases for pointers */
2931 /* '0' can also be a pointer */
2932 if (is_null_pointer(vtop))
2933 goto type_ok;
2934 /* accept implicit pointer to integer cast with warning */
2935 if (is_integer_btype(sbt)) {
2936 tcc_warning("assignment makes pointer from integer without a cast");
2937 goto type_ok;
2939 type1 = pointed_type(dt);
2940 /* a function is implicitly a function pointer */
2941 if (sbt == VT_FUNC) {
2942 if ((type1->t & VT_BTYPE) != VT_VOID &&
2943 !is_compatible_types(pointed_type(dt), st))
2944 tcc_warning("assignment from incompatible pointer type");
2945 goto type_ok;
2947 if (sbt != VT_PTR)
2948 goto error;
2949 type2 = pointed_type(st);
2950 if ((type1->t & VT_BTYPE) == VT_VOID ||
2951 (type2->t & VT_BTYPE) == VT_VOID) {
2952 /* void * can match anything */
2953 } else {
2954 //printf("types %08x %08x\n", type1->t, type2->t);
2955 /* exact type match, except for qualifiers */
2956 if (!is_compatible_unqualified_types(type1, type2)) {
2957 /* Like GCC, don't warn by default for mere changes
2958 in pointer target signedness. Do warn for different
2959 base types, though, in particular for unsigned enums
2960 and signed int targets. */
2961 if ((type1->t & VT_BTYPE) != (type2->t & VT_BTYPE)
2962 || IS_ENUM(type1->t) || IS_ENUM(type2->t)
2964 tcc_warning("assignment from incompatible pointer type");
2967 /* check const and volatile */
2968 if ((!(type1->t & VT_CONSTANT) && (type2->t & VT_CONSTANT)) ||
2969 (!(type1->t & VT_VOLATILE) && (type2->t & VT_VOLATILE)))
2970 tcc_warning("assignment discards qualifiers from pointer target type");
2971 break;
2972 case VT_BYTE:
2973 case VT_SHORT:
2974 case VT_INT:
2975 case VT_LLONG:
2976 if (sbt == VT_PTR || sbt == VT_FUNC) {
2977 tcc_warning("assignment makes integer from pointer without a cast");
2978 } else if (sbt == VT_STRUCT) {
2979 goto case_VT_STRUCT;
2981 /* XXX: more tests */
2982 break;
2983 case VT_STRUCT:
2984 case_VT_STRUCT:
2985 tmp_type1 = *dt;
2986 tmp_type2 = *st;
2987 tmp_type1.t &= ~(VT_CONSTANT | VT_VOLATILE);
2988 tmp_type2.t &= ~(VT_CONSTANT | VT_VOLATILE);
2989 if (!is_compatible_types(&tmp_type1, &tmp_type2)) {
2990 error:
2991 type_to_str(buf1, sizeof(buf1), st, NULL);
2992 type_to_str(buf2, sizeof(buf2), dt, NULL);
2993 tcc_error("cannot cast '%s' to '%s'", buf1, buf2);
2995 break;
2997 type_ok:
2998 gen_cast(dt);
3001 /* store vtop in lvalue pushed on stack */
3002 ST_FUNC void vstore(void)
3004 int sbt, dbt, ft, r, t, size, align, bit_size, bit_pos, rc, delayed_cast;
3006 ft = vtop[-1].type.t;
3007 sbt = vtop->type.t & VT_BTYPE;
3008 dbt = ft & VT_BTYPE;
3009 if ((((sbt == VT_INT || sbt == VT_SHORT) && dbt == VT_BYTE) ||
3010 (sbt == VT_INT && dbt == VT_SHORT))
3011 && !(vtop->type.t & VT_BITFIELD)) {
3012 /* optimize char/short casts */
3013 delayed_cast = VT_MUSTCAST;
3014 vtop->type.t = ft & VT_TYPE;
3015 /* XXX: factorize */
3016 if (ft & VT_CONSTANT)
3017 tcc_warning("assignment of read-only location");
3018 } else {
3019 delayed_cast = 0;
3020 if (!(ft & VT_BITFIELD))
3021 gen_assign_cast(&vtop[-1].type);
3024 if (sbt == VT_STRUCT) {
3025 /* if structure, only generate pointer */
3026 /* structure assignment : generate memcpy */
3027 /* XXX: optimize if small size */
3028 size = type_size(&vtop->type, &align);
3030 /* destination */
3031 vswap();
3032 vtop->type.t = VT_PTR;
3033 gaddrof();
3035 /* address of memcpy() */
3036 #ifdef TCC_ARM_EABI
3037 if(!(align & 7))
3038 vpush_global_sym(&func_old_type, TOK_memcpy8);
3039 else if(!(align & 3))
3040 vpush_global_sym(&func_old_type, TOK_memcpy4);
3041 else
3042 #endif
3043 /* Use memmove, rather than memcpy, as dest and src may be the same: */
3044 vpush_global_sym(&func_old_type, TOK_memmove);
3046 vswap();
3047 /* source */
3048 vpushv(vtop - 2);
3049 vtop->type.t = VT_PTR;
3050 gaddrof();
3051 /* type size */
3052 vpushi(size);
3053 gfunc_call(3);
3055 /* leave source on stack */
3056 } else if (ft & VT_BITFIELD) {
3057 /* bitfield store handling */
3059 /* save lvalue as expression result (example: s.b = s.a = n;) */
3060 vdup(), vtop[-1] = vtop[-2];
3062 bit_pos = BIT_POS(ft);
3063 bit_size = BIT_SIZE(ft);
3064 /* remove bit field info to avoid loops */
3065 vtop[-1].type.t = ft & ~VT_STRUCT_MASK;
3067 if ((ft & VT_BTYPE) == VT_BOOL) {
3068 gen_cast(&vtop[-1].type);
3069 vtop[-1].type.t = (vtop[-1].type.t & ~VT_BTYPE) | (VT_BYTE | VT_UNSIGNED);
3072 r = adjust_bf(vtop - 1, bit_pos, bit_size);
3073 if (r == VT_STRUCT) {
3074 gen_cast_s((ft & VT_BTYPE) == VT_LLONG ? VT_LLONG : VT_INT);
3075 store_packed_bf(bit_pos, bit_size);
3076 } else {
3077 unsigned long long mask = (1ULL << bit_size) - 1;
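/* net effect of the sequence below:
   dest = (dest & ~(mask << bit_pos)) | ((src & mask) << bit_pos) */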
3078 if ((ft & VT_BTYPE) != VT_BOOL) {
3079 /* mask source */
3080 if ((vtop[-1].type.t & VT_BTYPE) == VT_LLONG)
3081 vpushll(mask);
3082 else
3083 vpushi((unsigned)mask);
3084 gen_op('&');
3086 /* shift source */
3087 vpushi(bit_pos);
3088 gen_op(TOK_SHL);
3089 vswap();
3090 /* duplicate destination */
3091 vdup();
3092 vrott(3);
3093 /* load destination, mask and or with source */
3094 if ((vtop->type.t & VT_BTYPE) == VT_LLONG)
3095 vpushll(~(mask << bit_pos));
3096 else
3097 vpushi(~((unsigned)mask << bit_pos));
3098 gen_op('&');
3099 gen_op('|');
3100 /* store result */
3101 vstore();
3102 /* ... and discard */
3103 vpop();
3105 } else {
3106 #ifdef CONFIG_TCC_BCHECK
3107 /* bound check case */
3108 if (vtop[-1].r & VT_MUSTBOUND) {
3109 vswap();
3110 gbound();
3111 vswap();
3113 #endif
3114 rc = RC_INT;
3115 if (is_float(ft)) {
3116 rc = RC_FLOAT;
3117 #ifdef TCC_TARGET_X86_64
3118 if ((ft & VT_BTYPE) == VT_LDOUBLE) {
3119 rc = RC_ST0;
3120 } else if ((ft & VT_BTYPE) == VT_QFLOAT) {
3121 rc = RC_FRET;
3123 #endif
3125 r = gv(rc); /* generate value */
3126 /* if lvalue was saved on stack, must read it */
3127 if ((vtop[-1].r & VT_VALMASK) == VT_LLOCAL) {
3128 SValue sv;
3129 t = get_reg(RC_INT);
3130 #if PTR_SIZE == 8
3131 sv.type.t = VT_PTR;
3132 #else
3133 sv.type.t = VT_INT;
3134 #endif
3135 sv.r = VT_LOCAL | VT_LVAL;
3136 sv.c.i = vtop[-1].c.i;
3137 load(t, &sv);
3138 vtop[-1].r = t | VT_LVAL;
3140 /* two word case handling : store second register at word + 4 (or +8 for x86-64) */
3141 #if PTR_SIZE == 8
3142 if (((ft & VT_BTYPE) == VT_QLONG) || ((ft & VT_BTYPE) == VT_QFLOAT)) {
3143 int addr_type = VT_LLONG, load_size = 8, load_type = ((vtop->type.t & VT_BTYPE) == VT_QLONG) ? VT_LLONG : VT_DOUBLE;
3144 #else
3145 if ((ft & VT_BTYPE) == VT_LLONG) {
3146 int addr_type = VT_INT, load_size = 4, load_type = VT_INT;
3147 #endif
3148 vtop[-1].type.t = load_type;
3149 store(r, vtop - 1);
3150 vswap();
3151 /* convert to int to increment easily */
3152 vtop->type.t = addr_type;
3153 gaddrof();
3154 vpushi(load_size);
3155 gen_op('+');
3156 vtop->r |= VT_LVAL;
3157 vswap();
3158 vtop[-1].type.t = load_type;
3159 /* XXX: it works because r2 is spilled last ! */
3160 store(vtop->r2, vtop - 1);
3161 } else {
3162 store(r, vtop - 1);
3165 vswap();
3166 vtop--; /* NOT vpop() because on x86 it would flush the fp stack */
3167 vtop->r |= delayed_cast;
3171 /* post defines POST/PRE add. c is the token ++ or -- */
3172 ST_FUNC void inc(int post, int c)
3174 test_lvalue();
3175 vdup(); /* save lvalue */
3176 if (post) {
3177 gv_dup(); /* duplicate value */
3178 vrotb(3);
3179 vrotb(3);
3181 /* add constant */
3182 vpushi(c - TOK_MID);
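/* c - TOK_MID evaluates to +1 for TOK_INC and -1 for TOK_DEC, so a single
   '+' handles both increment and decrement */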
3183 gen_op('+');
3184 vstore(); /* store value */
3185 if (post)
3186 vpop(); /* if post op, return saved value */
3189 ST_FUNC void parse_mult_str (CString *astr, const char *msg)
3191 /* read the string */
3192 if (tok != TOK_STR)
3193 expect(msg);
3194 cstr_new(astr);
3195 while (tok == TOK_STR) {
3196 /* XXX: add \0 handling too ? */
3197 cstr_cat(astr, tokc.str.data, -1);
3198 next();
3200 cstr_ccat(astr, '\0');
3203 /* If I is >= 1 and a power of two, returns log2(i)+1.
3204 If I is 0 returns 0. */
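/* e.g. exact_log2p1(1) == 1, exact_log2p1(8) == 4; used to store the
   'aligned' attribute compactly */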
3205 static int exact_log2p1(int i)
3207 int ret;
3208 if (!i)
3209 return 0;
3210 for (ret = 1; i >= 1 << 8; ret += 8)
3211 i >>= 8;
3212 if (i >= 1 << 4)
3213 ret += 4, i >>= 4;
3214 if (i >= 1 << 2)
3215 ret += 2, i >>= 2;
3216 if (i >= 1 << 1)
3217 ret++;
3218 return ret;
3221 /* Parse __attribute__((...)) GNUC extension. */
3222 static void parse_attribute(AttributeDef *ad)
3224 int t, n;
3225 CString astr;
3227 redo:
3228 if (tok != TOK_ATTRIBUTE1 && tok != TOK_ATTRIBUTE2)
3229 return;
3230 next();
3231 skip('(');
3232 skip('(');
3233 while (tok != ')') {
3234 if (tok < TOK_IDENT)
3235 expect("attribute name");
3236 t = tok;
3237 next();
3238 switch(t) {
3239 case TOK_SECTION1:
3240 case TOK_SECTION2:
3241 skip('(');
3242 parse_mult_str(&astr, "section name");
3243 ad->section = find_section(tcc_state, (char *)astr.data);
3244 skip(')');
3245 cstr_free(&astr);
3246 break;
3247 case TOK_ALIAS1:
3248 case TOK_ALIAS2:
3249 skip('(');
3250 parse_mult_str(&astr, "alias(\"target\")");
3251 ad->alias_target = /* save string as token, for later */
3252 tok_alloc((char*)astr.data, astr.size-1)->tok;
3253 skip(')');
3254 cstr_free(&astr);
3255 break;
3256 case TOK_VISIBILITY1:
3257 case TOK_VISIBILITY2:
3258 skip('(');
3259 parse_mult_str(&astr,
3260 "visibility(\"default|hidden|internal|protected\")");
3261 if (!strcmp (astr.data, "default"))
3262 ad->a.visibility = STV_DEFAULT;
3263 else if (!strcmp (astr.data, "hidden"))
3264 ad->a.visibility = STV_HIDDEN;
3265 else if (!strcmp (astr.data, "internal"))
3266 ad->a.visibility = STV_INTERNAL;
3267 else if (!strcmp (astr.data, "protected"))
3268 ad->a.visibility = STV_PROTECTED;
3269 else
3270 expect("visibility(\"default|hidden|internal|protected\")");
3271 skip(')');
3272 cstr_free(&astr);
3273 break;
3274 case TOK_ALIGNED1:
3275 case TOK_ALIGNED2:
3276 if (tok == '(') {
3277 next();
3278 n = expr_const();
3279 if (n <= 0 || (n & (n - 1)) != 0)
3280 tcc_error("alignment must be a positive power of two");
3281 skip(')');
3282 } else {
3283 n = MAX_ALIGN;
3285 ad->a.aligned = exact_log2p1(n);
3286 if (n != 1 << (ad->a.aligned - 1))
3287 tcc_error("alignment of %d is larger than implemented", n);
3288 break;
3289 case TOK_PACKED1:
3290 case TOK_PACKED2:
3291 ad->a.packed = 1;
3292 break;
3293 case TOK_WEAK1:
3294 case TOK_WEAK2:
3295 ad->a.weak = 1;
3296 break;
3297 case TOK_UNUSED1:
3298 case TOK_UNUSED2:
3299 /* currently, no need to handle it because tcc does not
3300 track unused objects */
3301 break;
3302 case TOK_NORETURN1:
3303 case TOK_NORETURN2:
3304 /* currently ignored: tcc does not make use of noreturn
3305 information */
3306 break;
3307 case TOK_CDECL1:
3308 case TOK_CDECL2:
3309 case TOK_CDECL3:
3310 ad->f.func_call = FUNC_CDECL;
3311 break;
3312 case TOK_STDCALL1:
3313 case TOK_STDCALL2:
3314 case TOK_STDCALL3:
3315 ad->f.func_call = FUNC_STDCALL;
3316 break;
3317 #ifdef TCC_TARGET_I386
3318 case TOK_REGPARM1:
3319 case TOK_REGPARM2:
3320 skip('(');
3321 n = expr_const();
3322 if (n > 3)
3323 n = 3;
3324 else if (n < 0)
3325 n = 0;
3326 if (n > 0)
3327 ad->f.func_call = FUNC_FASTCALL1 + n - 1;
3328 skip(')');
3329 break;
3330 case TOK_FASTCALL1:
3331 case TOK_FASTCALL2:
3332 case TOK_FASTCALL3:
3333 ad->f.func_call = FUNC_FASTCALLW;
3334 break;
3335 #endif
3336 case TOK_MODE:
3337 skip('(');
3338 switch(tok) {
3339 case TOK_MODE_DI:
3340 ad->attr_mode = VT_LLONG + 1;
3341 break;
3342 case TOK_MODE_QI:
3343 ad->attr_mode = VT_BYTE + 1;
3344 break;
3345 case TOK_MODE_HI:
3346 ad->attr_mode = VT_SHORT + 1;
3347 break;
3348 case TOK_MODE_SI:
3349 case TOK_MODE_word:
3350 ad->attr_mode = VT_INT + 1;
3351 break;
3352 default:
3353 tcc_warning("__mode__(%s) not supported\n", get_tok_str(tok, NULL));
3354 break;
3356 next();
3357 skip(')');
3358 break;
3359 case TOK_DLLEXPORT:
3360 ad->a.dllexport = 1;
3361 break;
3362 case TOK_DLLIMPORT:
3363 ad->a.dllimport = 1;
3364 break;
3365 default:
3366 if (tcc_state->warn_unsupported)
3367 tcc_warning("'%s' attribute ignored", get_tok_str(t, NULL));
3368 /* skip parameters */
3369 if (tok == '(') {
3370 int parenthesis = 0;
3371 do {
3372 if (tok == '(')
3373 parenthesis++;
3374 else if (tok == ')')
3375 parenthesis--;
3376 next();
3377 } while (parenthesis && tok != -1);
3379 break;
3381 if (tok != ',')
3382 break;
3383 next();
3385 skip(')');
3386 skip(')');
3387 goto redo;
3390 static Sym * find_field (CType *type, int v)
3392 Sym *s = type->ref;
3393 v |= SYM_FIELD;
3394 while ((s = s->next) != NULL) {
3395 if ((s->v & SYM_FIELD) &&
3396 (s->type.t & VT_BTYPE) == VT_STRUCT &&
3397 (s->v & ~SYM_FIELD) >= SYM_FIRST_ANOM) {
3398 Sym *ret = find_field (&s->type, v);
3399 if (ret)
3400 return ret;
3402 if (s->v == v)
3403 break;
3405 return s;
3408 static void struct_add_offset (Sym *s, int offset)
3410 while ((s = s->next) != NULL) {
3411 if ((s->v & SYM_FIELD) &&
3412 (s->type.t & VT_BTYPE) == VT_STRUCT &&
3413 (s->v & ~SYM_FIELD) >= SYM_FIRST_ANOM) {
3414 struct_add_offset(s->type.ref, offset);
3415 } else
3416 s->c += offset;
3420 static void struct_layout(CType *type, AttributeDef *ad)
3422 int size, align, maxalign, offset, c, bit_pos, bit_size;
3423 int packed, a, bt, prevbt, prev_bit_size;
3424 int pcc = !tcc_state->ms_bitfields;
3425 int pragma_pack = *tcc_state->pack_stack_ptr;
3426 Sym *f;
3428 maxalign = 1;
3429 offset = 0;
3430 c = 0;
3431 bit_pos = 0;
3432 prevbt = VT_STRUCT; /* make it never match */
3433 prev_bit_size = 0;
3435 //#define BF_DEBUG
3437 for (f = type->ref->next; f; f = f->next) {
3438 if (f->type.t & VT_BITFIELD)
3439 bit_size = BIT_SIZE(f->type.t);
3440 else
3441 bit_size = -1;
3442 size = type_size(&f->type, &align);
3443 a = f->a.aligned ? 1 << (f->a.aligned - 1) : 0;
3444 packed = 0;
3446 if (pcc && bit_size == 0) {
3447 /* in pcc mode, packing does not affect zero-width bitfields */
3449 } else {
3450 /* in pcc mode, attribute packed overrides if set. */
3451 if (pcc && (f->a.packed || ad->a.packed))
3452 align = packed = 1;
3454 /* pragma pack overrides align if smaller, and always packs bitfields */
3455 if (pragma_pack) {
3456 packed = 1;
3457 if (pragma_pack < align)
3458 align = pragma_pack;
3459 /* in pcc mode pragma pack also overrides individual align */
3460 if (pcc && pragma_pack < a)
3461 a = 0;
3464 /* some individual align was specified */
3465 if (a)
3466 align = a;
3468 if (type->ref->type.t == VT_UNION) {
3469 if (pcc && bit_size >= 0)
3470 size = (bit_size + 7) >> 3;
3471 offset = 0;
3472 if (size > c)
3473 c = size;
3475 } else if (bit_size < 0) {
3476 if (pcc)
3477 c += (bit_pos + 7) >> 3;
3478 c = (c + align - 1) & -align;
3479 offset = c;
3480 if (size > 0)
3481 c += size;
3482 bit_pos = 0;
3483 prevbt = VT_STRUCT;
3484 prev_bit_size = 0;
3486 } else {
3487 /* A bit-field. Layout is more complicated. There are two
3488 options: PCC (GCC) compatible and MS compatible */
3489 if (pcc) {
3490 /* In PCC layout a bit-field is placed adjacent to the
3491 preceding bit-fields, except if:
3492 - it has zero-width
3493 - an individual alignment was given
3494 - it would overflow its base type container and
3495 there is no packing */
3496 if (bit_size == 0) {
3497 new_field:
3498 c = (c + ((bit_pos + 7) >> 3) + align - 1) & -align;
3499 bit_pos = 0;
3500 } else if (f->a.aligned) {
3501 goto new_field;
3502 } else if (!packed) {
3503 int a8 = align * 8;
3504 int ofs = ((c * 8 + bit_pos) % a8 + bit_size + a8 - 1) / a8;
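/* ofs counts how many align-sized units the bit-field would span from the
   start of its current unit; if that exceeds the units in the base type
   (size / align), the field would straddle its container, so a new field
   is started */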
3505 if (ofs > size / align)
3506 goto new_field;
3509 /* in pcc mode, long long bitfields have type int if they fit */
3510 if (size == 8 && bit_size <= 32)
3511 f->type.t = (f->type.t & ~VT_BTYPE) | VT_INT, size = 4;
3513 while (bit_pos >= align * 8)
3514 c += align, bit_pos -= align * 8;
3515 offset = c;
3517 /* In PCC layout named bit-fields influence the alignment
3518 of the containing struct using the base types alignment,
3519 except for packed fields (which here have correct align). */
3520 if (f->v & SYM_FIRST_ANOM
3521 // && bit_size // ??? gcc on ARM/rpi does that
3523 align = 1;
3525 } else {
3526 bt = f->type.t & VT_BTYPE;
3527 if ((bit_pos + bit_size > size * 8)
3528 || (bit_size > 0) == (bt != prevbt)
3530 c = (c + align - 1) & -align;
3531 offset = c;
3532 bit_pos = 0;
3533 /* In MS bitfield mode a bit-field run always uses
3534 at least as many bits as the underlying type.
3535 To start a new run it's also required that this
3536 or the last bit-field had non-zero width. */
3537 if (bit_size || prev_bit_size)
3538 c += size;
3540 /* In MS layout the records alignment is normally
3541 influenced by the field, except for a zero-width
3542 field at the start of a run (but by further zero-width
3543 fields it is again). */
3544 if (bit_size == 0 && prevbt != bt)
3545 align = 1;
3546 prevbt = bt;
3547 prev_bit_size = bit_size;
3550 f->type.t = (f->type.t & ~(0x3f << VT_STRUCT_SHIFT))
3551 | (bit_pos << VT_STRUCT_SHIFT);
3552 bit_pos += bit_size;
3554 if (align > maxalign)
3555 maxalign = align;
3557 #ifdef BF_DEBUG
3558 printf("set field %s offset %-2d size %-2d align %-2d",
3559 get_tok_str(f->v & ~SYM_FIELD, NULL), offset, size, align);
3560 if (f->type.t & VT_BITFIELD) {
3561 printf(" pos %-2d bits %-2d",
3562 BIT_POS(f->type.t),
3563 BIT_SIZE(f->type.t)
3566 printf("\n");
3567 #endif
3569 if (f->v & SYM_FIRST_ANOM && (f->type.t & VT_BTYPE) == VT_STRUCT) {
3570 Sym *ass;
3571 /* An anonymous struct/union. Adjust member offsets
3572 to reflect the real offset of our containing struct.
3573 Also set the offset of this anon member inside
3574 the outer struct to be zero. Via this it
3575 works when accessing the field offset directly
3576 (from base object), as well as when recursing
3577 members in initializer handling. */
3578 int v2 = f->type.ref->v;
3579 if (!(v2 & SYM_FIELD) &&
3580 (v2 & ~SYM_STRUCT) < SYM_FIRST_ANOM) {
3581 Sym **pps;
3582 /* This happens only with MS extensions. The
3583 anon member has a named struct type, so it
3584 potentially is shared with other references.
3585 We need to unshare members so we can modify
3586 them. */
3587 ass = f->type.ref;
3588 f->type.ref = sym_push(anon_sym++ | SYM_FIELD,
3589 &f->type.ref->type, 0,
3590 f->type.ref->c);
3591 pps = &f->type.ref->next;
3592 while ((ass = ass->next) != NULL) {
3593 *pps = sym_push(ass->v, &ass->type, 0, ass->c);
3594 pps = &((*pps)->next);
3596 *pps = NULL;
3598 struct_add_offset(f->type.ref, offset);
3599 f->c = 0;
3600 } else {
3601 f->c = offset;
3604 f->r = 0;
3607 if (pcc)
3608 c += (bit_pos + 7) >> 3;
3610 /* store size and alignment */
3611 a = bt = ad->a.aligned ? 1 << (ad->a.aligned - 1) : 1;
3612 if (a < maxalign)
3613 a = maxalign;
3614 type->ref->r = a;
3615 if (pragma_pack && pragma_pack < maxalign && 0 == pcc) {
3616 /* can happen if individual align for some member was given. In
3617 this case MSVC ignores maxalign when aligning the size */
3618 a = pragma_pack;
3619 if (a < bt)
3620 a = bt;
3622 c = (c + a - 1) & -a;
3623 type->ref->c = c;
3625 #ifdef BF_DEBUG
3626 printf("struct size %-2d align %-2d\n\n", c, a), fflush(stdout);
3627 #endif
3629 /* check whether we can access bitfields by their type */
3630 for (f = type->ref->next; f; f = f->next) {
3631 int s, px, cx, c0;
3632 CType t;
3634 if (0 == (f->type.t & VT_BITFIELD))
3635 continue;
3636 f->type.ref = f;
3637 f->auxtype = -1;
3638 bit_size = BIT_SIZE(f->type.t);
3639 if (bit_size == 0)
3640 continue;
3641 bit_pos = BIT_POS(f->type.t);
3642 size = type_size(&f->type, &align);
3643 if (bit_pos + bit_size <= size * 8 && f->c + size <= c)
3644 continue;
3646 /* try to access the field using a different type */
3647 c0 = -1, s = align = 1;
3648 for (;;) {
3649 px = f->c * 8 + bit_pos;
3650 cx = (px >> 3) & -align;
3651 px = px - (cx << 3);
3652 if (c0 == cx)
3653 break;
3654 s = (px + bit_size + 7) >> 3;
3655 if (s > 4) {
3656 t.t = VT_LLONG;
3657 } else if (s > 2) {
3658 t.t = VT_INT;
3659 } else if (s > 1) {
3660 t.t = VT_SHORT;
3661 } else {
3662 t.t = VT_BYTE;
3664 s = type_size(&t, &align);
3665 c0 = cx;
3668 if (px + bit_size <= s * 8 && cx + s <= c) {
3669 /* update offset and bit position */
3670 f->c = cx;
3671 bit_pos = px;
3672 f->type.t = (f->type.t & ~(0x3f << VT_STRUCT_SHIFT))
3673 | (bit_pos << VT_STRUCT_SHIFT);
3674 if (s != size)
3675 f->auxtype = t.t;
3676 #ifdef BF_DEBUG
3677 printf("FIX field %s offset %-2d size %-2d align %-2d "
3678 "pos %-2d bits %-2d\n",
3679 get_tok_str(f->v & ~SYM_FIELD, NULL),
3680 cx, s, align, px, bit_size);
3681 #endif
3682 } else {
3683 /* fall back to load/store single-byte wise */
3684 f->auxtype = VT_STRUCT;
3685 #ifdef BF_DEBUG
3686 printf("FIX field %s : load byte-wise\n",
3687 get_tok_str(f->v & ~SYM_FIELD, NULL));
3688 #endif
3693 /* enum/struct/union declaration. u is VT_ENUM/VT_STRUCT/VT_UNION */
3694 static void struct_decl(CType *type, int u)
3696 int v, c, size, align, flexible;
3697 int bit_size, bsize, bt;
3698 Sym *s, *ss, **ps;
3699 AttributeDef ad, ad1;
3700 CType type1, btype;
3702 memset(&ad, 0, sizeof ad);
3703 next();
3704 parse_attribute(&ad);
3705 if (tok != '{') {
3706 v = tok;
3707 next();
3708 /* struct already defined ? return it */
3709 if (v < TOK_IDENT)
3710 expect("struct/union/enum name");
3711 s = struct_find(v);
3712 if (s && (s->sym_scope == local_scope || tok != '{')) {
3713 if (u == s->type.t)
3714 goto do_decl;
3715 if (u == VT_ENUM && IS_ENUM(s->type.t))
3716 goto do_decl;
3717 tcc_error("redefinition of '%s'", get_tok_str(v, NULL));
3719 } else {
3720 v = anon_sym++;
3722 /* Record the original enum/struct/union token. */
3723 type1.t = u == VT_ENUM ? u | VT_INT | VT_UNSIGNED : u;
3724 type1.ref = NULL;
3725 /* we put an undefined size for struct/union */
3726 s = sym_push(v | SYM_STRUCT, &type1, 0, -1);
3727 s->r = 0; /* default alignment is zero as gcc */
3728 do_decl:
3729 type->t = s->type.t;
3730 type->ref = s;
3732 if (tok == '{') {
3733 next();
3734 if (s->c != -1)
3735 tcc_error("struct/union/enum already defined");
3736 /* cannot be empty */
3737 /* non empty enums are not allowed */
3738 ps = &s->next;
3739 if (u == VT_ENUM) {
3740 long long ll = 0, pl = 0, nl = 0;
3741 CType t;
3742 t.ref = s;
3743 /* enum symbols have static storage */
3744 t.t = VT_INT|VT_STATIC|VT_ENUM_VAL;
3745 for(;;) {
3746 v = tok;
3747 if (v < TOK_UIDENT)
3748 expect("identifier");
3749 ss = sym_find(v);
3750 if (ss && !local_stack)
3751 tcc_error("redefinition of enumerator '%s'",
3752 get_tok_str(v, NULL));
3753 next();
3754 if (tok == '=') {
3755 next();
3756 ll = expr_const64();
3758 ss = sym_push(v, &t, VT_CONST, 0);
3759 ss->enum_val = ll;
3760 *ps = ss, ps = &ss->next;
3761 if (ll < nl)
3762 nl = ll;
3763 if (ll > pl)
3764 pl = ll;
3765 if (tok != ',')
3766 break;
3767 next();
3768 ll++;
3769 /* NOTE: we accept a trailing comma */
3770 if (tok == '}')
3771 break;
3773 skip('}');
3774 /* set integral type of the enum */
3775 t.t = VT_INT;
3776 if (nl >= 0) {
3777 if (pl != (unsigned)pl)
3778 t.t = VT_LLONG;
3779 t.t |= VT_UNSIGNED;
3780 } else if (pl != (int)pl || nl != (int)nl)
3781 t.t = VT_LLONG;
3782 s->type.t = type->t = t.t | VT_ENUM;
3783 s->c = 0;
3784 /* set type for enum members */
3785 for (ss = s->next; ss; ss = ss->next) {
3786 ll = ss->enum_val;
3787 if (ll == (int)ll) /* default is int if it fits */
3788 continue;
3789 if (t.t & VT_UNSIGNED) {
3790 ss->type.t |= VT_UNSIGNED;
3791 if (ll == (unsigned)ll)
3792 continue;
3794 ss->type.t = (ss->type.t & ~VT_BTYPE) | VT_LLONG;
3796 } else {
3797 c = 0;
3798 flexible = 0;
3799 while (tok != '}') {
3800 if (!parse_btype(&btype, &ad1)) {
3801 skip(';');
3802 continue;
3804 while (1) {
3805 if (flexible)
3806 tcc_error("flexible array member '%s' not at the end of struct",
3807 get_tok_str(v, NULL));
3808 bit_size = -1;
3809 v = 0;
3810 type1 = btype;
3811 if (tok != ':') {
3812 if (tok != ';')
3813 type_decl(&type1, &ad1, &v, TYPE_DIRECT);
3814 if (v == 0) {
3815 if ((type1.t & VT_BTYPE) != VT_STRUCT)
3816 expect("identifier");
3817 else {
3818 int v = btype.ref->v;
3819 if (!(v & SYM_FIELD) && (v & ~SYM_STRUCT) < SYM_FIRST_ANOM) {
3820 if (tcc_state->ms_extensions == 0)
3821 expect("identifier");
3825 if (type_size(&type1, &align) < 0) {
3826 if ((u == VT_STRUCT) && (type1.t & VT_ARRAY) && c)
3827 flexible = 1;
3828 else
3829 tcc_error("field '%s' has incomplete type",
3830 get_tok_str(v, NULL));
3832 if ((type1.t & VT_BTYPE) == VT_FUNC ||
3833 (type1.t & VT_STORAGE))
3834 tcc_error("invalid type for '%s'",
3835 get_tok_str(v, NULL));
3837 if (tok == ':') {
3838 next();
3839 bit_size = expr_const();
3840 /* XXX: handle v = 0 case for messages */
3841 if (bit_size < 0)
3842 tcc_error("negative width in bit-field '%s'",
3843 get_tok_str(v, NULL));
3844 if (v && bit_size == 0)
3845 tcc_error("zero width for bit-field '%s'",
3846 get_tok_str(v, NULL));
3847 parse_attribute(&ad1);
3849 size = type_size(&type1, &align);
3850 if (bit_size >= 0) {
3851 bt = type1.t & VT_BTYPE;
3852 if (bt != VT_INT &&
3853 bt != VT_BYTE &&
3854 bt != VT_SHORT &&
3855 bt != VT_BOOL &&
3856 bt != VT_LLONG)
3857 tcc_error("bitfields must have scalar type");
3858 bsize = size * 8;
3859 if (bit_size > bsize) {
3860 tcc_error("width of '%s' exceeds its type",
3861 get_tok_str(v, NULL));
3862 } else if (bit_size == bsize
3863 && !ad.a.packed && !ad1.a.packed) {
3864 /* no need for bit fields */
3866 } else if (bit_size == 64) {
3867 tcc_error("field width 64 not implemented");
3868 } else {
3869 type1.t = (type1.t & ~VT_STRUCT_MASK)
3870 | VT_BITFIELD
3871 | (bit_size << (VT_STRUCT_SHIFT + 6));
3874 if (v != 0 || (type1.t & VT_BTYPE) == VT_STRUCT) {
3875 /* Remember we've seen a real field to check
3876 for placement of flexible array member. */
3877 c = 1;
3879 /* If member is a struct or bit-field, enforce
3880 placing into the struct (as anonymous). */
3881 if (v == 0 &&
3882 ((type1.t & VT_BTYPE) == VT_STRUCT ||
3883 bit_size >= 0)) {
3884 v = anon_sym++;
3886 if (v) {
3887 ss = sym_push(v | SYM_FIELD, &type1, 0, 0);
3888 ss->a = ad1.a;
3889 *ps = ss;
3890 ps = &ss->next;
3892 if (tok == ';' || tok == TOK_EOF)
3893 break;
3894 skip(',');
3896 skip(';');
3898 skip('}');
3899 parse_attribute(&ad);
3900 struct_layout(type, &ad);
3905 static void sym_to_attr(AttributeDef *ad, Sym *s)
3907 if (s->a.aligned && 0 == ad->a.aligned)
3908 ad->a.aligned = s->a.aligned;
3909 if (s->f.func_call && 0 == ad->f.func_call)
3910 ad->f.func_call = s->f.func_call;
3911 if (s->f.func_type && 0 == ad->f.func_type)
3912 ad->f.func_type = s->f.func_type;
3913 if (s->a.packed)
3914 ad->a.packed = 1;
3917 /* Add type qualifiers to a type. If the type is an array then the qualifiers
3918 are added to the element type, copied because it could be a typedef. */
3919 static void parse_btype_qualify(CType *type, int qualifiers)
3921 while (type->t & VT_ARRAY) {
3922 type->ref = sym_push(SYM_FIELD, &type->ref->type, 0, type->ref->c);
3923 type = &type->ref->type;
3925 type->t |= qualifiers;
3928 /* return 0 if no type declaration. otherwise, return the basic type
3929 and skip it.
3931 static int parse_btype(CType *type, AttributeDef *ad)
3933 int t, u, bt, st, type_found, typespec_found, g;
3934 Sym *s;
3935 CType type1;
3937 memset(ad, 0, sizeof(AttributeDef));
3938 type_found = 0;
3939 typespec_found = 0;
3940 t = VT_INT;
3941 bt = st = -1;
3942 type->ref = NULL;
3944 while(1) {
3945 switch(tok) {
3946 case TOK_EXTENSION:
3947 /* currently, we really ignore extension */
3948 next();
3949 continue;
3951 /* basic types */
3952 case TOK_CHAR:
3953 u = VT_BYTE;
3954 basic_type:
3955 next();
3956 basic_type1:
3957 if (u == VT_SHORT || u == VT_LONG) {
3958 if (st != -1 || (bt != -1 && bt != VT_INT))
3959 tmbt: tcc_error("too many basic types");
3960 st = u;
3961 } else {
3962 if (bt != -1 || (st != -1 && u != VT_INT))
3963 goto tmbt;
3964 bt = u;
3966 if (u != VT_INT)
3967 t = (t & ~VT_BTYPE) | u;
3968 typespec_found = 1;
3969 break;
3970 case TOK_VOID:
3971 u = VT_VOID;
3972 goto basic_type;
3973 case TOK_SHORT:
3974 u = VT_SHORT;
3975 goto basic_type;
3976 case TOK_INT:
3977 u = VT_INT;
3978 goto basic_type;
3979 case TOK_LONG:
3980 if ((t & VT_BTYPE) == VT_DOUBLE) {
3981 #ifndef TCC_TARGET_PE
3982 t = (t & ~(VT_LONG | VT_BTYPE)) | VT_LDOUBLE;
3983 #endif
3984 } else if (t & VT_LONG) {
3985 t = (t & ~(VT_LONG | VT_BTYPE)) | VT_LLONG;
3986 } else {
3987 u = VT_LONG;
3988 goto basic_type;
3990 next();
3991 break;
3992 #ifdef TCC_TARGET_ARM64
3993 case TOK_UINT128:
3994 /* GCC's __uint128_t appears in some Linux header files. Make it a
3995 synonym for long double to get the size and alignment right. */
3996 u = VT_LDOUBLE;
3997 goto basic_type;
3998 #endif
3999 case TOK_BOOL:
4000 u = VT_BOOL;
4001 goto basic_type;
4002 case TOK_FLOAT:
4003 u = VT_FLOAT;
4004 goto basic_type;
4005 case TOK_DOUBLE:
4006 if (t & VT_LONG) {
4007 #ifdef TCC_TARGET_PE
4008 t = (t & ~(VT_LONG | VT_BTYPE)) | VT_DOUBLE;
4009 #else
4010 t = (t & ~(VT_LONG | VT_BTYPE)) | VT_LDOUBLE;
4011 #endif
4012 } else {
4013 u = VT_DOUBLE;
4014 goto basic_type;
4016 next();
4017 break;
4018 case TOK_ENUM:
4019 struct_decl(&type1, VT_ENUM);
4020 basic_type2:
4021 u = type1.t;
4022 type->ref = type1.ref;
4023 goto basic_type1;
4024 case TOK_STRUCT:
4025 struct_decl(&type1, VT_STRUCT);
4026 goto basic_type2;
4027 case TOK_UNION:
4028 struct_decl(&type1, VT_UNION);
4029 goto basic_type2;
4031 /* type modifiers */
4032 case TOK_CONST1:
4033 case TOK_CONST2:
4034 case TOK_CONST3:
4035 type->t = t;
4036 parse_btype_qualify(type, VT_CONSTANT);
4037 t = type->t;
4038 next();
4039 break;
4040 case TOK_VOLATILE1:
4041 case TOK_VOLATILE2:
4042 case TOK_VOLATILE3:
4043 type->t = t;
4044 parse_btype_qualify(type, VT_VOLATILE);
4045 t = type->t;
4046 next();
4047 break;
4048 case TOK_SIGNED1:
4049 case TOK_SIGNED2:
4050 case TOK_SIGNED3:
4051 if ((t & (VT_DEFSIGN|VT_UNSIGNED)) == (VT_DEFSIGN|VT_UNSIGNED))
4052 tcc_error("signed and unsigned modifier");
4053 t |= VT_DEFSIGN;
4054 next();
4055 typespec_found = 1;
4056 break;
4057 case TOK_REGISTER:
4058 case TOK_AUTO:
4059 case TOK_RESTRICT1:
4060 case TOK_RESTRICT2:
4061 case TOK_RESTRICT3:
4062 next();
4063 break;
4064 case TOK_UNSIGNED:
4065 if ((t & (VT_DEFSIGN|VT_UNSIGNED)) == VT_DEFSIGN)
4066 tcc_error("signed and unsigned modifier");
4067 t |= VT_DEFSIGN | VT_UNSIGNED;
4068 next();
4069 typespec_found = 1;
4070 break;
4072 /* storage */
4073 case TOK_EXTERN:
4074 g = VT_EXTERN;
4075 goto storage;
4076 case TOK_STATIC:
4077 g = VT_STATIC;
4078 goto storage;
4079 case TOK_TYPEDEF:
4080 g = VT_TYPEDEF;
4081 goto storage;
4082 storage:
4083 if (t & (VT_EXTERN|VT_STATIC|VT_TYPEDEF) & ~g)
4084 tcc_error("multiple storage classes");
4085 t |= g;
4086 next();
4087 break;
4088 case TOK_INLINE1:
4089 case TOK_INLINE2:
4090 case TOK_INLINE3:
4091 t |= VT_INLINE;
4092 next();
4093 break;
4095 /* GNUC attribute */
4096 case TOK_ATTRIBUTE1:
4097 case TOK_ATTRIBUTE2:
4098 parse_attribute(ad);
4099 if (ad->attr_mode) {
4100 u = ad->attr_mode -1;
4101 t = (t & ~VT_BTYPE) | u;
4103 break;
4104 /* GNUC typeof */
4105 case TOK_TYPEOF1:
4106 case TOK_TYPEOF2:
4107 case TOK_TYPEOF3:
4108 next();
4109 parse_expr_type(&type1);
4110 /* remove all storage modifiers except typedef */
4111 type1.t &= ~(VT_STORAGE&~VT_TYPEDEF);
4112 if (type1.ref)
4113 sym_to_attr(ad, type1.ref);
4114 goto basic_type2;
4115 default:
4116 if (typespec_found)
4117 goto the_end;
4118 s = sym_find(tok);
4119 if (!s || !(s->type.t & VT_TYPEDEF))
4120 goto the_end;
4121 t &= ~VT_BTYPE;
4122 u = t & ~(VT_CONSTANT | VT_VOLATILE), t ^= u;
4123 type->t = (s->type.t & ~VT_TYPEDEF) | u;
4124 type->ref = s->type.ref;
4125 if (t)
4126 parse_btype_qualify(type, t);
4127 t = type->t;
4128 /* get attributes from typedef */
4129 sym_to_attr(ad, s);
4130 next();
4131 typespec_found = 1;
4132 st = bt = -2;
4133 break;
4135 type_found = 1;
4137 the_end:
4138 if (tcc_state->char_is_unsigned) {
4139 if ((t & (VT_DEFSIGN|VT_BTYPE)) == VT_BYTE)
4140 t |= VT_UNSIGNED;
4143 /* long is never used as type */
4144 if (t & VT_LONG)
4145 #if PTR_SIZE == 8 && !defined TCC_TARGET_PE
4146 t = (t & ~VT_BTYPE) | VT_LLONG;
4147 #else
4148 t = (t & ~VT_BTYPE) | VT_INT;
4149 #endif
4150 type->t = t;
4151 return type_found;
4154 /* convert a function parameter type (array to pointer and function to
4155 function pointer) */
4156 static inline void convert_parameter_type(CType *pt)
4158 /* remove const and volatile qualifiers (XXX: const could be used
4159 to indicate a const function parameter */
4160 pt->t &= ~(VT_CONSTANT | VT_VOLATILE);
4161 /* array must be transformed to pointer according to ANSI C */
4162 pt->t &= ~VT_ARRAY;
4163 if ((pt->t & VT_BTYPE) == VT_FUNC) {
4164 mk_pointer(pt);
4168 ST_FUNC void parse_asm_str(CString *astr)
4170 skip('(');
4171 parse_mult_str(astr, "string constant");
4174 /* Parse an asm label and return the token */
4175 static int asm_label_instr(void)
4177 int v;
4178 CString astr;
4180 next();
4181 parse_asm_str(&astr);
4182 skip(')');
4183 #ifdef ASM_DEBUG
4184 printf("asm_alias: \"%s\"\n", (char *)astr.data);
4185 #endif
4186 v = tok_alloc(astr.data, astr.size - 1)->tok;
4187 cstr_free(&astr);
4188 return v;
4191 static int post_type(CType *type, AttributeDef *ad, int storage, int td)
4193 int n, l, t1, arg_size, align;
4194 Sym **plast, *s, *first;
4195 AttributeDef ad1;
4196 CType pt;
4198 if (tok == '(') {
4199 /* function type, or recursive declarator (return if so) */
4200 next();
4201 if (td && !(td & TYPE_ABSTRACT))
4202 return 0;
4203 if (tok == ')')
4204 l = 0;
4205 else if (parse_btype(&pt, &ad1))
4206 l = FUNC_NEW;
4207 else if (td)
4208 return 0;
4209 else
4210 l = FUNC_OLD;
4211 first = NULL;
4212 plast = &first;
4213 arg_size = 0;
4214 if (l) {
4215 for(;;) {
4216 /* read param name and compute offset */
4217 if (l != FUNC_OLD) {
4218 if ((pt.t & VT_BTYPE) == VT_VOID && tok == ')')
4219 break;
4220 type_decl(&pt, &ad1, &n, TYPE_DIRECT | TYPE_ABSTRACT);
4221 if ((pt.t & VT_BTYPE) == VT_VOID)
4222 tcc_error("parameter declared as void");
4223 arg_size += (type_size(&pt, &align) + PTR_SIZE - 1) / PTR_SIZE;
4224 } else {
4225 n = tok;
4226 if (n < TOK_UIDENT)
4227 expect("identifier");
4228 pt.t = VT_VOID; /* invalid type */
4229 next();
4231 convert_parameter_type(&pt);
4232 s = sym_push(n | SYM_FIELD, &pt, 0, 0);
4233 *plast = s;
4234 plast = &s->next;
4235 if (tok == ')')
4236 break;
4237 skip(',');
4238 if (l == FUNC_NEW && tok == TOK_DOTS) {
4239 l = FUNC_ELLIPSIS;
4240 next();
4241 break;
4243 if (l == FUNC_NEW && !parse_btype(&pt, &ad1))
4244 tcc_error("invalid type");
4246 } else
4247 /* if no parameters, then old type prototype */
4248 l = FUNC_OLD;
4249 skip(')');
4250 /* NOTE: const is ignored in returned type as it has a special
4251 meaning in gcc / C++ */
4252 type->t &= ~VT_CONSTANT;
4253 /* some ancient pre-K&R C allows a function to return an array
4254 and the array brackets to be put after the arguments, such
4255 that "int c()[]" means something like "int[] c()" */
4256 if (tok == '[') {
4257 next();
4258 skip(']'); /* only handle simple "[]" */
4259 mk_pointer(type);
4261 /* we push an anonymous symbol which will contain the function prototype */
4262 ad->f.func_args = arg_size;
4263 ad->f.func_type = l;
4264 s = sym_push(SYM_FIELD, type, 0, 0);
4265 s->a = ad->a;
4266 s->f = ad->f;
4267 s->next = first;
4268 type->t = VT_FUNC;
4269 type->ref = s;
4270 } else if (tok == '[') {
4271 int saved_nocode_wanted = nocode_wanted;
4272 /* array definition */
4273 next();
4274 if (tok == TOK_RESTRICT1)
4275 next();
4276 n = -1;
4277 t1 = 0;
4278 if (tok != ']') {
4279 if (!local_stack || (storage & VT_STATIC))
4280 vpushi(expr_const());
4281 else {
4282 /* VLAs (which can only happen with local_stack && !VT_STATIC)
4283 length must always be evaluated, even under nocode_wanted,
4284 so that its size slot is initialized (e.g. under sizeof
4285 or typeof). */
4286 nocode_wanted = 0;
4287 gexpr();
4289 if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
4290 n = vtop->c.i;
4291 if (n < 0)
4292 tcc_error("invalid array size");
4293 } else {
4294 if (!is_integer_btype(vtop->type.t & VT_BTYPE))
4295 tcc_error("size of variable length array should be an integer");
4296 t1 = VT_VLA;
4299 skip(']');
4300 /* parse next post type */
4301 post_type(type, ad, storage, 0);
4302 if (type->t == VT_FUNC)
4303 tcc_error("declaration of an array of functions");
4304 t1 |= type->t & VT_VLA;
4306 if (t1 & VT_VLA) {
4307 loc -= type_size(&int_type, &align);
4308 loc &= -align;
4309 n = loc;
4311 vla_runtime_type_size(type, &align);
4312 gen_op('*');
4313 vset(&int_type, VT_LOCAL|VT_LVAL, n);
4314 vswap();
4315 vstore();
4317 if (n != -1)
4318 vpop();
4319 nocode_wanted = saved_nocode_wanted;
4321 /* we push an anonymous symbol which will contain the array
4322 element type */
4323 s = sym_push(SYM_FIELD, type, 0, n);
4324 type->t = (t1 ? VT_VLA : VT_ARRAY) | VT_PTR;
4325 type->ref = s;
4327 return 1;
4330 /* Parse a type declarator (except basic type), and return the type
4331 in 'type'. 'td' is a bitmask indicating which kind of type decl is
4332 expected. 'type' should contain the basic type. 'ad' is the
4333 attribute definition of the basic type. It can be modified by
4334 type_decl(). If this (possibly abstract) declarator is a pointer chain
4335 it returns the innermost pointed to type (equals *type, but is a different
4336 pointer), otherwise returns type itself, that's used for recursive calls. */
4337 static CType *type_decl(CType *type, AttributeDef *ad, int *v, int td)
4339 CType *post, *ret;
4340 int qualifiers, storage;
4342 /* recursive type, remove storage bits first, apply them later again */
4343 storage = type->t & VT_STORAGE;
4344 type->t &= ~VT_STORAGE;
4345 post = ret = type;
4347 while (tok == '*') {
4348 qualifiers = 0;
4349 redo:
4350 next();
4351 switch(tok) {
4352 case TOK_CONST1:
4353 case TOK_CONST2:
4354 case TOK_CONST3:
4355 qualifiers |= VT_CONSTANT;
4356 goto redo;
4357 case TOK_VOLATILE1:
4358 case TOK_VOLATILE2:
4359 case TOK_VOLATILE3:
4360 qualifiers |= VT_VOLATILE;
4361 goto redo;
4362 case TOK_RESTRICT1:
4363 case TOK_RESTRICT2:
4364 case TOK_RESTRICT3:
4365 goto redo;
4366 /* XXX: clarify attribute handling */
4367 case TOK_ATTRIBUTE1:
4368 case TOK_ATTRIBUTE2:
4369 parse_attribute(ad);
4370 break;
4372 mk_pointer(type);
4373 type->t |= qualifiers;
4374 if (ret == type)
4375 /* innermost pointed to type is the one for the first derivation */
4376 ret = pointed_type(type);
4379 if (tok == '(') {
4380 /* This is possibly a parameter type list for abstract declarators
4381 ('int ()'), use post_type for testing this. */
4382 if (!post_type(type, ad, 0, td)) {
4383 /* It's not, so it's a nested declarator, and the post operations
4384 apply to the innermost pointed to type (if any). */
4385 /* XXX: this is not correct to modify 'ad' at this point, but
4386 the syntax is not clear */
4387 parse_attribute(ad);
4388 post = type_decl(type, ad, v, td);
4389 skip(')');
4391 } else if (tok >= TOK_IDENT && (td & TYPE_DIRECT)) {
4392 /* type identifier */
4393 *v = tok;
4394 next();
4395 } else {
4396 if (!(td & TYPE_ABSTRACT))
4397 expect("identifier");
4398 *v = 0;
4400 post_type(post, ad, storage, 0);
4401 parse_attribute(ad);
4402 type->t |= storage;
4403 return ret;
4406 /* compute the lvalue VT_LVAL_xxx needed to match type t. */
4407 ST_FUNC int lvalue_type(int t)
4409 int bt, r;
4410 r = VT_LVAL;
4411 bt = t & VT_BTYPE;
4412 if (bt == VT_BYTE || bt == VT_BOOL)
4413 r |= VT_LVAL_BYTE;
4414 else if (bt == VT_SHORT)
4415 r |= VT_LVAL_SHORT;
4416 else
4417 return r;
4418 if (t & VT_UNSIGNED)
4419 r |= VT_LVAL_UNSIGNED;
4420 return r;
4423 /* indirection with full error checking and bound check */
4424 ST_FUNC void indir(void)
4426 if ((vtop->type.t & VT_BTYPE) != VT_PTR) {
4427 if ((vtop->type.t & VT_BTYPE) == VT_FUNC)
4428 return;
4429 expect("pointer");
4431 if (vtop->r & VT_LVAL)
4432 gv(RC_INT);
4433 vtop->type = *pointed_type(&vtop->type);
4434 /* Arrays and functions are never lvalues */
4435 if (!(vtop->type.t & VT_ARRAY) && !(vtop->type.t & VT_VLA)
4436 && (vtop->type.t & VT_BTYPE) != VT_FUNC) {
4437 vtop->r |= lvalue_type(vtop->type.t);
4438 /* if bound checking, the referenced pointer must be checked */
4439 #ifdef CONFIG_TCC_BCHECK
4440 if (tcc_state->do_bounds_check)
4441 vtop->r |= VT_MUSTBOUND;
4442 #endif
4446 /* pass a parameter to a function and do type checking and casting */
4447 static void gfunc_param_typed(Sym *func, Sym *arg)
4449 int func_type;
4450 CType type;
4452 func_type = func->f.func_type;
4453 if (func_type == FUNC_OLD ||
4454 (func_type == FUNC_ELLIPSIS && arg == NULL)) {
4455 /* default casting : only need to convert float to double */
4456 if ((vtop->type.t & VT_BTYPE) == VT_FLOAT) {
4457 gen_cast_s(VT_DOUBLE);
4458 } else if (vtop->type.t & VT_BITFIELD) {
4459 type.t = vtop->type.t & (VT_BTYPE | VT_UNSIGNED);
4460 type.ref = vtop->type.ref;
4461 gen_cast(&type);
4463 } else if (arg == NULL) {
4464 tcc_error("too many arguments to function");
4465 } else {
4466 type = arg->type;
4467 type.t &= ~VT_CONSTANT; /* need to do that to avoid false warning */
4468 gen_assign_cast(&type);
4472 /* parse an expression and return its type without any side effect. */
4473 static void expr_type(CType *type, void (*expr_fn)(void))
4475 nocode_wanted++;
4476 expr_fn();
4477 *type = vtop->type;
4478 vpop();
4479 nocode_wanted--;
4482 /* parse an expression of the form '(type)' or '(expr)' and return its
4483 type */
4484 static void parse_expr_type(CType *type)
4486 int n;
4487 AttributeDef ad;
4489 skip('(');
4490 if (parse_btype(type, &ad)) {
4491 type_decl(type, &ad, &n, TYPE_ABSTRACT);
4492 } else {
4493 expr_type(type, gexpr);
4495 skip(')');
4498 static void parse_type(CType *type)
4500 AttributeDef ad;
4501 int n;
4503 if (!parse_btype(type, &ad)) {
4504 expect("type");
4506 type_decl(type, &ad, &n, TYPE_ABSTRACT);
4509 static void parse_builtin_params(int nc, const char *args)
4511 char c, sep = '(';
4512 CType t;
4513 if (nc)
4514 nocode_wanted++;
4515 next();
4516 while ((c = *args++)) {
4517 skip(sep);
4518 sep = ',';
4519 switch (c) {
4520 case 'e': expr_eq(); continue;
4521 case 't': parse_type(&t); vpush(&t); continue;
4522 default: tcc_error("internal error"); break;
4525 skip(')');
4526 if (nc)
4527 nocode_wanted--;
4530 ST_FUNC void unary(void)
4532 int n, t, align, size, r, sizeof_caller;
4533 CType type;
4534 Sym *s;
4535 AttributeDef ad;
4537 sizeof_caller = in_sizeof;
4538 in_sizeof = 0;
4539 type.ref = NULL;
4540 /* XXX: GCC 2.95.3 does not generate a table although it should be
4541 better here */
4542 tok_next:
4543 switch(tok) {
4544 case TOK_EXTENSION:
4545 next();
4546 goto tok_next;
4547 case TOK_CINT:
4548 case TOK_CCHAR:
4549 case TOK_LCHAR:
4550 t = VT_INT;
4551 push_tokc:
4552 type.t = t;
4553 vsetc(&type, VT_CONST, &tokc);
4554 next();
4555 break;
4556 case TOK_CUINT:
4557 t = VT_INT | VT_UNSIGNED;
4558 goto push_tokc;
4559 case TOK_CLLONG:
4560 t = VT_LLONG;
4561 goto push_tokc;
4562 case TOK_CULLONG:
4563 t = VT_LLONG | VT_UNSIGNED;
4564 goto push_tokc;
4565 case TOK_CFLOAT:
4566 t = VT_FLOAT;
4567 goto push_tokc;
4568 case TOK_CDOUBLE:
4569 t = VT_DOUBLE;
4570 goto push_tokc;
4571 case TOK_CLDOUBLE:
4572 t = VT_LDOUBLE;
4573 goto push_tokc;
4574 case TOK_CLONG:
4575 case TOK_CULONG:
4576 #ifdef TCC_LONG_ARE_64_BIT
4577 t = VT_LLONG | VT_LONG;
4578 #else
4579 t = VT_INT | VT_LONG;
4580 #endif
4581 if (tok == TOK_CULONG)
4582 t |= VT_UNSIGNED;
4583 goto push_tokc;
4584 case TOK___FUNCTION__:
4585 if (!gnu_ext)
4586 goto tok_identifier;
4587 /* fall thru */
4588 case TOK___FUNC__:
4590 void *ptr;
4591 int len;
4592 /* special function name identifier */
4593 len = strlen(funcname) + 1;
4594 /* generate char[len] type */
4595 type.t = VT_BYTE;
4596 mk_pointer(&type);
4597 type.t |= VT_ARRAY;
4598 type.ref->c = len;
4599 vpush_ref(&type, data_section, data_section->data_offset, len);
4600 if (!NODATA_WANTED) {
4601 ptr = section_ptr_add(data_section, len);
4602 memcpy(ptr, funcname, len);
4604 next();
4606 break;
4607 case TOK_LSTR:
4608 #ifdef TCC_TARGET_PE
4609 t = VT_SHORT | VT_UNSIGNED;
4610 #else
4611 t = VT_INT;
4612 #endif
4613 goto str_init;
4614 case TOK_STR:
4615 /* string parsing */
4616 t = VT_BYTE;
4617 if (tcc_state->char_is_unsigned)
4618 t = VT_BYTE | VT_UNSIGNED;
4619 str_init:
4620 if (tcc_state->warn_write_strings)
4621 t |= VT_CONSTANT;
4622 type.t = t;
4623 mk_pointer(&type);
4624 type.t |= VT_ARRAY;
4625 memset(&ad, 0, sizeof(AttributeDef));
4626 decl_initializer_alloc(&type, &ad, VT_CONST, 2, 0, 0);
4627 break;
4628 case '(':
4629 next();
4630 /* cast ? */
4631 if (parse_btype(&type, &ad)) {
4632 type_decl(&type, &ad, &n, TYPE_ABSTRACT);
4633 skip(')');
4634 /* check ISOC99 compound literal */
4635 if (tok == '{') {
4636 /* data is allocated locally by default */
4637 if (global_expr)
4638 r = VT_CONST;
4639 else
4640 r = VT_LOCAL;
4641 /* all except arrays are lvalues */
4642 if (!(type.t & VT_ARRAY))
4643 r |= lvalue_type(type.t);
4644 memset(&ad, 0, sizeof(AttributeDef));
4645 decl_initializer_alloc(&type, &ad, r, 1, 0, 0);
4646 } else {
4647 if (sizeof_caller) {
4648 vpush(&type);
4649 return;
4651 unary();
4652 gen_cast(&type);
4654 } else if (tok == '{') {
4655 int saved_nocode_wanted = nocode_wanted;
4656 if (const_wanted)
4657 tcc_error("expected constant");
4658 /* save all registers */
4659 save_regs(0);
4660 /* statement expression : we do not accept break/continue
4661 inside as GCC does. We do retain the nocode_wanted state,
4662 as statement expressions can't ever be entered from the
4663 outside, so any reactivation of code emission (from labels
4664 or loop heads) can be disabled again after the end of it. */
4665 block(NULL, NULL
|
__label__pos
| 0.957091 |
JavaScript
Deleted User
Can you tell me why this isn't right? (Introduction to jQuery --> Using jQuery to Select Elements)
Question: "On the next line, use jQuery to select all list items (li) in an unordered list (ul) with the class of 'nav'?"
$("#container"); $("ul li .nav");
3 Answers
The reason your code doesn't work, Alan, is that the selector is selecting elements with the nav class name, inside a list item, inside an unordered list.
<ul>
<li>
<span class="nav"></span>
</li>
</ul>
If that was your code, the span element would be selected. But the nav class name should be on the unordered list, not inside the list item.
So this is the code you want:
$("ul.nav li");
James Barnett
39,199 Points
The key to this question is to understand that the <ul> element has a class of nav, in other words it's <ul class = "nav">.
So the HTML would look something like this:
<ul class="nav">
<li></li>
</ul>
Now ask yourself ... what is the CSS selector I would use to select all <li>s from the above markup?
Deleted User
Thank you both very much! That is crystal clear for me now.
|
__label__pos
| 0.776399 |
There is a corollary in Rudin's analysis text, but I am not able to understand it. Can someone help me understand it?
The Corollary is:
Let $f$ be a real differential function on $[a,b]$, then $f'$ cannot have any simple discontinuity.
Did you understand the definition of simple discontinuity? Did you understand the statement to which this is a corollary? – Siminore Nov 24 '12 at 13:27
YES. It means both left and right hand limit exist, for simple discontinuity. Theorem proves the intermediate value theorem for derivative. – user38764 Nov 24 '12 at 13:32
Does "differential" mean "differentiable"? – Chris Eagle Nov 24 '12 at 13:46
yes............ – user38764 Nov 24 '12 at 13:49
2 Answers
I'll sketch the argument. If the left and right hand limits $f'(c-)$ and $f'(c+)$ both exist and are not equal, then we're in a situation similar to $f'(c-) < f'(c) < f'(c+)$. So working on the left-hand side, we can find an $\epsilon > 0$ and a $\delta > 0$ such that $f'(x) < f'(c) - \epsilon$ for all $x \in (c-\delta, c)$. Applying the theorem, we have a contradiction.
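To spell out the final step (a sketch; the theorem being applied is the intermediate value property of the derivative mentioned above): choose $\lambda$ with $f'(c) - \epsilon < \lambda < f'(c)$ and set $x_0 = c - \delta/2$, so that $$f'(x_0) < f'(c) - \epsilon < \lambda < f'(c)\ .$$ By the intermediate value property of $f'$ on $[x_0, c]$ there is a $\xi \in (x_0, c)$ with $f'(\xi) = \lambda > f'(c) - \epsilon$, while $\xi \in (c-\delta, c)$ forces $f'(\xi) < f'(c) - \epsilon$, a contradiction. The remaining configurations (for instance $f'(c) < f'(c-)$, or a jump on the right) are handled the same way.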
You don't even need the theorem about the intermediate value property of the derivative, because one has the following fact:
If $f$ is continuous at $a$, differentiable for $x>a$, and $\lim_{x\to a+} f'(x)=p$, then one has $$ \lim_{x\to a+}{f(x)-f(a)\over x-a}=p\ .$$
Proof. Given an $\epsilon>0$ there is a $\delta>0$ such that $$|f'(x)-p|<\epsilon\qquad\bigl(x\in\ ]a,a+\delta[\ \bigr)\ .$$ Let $x\in\ ]a,a+\delta[\ $. Then by the mean value theorem there is a $\xi\in\ ]a,x[\ \subset \ ]a,a+\delta[\ $ such that $$\left|{f(x)-f(a)\over x-a}- p\right|=\bigl|f'(\xi)-p\bigr|<\epsilon\ .\qquad\qquad\square$$ It follows that the limits $\lim_{x\to a+} f'(x)$ and $\lim_{x\to a-} f'(x)$ cannot both exist and be different, if $f$ is differentiable at $a$.
|
__label__pos
| 0.86743 |
Re^2: Your random numbers are not that random
by davies (Vicar)
on Jul 22, 2012 at 07:34 UTC
in reply to Re: Your random numbers are not that random
in thread Your random numbers are not that random
Well, I've tried at least one version of perlbrew, but it crashes. It insists on running tests, and there isn't the memory on the Pi for that. Besides, I'm trying to avoid the hours of compiling that seem to be necessary. If there's a site that will give me downloadable binaries for the Pi, please let me know. My fear is that such a copy would leave me facing exactly the same problems as I'm currently getting when trying to copy a working installation.
Regards,
John Davies
Re^3: Your random numbers are not that random
by tobyink (Monsignor) on Jul 22, 2012 at 09:03 UTC
perlbrew install runs tests by default, but the --notest option can be used to skip tests.
perl -E'sub Monkey::do{say$_,for@_,do{($monkey=[caller(0)]->[3])=~s{::}{ }and$monkey}}"Monkey say"->Monkey::do'
I ran my own download and compilation without tests to get the working version. I'm not clear what advantages perlbrew would offer, since I'm trying to avoid having to compile for every card (and every time I re-flash a card).
Regards,
John Davies
Compile it once onto one card and then make clones of that card. (If you have two SD slots, you could use the dd command on Linux to create fast, exact copies. Even without a second SD slot, you could use dd via a temporary intermediate disk image file stored on some other medium.)
perlbrew comes in handy because it's very good at ensuring Perl installs into one particular directory without leaving any crud lying around on other parts of your system. (And also because it provides the shell aliases for swapping between different installations of Perl very easily.)
perl -E'sub Monkey::do{say$_,for@_,do{($monkey=[caller(0)]->[3])=~s{::}{ }and$monkey}}"Monkey say"->Monkey::do'
|
__label__pos
| 0.598457 |
What is the constant of variation?
The circumference of a circle varies directly with its diameter. If the circumference of a circle with a diameter of 7 cm is 7 cm, what are the circumferences of the circles whose diameters are 10 cm, 18 cm and 20 cm?
a. Write a mathematical statement that relates the two quantities involved in the problem.
b. What is the constant of variation? Formulate the mathematical equation
c. Construct a table of values from the relation.
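A worked sketch of parts (a) and (b), taking the stated values at face value (for an actual circle the constant would be $\pi \approx \frac{22}{7}$, so the given circumference of 7 cm may be a typo for 22 cm): direct variation means $$C = kd, \qquad k = \frac{C}{d} = \frac{7\ \text{cm}}{7\ \text{cm}} = 1,$$ so with these numbers $C = d$ and the circumferences for diameters of 10 cm, 18 cm and 20 cm are 10 cm, 18 cm and 20 cm. (With $k = \frac{22}{7}$ instead, they would be about 31.4 cm, 56.6 cm and 62.9 cm.)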
|
__label__pos
| 0.999937 |
I'm doing some work with model categories and operads, and to check a certain hypothesis I've had to learn a bit of equivariant homotopy theory. Let $M$ be a model category and $G$ be a finite group. We can assume $M$ is cofibrantly generated and left proper, but I'm trying to avoid assuming $M$ is combinatorial. If necessary we can assume $M$ is cellular, but right now I don't see how that can help. It has turned out to be useful to know that $M^G$ has a model structure where the cofibrations are $G$-equivariant maps which are cofibrations in $M$. This is simply the injective model structure on $M^G$, where a map $f$ is a weak equivalences or cofibrations if the underlying map in $M$ is such.
I tend to think of $M^G$ as a special case of a diagram category $M^I$ (where $I$ is small), because I feel like I have a decent grasp on diagram categories. In that setting, I believe one must know that $M$ is combinatorial in order to know the injective model structure exists. However, I don't have a good reference for this other than having seen it mentioned without proof or reference in a number of papers.
(1) Can anyone provide a reference which proves the injective model structure on $M^I$ exists? I'd like to see where the hypothesis that $M$ is combinatorial gets used. I'd also like the reference to prove the injective model structure is cofibrantly generated.
It's worth noting that Proposition A.2.8.2 in Lurie's Higher Topos Theory proves existence of the injective model structure, but I'm not satisfied with that for a couple of reasons. First, the proof is very complicated because Lurie wants it to hold in the setting where $M$ and $I$ are enriched over an excellent model category. My ideal reference would be a simpler proof holding in the non-enriched setting, preferably the first place the injective model structure was defined. Second (and related), because everything in this appendix is about combinatorial model categories, I can't help but wonder if there's a proof which relies on that hypothesis less. Finally, it's almost impossible for me to get my hands on the generating (trivial) cofibrations from that proof. Lurie relies on the very-complicated Lemma A.3.3.3 to get generating cofibrations and on Proposition A.2.6.8, which says basically that if you're in a category which is almost combinatorial (missing only generating trivial cofibrations) then you can get the generating trivial cofibrations for free from the generating cofibrations.
In the special case where $I$ is a group $G$, I can't seem to find anything on the injective model structure. Most of the work I can find on equivariant homotopy theory uses the projective model structure instead of the injective (and this one is known to exist if $M$ is cofibrantly generated). I imagine that with so much structure on $I$ and with so much theory which has been developed out there for equivariant homotopy theory, one should be able to come up with a much better proof in this setting than the one in HTT.
(2) Is the hypothesis that $M$ is combinatorial still necessary to prove existence of the injective model structure on $M^G$? In what ways is this model structure nicer than $M^I$ for a generic $I$?
2 Answers
It seems very unlikely to me that you will be able to get any useful handle on the generating acyclic cofibrations in the injective model structure, even in simple cases like when $I$ is the delooping of a group. The only way I have ever seen to show that they exist is by using some nasty cardinality argument akin to Lurie's A3.3.3.
I believe that the first construction of the injective model structure on diagrams of simplicial sets (specifically) was in Alex Heller's monograph "Homotopy Theories", section II.4. I don't quite understand his argument at the moment; it doesn't seem to use cofibrant generation directly.
Another, somewhat more general, reference, which is also earlier than Lurie, is Tibor Beke's paper Sheafifiable homotopy model categories, which uses a logical approach and requires that the model category be not only combinatorial but "sheafifiable".
I don't think I've ever seen any construction of an injective model structure for a non-combinatorial model category.
Hi. Thanks for the answer and references. I'll have to look into them. I'm voting you up, but not accepting the answer yet as I still hope others come along and give more references or ideas about avoiding this A.3.3.3 argument. My hope was that early references wouldn't have that. I suppose I'll find out soon when I read the ones you mention. – David White Aug 16 '12 at 0:02
@Mike At first I thought "logical approach" meant "straightforward", but having skimmed the paper now, I see you mean "logical" in the literal sense! A very remarkable paper. – Zhen Lin Apr 12 '13 at 15:51
Mike, concerning your last paragraph, you probably have seen these: Reedy model structures on diagrams indexed by an inverse category. – Fernando Muro Jun 16 at 9:25
@FernandoMuro, of course, but what I meant was an injective model structure for a general diagram shape. – Mike Shulman Jun 17 at 17:04
It seems this question has been answered very nicely since I asked it in the paper Left Induced Model Structures and Diagram Categories. They prove in Proposition 4.17 and Theorem 4.19 that if $M$ has a Postnikov presentation and the class of cofibrations coincides with the class of monomorphisms, and if $D$ is a small indexing category, and if maps in $M^D$ can be factored into a trivial cofibration followed by a fibration via the Postnikov presentation on $M$, then $M^D$ admits an injective model structure. In particular, this does not require $M$ to be combinatorial. You can also drop the need for cofibrations to equal monomorphisms if you ask $M^D$ to have both types of factorization. This paper also does a fantastic job spelling out the duality between asking a model category to be cofibrantly generated vs. to have a Postnikov presentation. Since the injective model structure is dual to the projective, I don't think a better answer than this one can be found, but I am glad to know combinatoriality is not needed. For those who enjoy the Bayeh et al. paper, I also recommend the extension of this paper which can be found in the appendix to Hess and Shipley's Waldhausen K-Theory of Spaces via Comodules.
To what new examples does this apply? – Lennart Meier Jun 17 at 7:13
My gut instinct is that it applies to Top. Since Top is not combinatorial it wasn't an example before. But I think you'd have to carefully work out the Postnikov presentation – David White Jun 17 at 13:03
Also: part of my reason for posting this answer is that I got a message from another mathematician who presumably had some application in mind and wanted to know if I'd ever delved deeper into this question. I think even in the combinatorial case it's nice to have a second proof that does not pass through the machinery of Smith's theorem, which can be mysterious to new-comers to the field. – David White Jun 17 at 16:45
|
__label__pos
| 0.511971 |
• admin
Android ViewPager with Fragments: Tutorial and Sample Project
This is a common paradigm in app development: I have a list of data objects representing, for example, recent shipments, and I want to display them in gallery format, so that I can swipe between them. Fortunately, Google anticipated the need for this paradigm and provided for us the tools to easily create the associated UI.
That class is ViewPager, and its associated data manager, PagerAdapter. If you have worked with Android ListViews before, you have seen something along the same lines as PagerAdapter: in Android terms, an Adapter is a class which translates data and turns it into UI elements. While an Adapter for a class such as ListView operates on Views, PagerAdapter’s concrete implementations operate on that much-maligned piece of Android UI, the Fragment. (Generally speaking, I am less harsh on Fragments than most of my colleagues in the Android development world, but that is a topic for another day.) FragmentPagerAdapter, the concrete implementation we will use today, generates Fragments for a list of data objects. We will run through the project now, taking note of some interesting features. You can download the source code here.
First: layout/activity_main.xml:
<android.support.v4.view.ViewPager
android:id="@+id/pager"
android:layout_width="match_parent"
android:layout_height="match_parent">
We have defined a ViewPager to occupy the full content area. As a view, ViewPager may be placed anywhere in the view hierarchy in an activity.
Second: MainActivity.java. Note that it extends FragmentActivity, a necessity for an activity hosting a ViewPager and a FragmentPagerAdapter. We have a few things to cover here. First, our adapter:
private class DemoFragmentAdapter extends FragmentPagerAdapter {
public DemoFragmentAdapter(FragmentManager fm) {
super(fm); // super tracks this
}
@Override
public Fragment getItem(int position) {
return DemoFragment.newInstance(mDemoData.get(position));
}
@Override
public int getCount() {
return mDemoData.size();
}
}
We will cover DemoFragment and DemoData in a bit: DemoData in particular is interesting, since it is an example of an implementation of Parcelable, a useful interface which allows an object to be inserted into a Bundle (in this case, as an argument to DemoFragment). The FragmentPagerAdapter class which we are extending handles all of the bookkeeping for us: it caches Fragments and tracks when they need to be replaced if the backing data has changed. All we need to do is provide the size of the data store, and implement a method to create a Fragment for each option.
Next, we will take a quick look at some of the code in MainActivity’s onCreate method.
mAdapter = new DemoFragmentAdapter(getSupportFragmentManager());
mPager = (ViewPager) findViewById(R.id.pager);
mPager.setAdapter(mAdapter);
mDemoData.add(new DemoData("Item1", "First item", 1.5));
mDemoData.add(new DemoData("Item2", "Second item", 2.5));
mDemoData.add(new DemoData("Item3", "Third item", 5.2));
mDemoData.add(new DemoData("Item4", "Fourth item", 5.1));
mDemoData.add(new DemoData("Item5", "Fifth item", 3.8));
mAdapter.notifyDataSetChanged();
Simple enough: create a DemoFragmentAdapter, attach it to the pager, and add some demo data. The last line, the call to the notifyDataSetChanged method, is the only potential stumbling block here: whenever the backing data changes, you must call mAdapter.notifyDataSetChanged(). If the ViewPager sees that the backing data has changed and that the adapter has not been notified, it will throw runtime exceptions.
DemoFragment is mostly straightforward, but the calls to OnFragmentInteractionListener.onFragmentResumed and .onFragmentCreated show a simple pattern for alerting the host activity to changes in the displayed data, if the host activity needs to respond to such changes.
Finally, DemoData:
public class DemoData implements Parcelable {
public final String name;
public final String desc;
public final double weight;
public DemoData(String name, String desc, double weight) {
this.name = name;
this.desc = desc;
this.weight = weight;
}
protected DemoData(Parcel in) {
name = in.readString();
desc = in.readString();
weight = in.readDouble();
}
public static final Creator<DemoData> CREATOR = new Creator<DemoData>() {
@Override
public DemoData createFromParcel(Parcel in) {
return new DemoData(in);
}
@Override
public DemoData[] newArray(int size) {
return new DemoData[size];
}
};
@Override
public int describeContents() {
return 0;
}
@Override
public void writeToParcel(Parcel parcel, int i) {
parcel.writeString(name);
parcel.writeString(desc);
parcel.writeDouble(weight);
}
}
The bulk of this class is Parcelable boilerplate, which Android Studio will generate for you as an autofix action if you have left Parcelable methods unimplemented. You need four items: first, one to construct an object from a Parcel, which sets properties on the object. Parcelables have no concept of indexing or key-value pairing: you read objects from the Parcel in the same order as you write them.
Second, you need a Creator<YourType>. This is used internally by Parcelable, and this implementation is the canonical form. You never need anything further in the Creator.
Third, you need the describeContents method, which returns 0 in all common use cases. (See Google’s documentation for more.)
Finally, you need the writeToParcel method, which writes primitives and Parcelables to the Parcel for this object. Notice that, since you can write Parcelables and arrays of Parcelables to a Parcel, it is possible (if not always trivial) to write complicated objects to a Parcel. You can also deliver data to the fragment using a method on the fragment callback, if parceling your data is infeasible. For instance, this might take the shape of storing a database ID in the Parcel, then making a database query or web API call to obtain the data required to fully populate the fragment.
This concludes our exploration of the ViewPager and the techniques required to use fragments to back it. As you can see, it is a powerful tool to develop a seamlessly-paginated view on collections of like data.
Need more help or information?
Please use the comment section below to ask additional questions that you would like answered. For engineering help, we offer programming services to assist you with the development of your Android applications.
|
__label__pos
| 0.731141 |
Building a strong cyber defense against the industrial threats that could disrupt your operational technology (OT) infrastructure is becoming increasingly challenging for critical infrastructure organizations around the world.
Industrial organizations are struggling to manage this increasingly complex task because of three major issues:
• An expanding threat landscape, with more adversaries targeting OT systems and assets each year.
• A growing attack surface as more OT systems require external connections for remote accessibility.
• The lack of internal cybersecurity expertise focused on understanding and managing risk to the OT environment.
This whitepaper provides more detail on these issues and presents different security tools that industrial organizations can leverage to effectively and efficiently manage the growing risk to their OT environments.
|
__label__pos
| 0.857987 |
A Lap around Microsoft Graph Toolkit Day 13 – Using Microsoft Graph Toolkit with React
Microsoft Graph team
Updates (December 2020):
As of December 11th, 2020, the Microsoft Graph Toolkit team released version 2.0. In this version, mgt-react is officially included as part of the project. There is a newly updated guide on how to use the Microsoft Graph Toolkit with React here.
Author: Fabio Franzini, Microsoft Office Development and Microsoft Business Application MVP
@franzinifabio | GitHub | LinkedIn
Introduction
Welcome to Day 13 of the Microsoft Graph Toolkit blog series!
There are many application scenarios where developers need to create or modify stand-alone web applications using specific frameworks such as React – not just HTML or JavaScript. Microsoft itself uses this framework in many services and libraries among the most famous there is Fluent UI, a UI control library used in the Microsoft 365 services ecosystem and beyond. In this blog, we create a web application in React and show you how to use the Microsoft Graph Toolkit for both authentication and event visualization.
Here is a preview of the web application
View of the React application that will be built
In making the application, we use two approaches. First, we integrate the MGT components directly with React and highlight everything needed to make things work. Then, we use the MGT-React library, a set of React controls that wrap the MGT web component library.
Creating a Web App in React
First, we create a web app in React.
There are several ways to do this, but let's make it easy by using a simple command called "create-react-app". This command is extremely powerful because, thanks to the many options available, it is possible to scaffold the entire project with just a few manual operations. And, by executing this command using "npx", we don't need to worry about the version, as it always runs the latest version available.
npx create-react-app demo-mgt-react --template typescript
I added "--template typescript" as a parameter to use TypeScript as a programming language instead of the classic JavaScript.
Now that we have created the project, we add the necessary npm library, "@microsoft/mgt":
npm install @microsoft/mgt --save
Using MGT Providers
To retrieve data, this application requires a provider to be configured. In this example we're using MsalProvider, but we could use any other provider that MGT offers; it depends on the context in which you need to use it. Inside the App.tsx file we add the following code, which (when using a valid client ID) sets up MsalProvider and uses its state to check whether the user is logged in or not.
import { useState } from 'react';
import { Providers, MsalProvider, ProviderState } from '@microsoft/mgt';

function App() {
const [isLoggedIn, setIsLoggedIn] = useState(false);
Providers.globalProvider = new MsalProvider({ clientId: '[CLIENT_ID]' });
Providers.globalProvider.onStateChanged((e) => {
if (Providers.globalProvider.state !== ProviderState.Loading)
setIsLoggedIn(Providers.globalProvider.state === ProviderState.SignedIn);
});
...
}
Using MGT with “mgt-react”
A more elegant way of integrating MGT with React is to use the mgt-react library. This is a library of React components that wraps all MGT components. Let’s see how it is used.
First, we need to add the npm package:
npm install mgt-react --save
Then we import the React components that we want to use:
import { Agenda, MgtTemplateProps } from 'mgt-react';
All the attributes and events of the underlying MGT component are mapped as properties of the corresponding React component. Very simple.
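For example, a minimal sketch (groupByDay mirrors the web component's group-by-day attribute, and the eventClick callback is an assumed illustration of how the component's events surface as props):

// Uses the Agenda component imported above.
// Attributes become camelCased props; events become callback props.
const MyAgenda = () => (
  <Agenda groupByDay={true} eventClick={(e) => console.log('event clicked', e)} />
);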
For the templating part, however, everything changes: we can use all the power of React to define templates with React syntax and components!
We can define a React component that has the MgtTemplateProps object as its type of props, like this:
import { MgtTemplateProps } from 'mgt-react';
const MyEvent = (props: MgtTemplateProps) => {
const { event } = props.dataContext;
return <div>{event.subject}</div>;
};
And use it in the Agenda component (the React one) like this:
import { Agenda } from 'mgt-react';
const App = (props) => {
return <Agenda>
<MyEvent template="event" />
</Agenda>
}
The "template" property (defined in the "MgtTemplateProps" type) indicates which of the templates available in the Agenda control the MyEvent component is associated with.
In this case, there is no need to use the ref property to “hook” the context of the template with custom functions. Simply create the functions you need in the component that acts as a template.
Let’s now implement the same template as before with the new set of React controls:
import React from 'react';
import '@microsoft/mgt';
import { Agenda, MgtTemplateProps } from 'mgt-react';
const AgendaReact = () => {
return (
<Agenda groupByDay={true} >
<Event template="event" />
<NoData template="no-data" />
</Agenda>
);
}
const Event = (props: MgtTemplateProps) => {
const { event } = props.dataContext;
const openWebLink = () => {
window.open(event.webLink, '_blank');
};
const getDate = (dateString: string) => {
let dateObject = new Date(dateString);
return dateObject.setHours(0, 0, 0, 0);
};
const getTime = (dateString: string) => {
let dateObject = new Date(dateString);
return dateObject.getHours().toString().padStart(2, '0')
+ ':' +
dateObject.getMinutes().toString().padStart(2, '0');
};
return (
<div className="...">
<div className="..." onClick={() => { openWebLink(); }}>
{event.subject}
</div>
{(getDate(event.start.dateTime)
== getDate(event.end.dateTime)) ?
<div className="...">
from {getTime(event.start.dateTime)}
to {getTime(event.end.dateTime)}
</div>
: null
}
{(event.body.content != '') ?
<div className="..."
dangerouslySetInnerHTML={{ __html: event.body.content }} />
: null
}
</div>
);
};
const NoData = (props: MgtTemplateProps) => {
return <div className="...">
No events to show
</div>
};
export default AgendaReact;
Add a style to this React App
Obviously, what is missing is a bit of style. MGT components already have their own style, but when we define our own templates, we have to supply the CSS ourselves. In this example application I used a very interesting CSS framework, Tailwind CSS, added as a regular npm package:
npm install tailwindcss --save
As for Tailwind CSS, the only thing we need to do before using it in the project is to create a file like “tailwind.css” in the root of the “src” folder and add this content:
@tailwind base;
@tailwind components;
@tailwind utilities;
This is used by the Tailwind CSS compiler to generate the CSS that will contain the classes used.
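As a rough sketch of how the generated stylesheet can be wired into the app (the CLI syntax shown is an assumption and differs between Tailwind versions, so check the docs for the version you install):

// build step (Tailwind v1/v2-style CLI), run from the project root:
//   npx tailwindcss build src/tailwind.css -o src/tailwind.output.css
// then import the generated file once, for example in src/index.tsx:
import './tailwind.output.css';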
Now the only thing left to do is to use the classes that the framework makes available to style the application. I'll leave the official link here if you want to learn more about Tailwind CSS: https://tailwindcss.com/.
Recap
We have come to the end, and we have managed to use MGT within a React application.
You can find the source code for this React App sample here: https://github.com/microsoftgraph/mgtLap-TryItOut
Third-party software plug-ins that extend the functionality of the Apple Safari web browser program
17
votes
10answers
137k views
How to take a full page screenshot in Mac OS Safari?
I'm looking for a Safari Extension that can take full page screenshots. I am aware there are various extensions, but haven't found any that would take full page images.
14
votes
2answers
12k views
How to make Safari remember zoom level per site?
Safari's default font size is not large enough for my eyes so I have to press ⌘++ for a while on every website I visit. I could've used CSS to set the default zoom level in Safari but I want different ...
13
votes
4answers
6k views
Safari extension to get rid of Google redirect links in search results?
I'm looking for a Safari extension that can rewrite the google search results so that it gives you the direct URLs rather than the long jumbled redirect links that often pop up. If you're wondering ...
11
votes
8answers
21k views
Can Safari remember my tabs from last time?
I want my previous browsing tabs to be restored when restarting Safari. I can do this in Firefox and Chrome. Is there a setting or an extension for this? EDIT: So it turns out I was killing (word ...
11
votes
4answers
12k views
Is there anything like OneTab available for Safari?
I am looking for a "tabs decluttering" app for Safari like OneTab Chrome Extension How OneTab Works Whenever you find yourself with too many tabs, click the OneTab icon to convert all of ...
9
votes
4answers
13k views
Are there Greasemonkey scripts for Safari?
There's the wonderful Greasemonkey to run user-defined JavaScript on web pages. Is there such a thing as Greasemonkey for Safari? Or would one instead write a Safari Extension to customize behavior ...
9
votes
4answers
10k views
Is there an extension to give Safari the same multiple search engine functionality as Chrome?
I have to admit, I never thought that I'd see the day I preferred a different desktop browser to Chrome. But with the latest release of Safari, I'm a believer. It (to me) performs faster, looks better,...
8
votes
5answers
14k views
Safari extension to monitor web pages for changes?
I am looking for a Safari extension similar to Update Scanner (for Firefox) and Page Monitor (for Chrome). Any suggestions?
7
votes
2answers
3k views
Safari Extension to Manage Tabs?
I'm looking for a more efficient way to manage my tabs in Safari. Specifically something that allows me to move multiple tabs at once to a new window. I often find myself following a link for a new ...
7
votes
1answer
933 views
Is it possible to disable Safari extensions when using Private browsing?
I recently switched from Chrome to Safari for a few choice web applications, and I found that I couldn't disable extensions like Lastpass when browsing privately, at least not apparently, like I could ...
6
votes
1answer
977 views
Switch Twitter Accounts in Safari?
There's add-ons for Firefox to do this, switching twitter accounts without having to sign-in 3-5 times a day is really time saving if you have more than 2 accounts, CookieSwap do the job, is there any ...
6
votes
1answer
608 views
Mark Safari tabs with different colors?
Is there an extension or a plugin that allows for marking different tabs with colors ? I want to be able to group together a bunch of tabs that are relevant under one color using Safari.
5
votes
3answers
6k views
Safari - Display Favicons In Favorites Bar
Is it possible to display favicons in Safari's Favorites Bar? For example, my Favorites Bar in Firefox looks like this: I've tried using glims, but can't get it to do this.
5
votes
2answers
3k views
How do I search for Safari Extensions?
I noticed that Safari has an Extensions Gallery page. I'm wondering how I would search all the extensions to find the one I want. Does that page list all of the extensions available, meaning I should ...
5
votes
2answers
5k views
Is there any way to view word documents within Safari on the Mac?
The title pretty much sums up the question. I have looked, and could not find anything, neither paid, nor free. I know about the DocPreview plugin, however that does not work in Safari 5.1+. There is ...
5
votes
5answers
8k views
Vimperator Style Extension for Safari 5?
Do we have a Vimperator style extension for Safari 5?
5
votes
1answer
7k views
Better tabs management in Safari 6.x
I'm looking for extensions to allow the Safari 6.x versions to manage a large number of open tabs better than the current implementation. In particular I'm looking for two features: Vertical tabs. ...
4
votes
3answers
329 views
Blur all images in Safari, unless clicked
I know that the 'Develop' menu allows me to block all images in Safari, however I have a couple more requirements: Blur, not block, all images from all webpages Click to show blurred images I don'...
4
votes
4answers
8k views
Safari clear cache on quit
Is there any way of convincing Safari to clear the cache when it quits? If not from Safari's options, then maybe an extension that does this? I would use private browsing instead, but unfortunately ...
4
votes
1answer
5k views
Way to select HTML tables by column and add them to the pasteboard?
Whenever you try to copy an HTML table to the pasteboard, it will select by row. Sometimes, I'd like to select by columns. Is there a way to do this? If not out of the box, any extensions that enable ...
4
votes
1answer
2k views
Keywurl for Safari 5.1 / Lion
Has anyone made Keywurl work with Safari 5.1 after their Lion upgrade? I tried updating the MaxBundleVersion without luck: http://hacketal.com/making-keywurl-work-with-safari-5 I'm not interested ...
4
votes
2answers
2k views
How can I have ClickToFlash except for YouTube?
I use the ClickToFlash Safari extension, out of concern for Flash security vulnerabilities and to remove annoyances. However, I do not like its replacements for YouTube video players over YouTube's ...
4
votes
2answers
361 views
Italian language dictionary for Safari
Is there a way I can add the italian language to Safari's built in spelling support?
4
votes
2answers
16k views
How do I merge the tabs of two safari windows when I have more than two open?
So far, I know how I can merge all windows with all their tabs using: I use different windows in safari to organize them by content. Sometimes it makes sense to merge the tabs of two windows. This ...
4
votes
3answers
3k views
Installed Safari Extensions disappear after restarting Safari
I have a problem with my Safari. I can not install any extensions. Whenever I install one they work fine, but the next time I open Safari, the extension is not there. So I have to reinstall them at ...
3
votes
2answers
2k views
Force installing untrusted Safari extensions
Before Safari 9, I was able to install any extension on Safari without any problems, but after Apple updated its Developer Program, it seems that I can't install extensions not already signed by Apple ...
3
votes
1answer
2k views
Generating Safari developer certificate from Windows? The old instructions that have since been removed
NOTE: not sure if this is best forum among the StackExchange group of forums/sites for this question. Please recommend a better one if this is not the best one. I worked with generating Safari ...
3
votes
2answers
190 views
How can I figure out if a Safari extension is using unreasonable memory/cpu?
How can I figure out if a Safari extension is using unreasonable memory/cpu? I'm not really experiencing any unreasonable sluggishness, but I am starting to pile up on the extensions and was ...
3
votes
1answer
30 views
How can I make Safari or Chrome remember my messages in contact forms?
As a customer, I contact businesses through contact forms on their websites, and often I forget the exact wording of the message and when I sent it. (Often, I need to re-send messages when I don't ...
3
votes
0answers
379 views
Is there a Safari extension to hide results from a Google search?
IMHO one of the most underrated browser extension is Chrome’s Personal Blocklist (by Google). It hides results in your Google search results after you have blocked certain domains them once. Is there ...
2
votes
2answers
11k views
Safari Extension for right click menu: Open in Google Chrome?
Is it possible for an extension to add items to the right click menu in Safari? If you enable developer mode, there is a menu item called "Open Page With..." and Google Chrome is a choice if it's ...
2
votes
3answers
2k views
How can I automatically start the Safari Reader?
Is there a way to tell Safari to automatically use Reader when it recognizes an article which it is able to display with Reader? Right now I have to manually click the Reader button at the right side ...
2
votes
4answers
2k views
“App of the Day is a feature of Pet Match” on each page of Safari
Just today-yesterday I started to see the following text almost on each second page I open in Safari. How to disable this? "App of the Day is a feature of Pet Match and uses visual search ...
2
votes
4answers
3k views
List the extensions installed in Safari for Mac OS X, and state whether each one is enabled
I'd like a list, from which text can be copied. (Not a screenshot of preferences.) This information does not appear in any of the following: Safari System Profiler in Snow Leopard System ...
2
votes
1answer
3k views
Where is the SafariStand menu/preferences?
Inspired by this answer I installed SafariStand (after installing SIMBAL) (following instrtuctions in the read me file How To Install - Install SIMBL 0.9.5 or later http://culater.net/software/...
2
votes
1answer
216 views
Do all Safari 5 Extensions work on windows / mac?
I wold like to download some safari extensions, but I'm not sure if I should be looking for ones specifically written to run in Windows. Should I be looking for platform specific extensions?
2
votes
1answer
96 views
Generate Safari Developer Certificate for building extensions
I have gone through this link.But everything seems to be changed. Everything redirects to Apple Development Program.So is it mandatory to enroll in that development program to create a safari ...
2
votes
1answer
87 views
Privacy aspects of Safari extensions
I'm wondering about privacy aspects of Safari extensions, or which pieces of information active extensions can obtain about my web browsing activities. Should I expect that an extension will generally ...
2
votes
2answers
99 views
How do I make my 1Password extension positioning stick in Safari?
In IOS 8, I have the 1Password extension installed. I move it to the beginning of the queue so that it's the first item in the list of possible actions, but when I restart Safari, it's always at the ...
2
votes
1answer
3k views
RESTclient for Safari
Is there a POSTMAN for Chrome equivalent (or alternative to) for Safari in Mac OS X Mavericks? I hate switching between browsers just to be able to test my APIs.
2
votes
1answer
349 views
Google Quick Scroll extension for Safari
There is a great Chrome extension Google Quick Scroll that scrolls to funded text on a page after googling it. I want it for Safari but cannot find any alternatives or so. Is there?
2
votes
2answers
2k views
ClickToPlugin alike for Firefox?
I recently stumbled upon Pentadactyl. Its power is making me think about switching from Safari to Firefox. However there are some Safari extensions I really find useful. ClickToPlugin Reddit and ...
2
votes
1answer
2k views
Is there a Safari Extension for adding Mail-style Data Detectors to Web Pages?
I love the Data Detectors in Apple Mail. Open a message that has a date or time in the content, and creating a new Calendar event is a snap. But this feature is not in Safari, so I'm wondering if ...
2
votes
1answer
407 views
Teach Safari to ignore case if necessary when opening local html file
I'm using Safari to read the HTML documentation that came with something. Many of the .html files contain links to others amongst themselves where the case of the URL does not match the name of the ...
2
votes
4answers
4k views
can I run two versions of safari side by side
On a freshly intalled computer rather then importing my old safari bookmarks (ea wih migration assistant) I want to start anew with no bookmarks. However, from time to time I would like to browse ...
2
votes
2answers
3k views
Can I combine the address bar and the Google search bar in Safari?
The only reason I am using Chrome over Safari is the cool feature Chrome has namely the address bar acts as the search bar as well. Is there a way I can enable this feature in safari as well?
2
votes
1answer
373 views
Site Blocker for Mobile Safari Google Searches
Is there a way to block content scraper sites from Google searches on Mobile Safari? I would like something like the "Personal Blocklist" now available for Google Chrome.
2
votes
0answers
253 views
How do set a custom minimum tab width in Safari 8?
In Chrome when I have many tabs open it simply reduces the width of each tab. I'm trying Safari for the first time in years and am not enjoying the tabs that get smaller towards the edge. Is there a ...
2
votes
1answer
781 views
Safari Security wants to use confidential
I get a dialog with answer choices "Always allow | Deny | Allow" and text of Safari Security wants to use your confidential information stored in "Safari Extensions List" in your keychain. What is ...
2
votes
1answer
1k views
Is there a way to set a different Safari download folder per website?
Is there any way in Safari (6 on Mountain Lion) to have it remember a different download folder per website? So if I visit my bank and download a statement I can pick my /finance/ folder to download ...
Minimum subsequences of a string A required to be appended to obtain the string B
• Last Updated : 05 Feb, 2021
Given two strings A and B, the task is to count the minimum number of operations required to construct the string B by performing the following operations:
• Select a subsequence of the string A.
• Append the subsequence at the newly formed string (initially empty).
Print the minimum count of operations required. If it is impossible to make the new string equal to B by applying the given operations, then print -1.
Examples:
Input: A = “abc”, B = “abac”
Output: 2
Explanation:
Initially, C = “”.
Step 1: Select subsequence “ab” from string A and append it to the empty string C, i.e. C = “ab”.
Step 2: Select subsequence “ac” from string A and append it to the end of string C, i.e. C = “abac”.
Now, the string C is same as string B.
Therefore, count of operations required is 2.
Input: A = “geeksforgeeks”, B = “programming”
Output: -1
Approach: Follow the below steps to solve this problem:
1. Initialize a Map to map characters present in the string A with their respective indices.
2. For each character in string A, keep track of all of its occurrences.
3. Initialize a variable, say ans, to store the count of operations required. As the number of operations must be greater than 1, set ans = 1.
4. Iterate over the characters of string B and check if the character is present in the string A or not by using the Map.
5. Lastly, maximize the length of the subsequence chosen from the string A for each operation.
6. Finally, print the minimum operations required.
Below is the implementation of the above approach:
C++
// C++ program for the above approach
#include <bits/stdc++.h>
using namespace std;
// Function to count the minimum
// subsequences of a string A required
// to be appended to obtain the string B
void countminOpsToConstructAString(string A,
string B)
{
// Size of the string
int N = A.length();
int i = 0;
// Maps characters to their
// respective indices
map<char, set<int> > mp;
// Insert indices of characters
// into the sets
for (i = 0; i < N; i++) {
mp[A[i]].insert(i);
}
// Stores the position of the last
// visited index in the string A.
// Initially set it to -1.
int previous = -1;
// Stores the required count
int ans = 1;
// Iterate over the characters of B
for (i = 0; i < B.length(); i++) {
char ch = B[i];
// If the character in B is
// not present in A, return -1
if (mp[ch].size() == 0) {
cout << -1;
return;
}
// Fetch the next index from B[i]'s set
auto it = mp[ch].upper_bound(previous);
// If the iterator points to
// the end of that set
if (it == mp[ch].end()) {
previous = -1;
ans++;
--i;
continue;
}
// If it doesn't point to the
// end, update previous
previous = *it;
}
// Print the answer
cout << ans;
}
// Driver Code
int main()
{
string A = "abc", B = "abac";
countminOpsToConstructAString(A, B);
return 0;
}
Python3
# Python3 program for the above approach
from bisect import bisect_right
# Function to count the minimum
# subsequences of a string A required
# to be appended to obtain the string B
def countminOpsToConstructAString(A, B):
# Size of the string
N = len(A)
i = 0
# Maps characters to their
# respective indices
mp = [[] for i in range(26)]
# Insert indices of characters
# into the sets
for i in range(N):
mp[ord(A[i]) - ord('a')].append(i)
# Stores the position of the last
# visited index in the string A.
# Initially set it to -1.
previous = -1
# Stores the required count
ans, i = 1, 0
# Iterate over the characters of B
while i < len(B):
ch = B[i]
# If the character in B is
# not present in A, return -1
if (len(mp[ord(ch) - ord('a')]) == 0):
print(-1)
return
# Fetch the next index from B[i]'s set
it = bisect_right(mp[ord(ch) - ord('a')], previous)
# If the iterator points to
# the end of that set
if (it == len(mp[ord(ch) - ord('a')])):
previous = -1
ans += 1
# i -= 1
continue
# If it doesn't point to the
# end, update previous
previous = mp[ord(ch) - ord('a')][it]
i += 1
# Print the answer
print (ans)
# Driver Code
if __name__ == '__main__':
A, B = "abc", "abac"
countminOpsToConstructAString(A, B)
# This code is contributed by mohit kumar 29.
Output:
2
Time Complexity: O(N * logN)
Auxiliary Space: O(N)
// a DirtyArea is used ingame to track rectangles on the visible map that need redrawing due to rubble and stuff
// this func is used only when initializing a scenario, and generating a random map
// note: ClearDirtyAreas is a static member of TacticalClass; the 'static' keyword
// belongs on the declaration inside the class, not on this out-of-class definition
void TacticalClass::ClearDirtyAreas() {
int len = vec_DirtyAreas.Length;
while(len) {
--len;
vec_DirtyAreas.Length = len;
if(len <= 0) {
break;
}
for(int idx = 0; idx < len; ++idx) {
// this copies one DirtyArea from position x to position x - 1
memcpy(&vec_DirtyAreas[idx], &vec_DirtyAreas[idx + 1], sizeof(DirtyArea));
}
}
}
Returning a value from a function
To return a value from a function, use the return command along with a variable or value at the end of the function's command block. Consider the following function definition:
function isEven (num) {
if (num % 2 == 0) {
return num;
}
}
This function determines whether a number is even. It takes one parameter, num. On line 4, we return the value of num (the line numbers refer to the full script listing below). Note that line 4 is only executed if the Boolean expression on line 3 is true, and that our Boolean expression uses the modulus operator. Let's find the even numbers between 1 and 20 by using the function definition shown above.
In the following code,
<script language="javascript">
function isEven (num) {
if (num % 2 == 0) {
return num;
}
}
function printMessage (num) {
if (num > 1) {
document.write (num + " ");
}
}
var evenNum;
document.write ("Even numbers: ");
for (i = 1; i <= 20; i++)
{
evenNum = isEven (i);
printMessage (evenNum);
}
</script>
we are using two functions. We call the isEven () function on line 16 inside a for loop. Our for loop is on lines 14 through 18. The for loop executes 20 times and calls the isEven () function and the printMessage () function 20 times. The first time, the isEven () function is called with the parameter value 1. The function returns nothing because 1 % 2 does not equal 0, so the printMessage () function prints nothing. However, the second time the for loop runs, the value 2 is passed to the isEven () function. This is an even number because 2 % 2 is 0, so the function returns 2. In this case, the printMessage () function prints the number 2. The following shows the output of the JavaScript code:
Even numbers: 2 4 6 8 10 12 14 16 18 20
Question
Does anyone have multiplication tables? The tables for 1 through 25 would do, or more if you have them.
I need them urgently because I can't find any. Would some kind friend please help me look? Thank you in advance.
Mathematics 26/2/53 Posted by panda 59
Answers
1 of 10
1-2
2-4
3-6
4-8
5-10
6-12
7-14
8-16
9-18
10-20
11-22
12-24
1-3
2-6
3-9
4-12
5-15
6-18
7-21
8-24
9-27
10-30
11-33
12-36
1-4
2-8
3-12
4-16
5-20
6-24
7-28
8-32
9-36
10-40
11-44
12-48
1-5
2-10
3-15
4-20
5-25
6-30
7-35
8-40
9-45
10-50
11-55
12-60
1-6
2-12
3-18
4-24
5-30
6-36
7-42
8-48
9-54
10-60
11-66
12-72
1-7
2-14
3-21
4-28
5-35
6-42
7-49
8-56
9-63
10-70
11-77
12-84
1-8
2-16
3-24
4-32
5-40
6-48
7-56
8-64
9-72
10-80
11-88
12-96
1-9
2-18
3-27
4-36
5-45
6-54
7-63
8-72
9-81
10-90
11-99
12-108
1-10
2-20
3-30
4-40
5-50
6-60
7-70
8-80
9-90
10-100
11-110
12-120
1-11
2-22
3-33
4-44
5-55
6-66
7-77
8-88
9-99
10-110
11-121
12-132
1-12
2-24
3-36
4-48
5-60
6-72
7-84
8-96
9-108
10-120
11-132
12-144
1-13
2-26
3-39
4-52
5-65
6-78
7-91
8-104
9-117
10-130
11-143
12-156
1-14
2-28
3-42
4-56
5-70
6-84
7-98
8-112
9-126
10-140
11-154
12-168
1-15
2-30
3-45
4-60
5-75
6-90
7-105
8-120
9-135
10-150
11-165
12-180
1-16
2-32
3-48
4-64
5-80
6-96
7-112
8-128
9-144
10-160
11-176
12-192
1-17
2-34
3-51
4-68
5-85
6-102
7-119
8-136
9-153
10-170
11-187
12-204
1-18
2-36
3-54
4-72
5-90
6-108
7-126
8-144
9-162
10-180
11-198
12-216
1-19
2-38
3-57
4-76
5-95
6-114
7-133
8-152
9-171
10-190
11-209
12-228
1-20
2-40
3-60
4-80
5-100
6-120
7-140
8-160
9-180
10-200
11-220
12-240
1-21
2-42
3-63
4-84
5-105
6-126
7-147
8-168
9-189
10-210
11-231
12-252
1-22
2-44
3-66
4-88
5-110
6-132
7-154
8-176
9-198
10-220
11-242
12-264
1-23
2-46
3-69
4-92
5-115
6-138
7-161
8-184
9-207
10-230
11-253
12-276
1-24
2-48
3-72
4-96
5-120
6-144
7-168
8-192
9-216
10-240
11-264
12-288
1-25
2-50
3-75
4-100
5-125
6-150
7-175
8-200
9-225
10-250
11-275
12-300
26/2/53 Posted by คนอยากจะรวย
2 of 10
Just take this website instead:
http://school.obec.go.th/khungyuamvitaya/web2008/webmaster/suksasongkorw1/Web_student/namo%20601/noname4.htm
26/2/53 Posted by คนอยากจะรวย
3 of 10
I give up.
26/2/53 Posted by เฮียเต้ย คลับหื่น แห่ง fm 88.5
4 of 10
26x1 = 26
26x2 = 52
26x3 = 78
26x4 = 104
26x5 = 130
26x6 = 156
26x7 = 182
26x8 = 208
26x9 = 234
26x10 = 260
26x11 = 286
26x12 = 312
27x1 = 27
27x2 = 54
27x3 = 81
27x4 = 108
27x5 = 135
27x6 = 162
27x7 = 189
27x8 = 216
27x9 = 243
27x10 = 270
27x11 = 297
27x12 = 324
28x1 = 28
28x2 = 56
28x3 = 84
28x4 = 112
28x5 = 140
28x6 = 168
28x7 = 196
28x8 = 224
28x9 = 252
28x10 = 280
28x11 = 308
28x12 = 336
29x1 = 29
29x2 = 58
29x3 = 87
29x4 = 116
29x5 = 145
29x6 = 174
29x7 = 203
29x8 = 232
29x9 = 261
29x10 = 290
29x11 = 319
29x12 = 348
30x1 = 30
30x2 = 60
30x3 = 90
30x4 = 120
30x5 = 150
30x6 = 180
30x7 = 210
30x8 = 240
30x9 = 270
30x10 = 300
30x11 = 330
30x12 = 360
31x1 = 31
31x2 = 62
31x3 = 93
31x4 = 124
31x5 = 155
31x6 = 186
31x7 = 217
31x8 = 248
31x9 = 279
31x10 = 310
31x11 = 341
31x12 = 372
32x1 = 32
32x2 = 64
32x3 = 96
32x4 = 128
32x5 = 160
32x6 = 192
32x7 = 224
32x8 = 256
32x9 = 288
32x10 = 320
32x11 = 352
32x12 = 384
33x1 = 33
33x2 = 66
33x3 = 99
33x4 = 132
33x5 = 165
33x6 = 198
33x7 = 231
33x8 = 264
33x9 = 297
33x10 = 330
33x11 = 363
33x12 = 396
34x1 = 34
34x2 = 68
34x3 = 102
34x4 = 136
34x5 = 170
34x6 = 204
34x7 = 238
34x8 = 272
34x9 = 306
34x10 = 340
34x11 = 374
34x12 = 408
35x1 = 35
35x2 = 70
35x3 = 105
35x4 = 140
35x5 = 175
35x6 = 210
35x7 = 245
35x8 = 280
35x9 = 315
35x10 = 350
35x11 = 385
35x12 = 420
36x1 = 36
36x2 = 72
36x3 = 108
36x4 = 144
36x5 = 180
36x6 = 216
36x7 = 252
36x8 = 288
36x9 = 324
36x10 = 360
36x11 = 396
36x12 = 432
37x1 = 37
37x2 = 74
37x3 = 111
37x4 = 148
37x5 = 185
37x6 = 222
37x7 = 259
37x8 = 296
37x9 = 333
37x10 = 370
37x11 = 407
37x12 = 444
38x1 = 38
38x2 = 76
38x3 = 114
38x4 = 152
38x5 = 190
38x6 = 228
38x7 = 266
38x8 = 304
38x9 = 342
38x10 = 380
38x11 = 418
38x12 = 456
39x1 = 39
39x2 = 78
39x3 = 117
39x4 = 156
39x5 = 195
39x6 = 234
39x7 = 273
39x8 = 312
39x9 = 351
39x10 = 390
39x11 = 429
39x12 = 468
40x1 = 40
40x2 = 80
40x3 = 120
40x4 = 160
40x5 = 200
40x6 = 240
40x7 = 280
40x8 = 320
40x9 = 360
40x10 = 400
40x11 = 440
40x12 = 480
41x1 = 41
41x2 = 82
41x3 = 123
41x4 = 164
41x5 = 205
41x6 = 246
41x7 = 287
41x8 = 328
41x9 = 369
41x10 = 410
41x11 = 451
41x12 = 492
42x1 = 42
42x2 = 84
42x3 = 126
42x4 = 168
42x5 = 210
42x6 = 252
42x7 = 294
42x8 = 336
42x9 = 378
42x10 = 420
42x11 = 462
42x12 = 504
43x1 = 43
43x2 = 86
43x3 = 129
43x4 = 172
43x5 = 215
43x6 = 258
43x7 = 301
43x8 = 344
43x9 = 387
43x10 = 430
43x11 = 473
43x12 = 516
44x1 = 44
44x2 = 88
44x3 = 132
44x4 = 176
44x5 = 220
44x6 = 264
44x7 = 308
44x8 = 352
44x9 = 396
44x10 = 440
44x11 = 484
44x12 = 528
45x1 = 45
45x2 = 90
45x3 = 135
45x4 = 180
45x5 = 225
45x6 = 270
45x7 = 315
45x8 = 360
45x9 = 405
45x10 = 450
45x11 = 495
45x12 = 540
46x1 = 46
46x2 = 92
46x3 = 138
46x4 = 184
46x5 = 230
46x6 = 276
46x7 = 322
46x8 = 368
46x9 = 414
46x10 = 460
46x11 = 506
46x12 = 552
47x1 = 47
47x2 = 94
47x3 = 141
47x4 = 188
47x5 = 235
47x6 = 282
47x7 = 329
47x8 = 376
47x9 = 423
47x10 = 470
47x11 = 517
47x12 = 564
48x1 = 48
48x2 = 96
48x3 = 144
48x4 = 192
48x5 = 240
48x6 = 288
48x7 = 336
48x8 = 384
48x9 = 432
48x10 = 480
48x11 = 528
48x12 = 576
49x1 = 49
49x2 = 98
49x3 = 147
49x4 = 196
49x5 = 245
49x6 = 294
49x7 = 343
49x8 = 392
49x9 = 441
49x10 = 490
49x11 = 539
49x12 = 588
50x1 = 50
50x2 = 100
50x3 = 150
50x4 = 200
50x5 = 250
50x6 = 300
50x7 = 350
50x8 = 400
50x9 = 450
50x10 = 500
50x11 = 550
50x12 = 600
51x1 = 51
51x2 = 102
51x3 = 153
51x4 = 204
51x5 = 255
51x6 = 306
51x7 = 357
51x8 = 408
51x9 = 459
51x10 = 510
51x11 = 561
51x12 = 612
52x1 = 52
52x2 = 104
52x3 = 156
52x4 = 208
52x5 = 260
52x6 = 312
52x7 = 364
52x8 = 416
52x9 = 468
52x10 = 520
52x11 = 572
52x12 = 624
There are more if you need them; just say so.
27/2/53 Posted by mickey9732
5 of 10
Why not use Excel to build the table and do the calculations for you? I reckon ten minutes would be enough to get multiplication tables all the way up to 200.
28/2/53 Posted by Thanes
6 of 10
2/3/53 Posted by PPwa
7 of 10
Why not do it in an Excel spreadsheet? It's quick and easy, and you can make as many tables as you want.
5/3/53 Posted by อุ้ยธนิต
8 of 10
Go buy a notebook with the tables printed on the back. These days pencils and rulers come with them printed on as well, and they're easy to find; one trip to MAKRO and you'll have everything.
Or open Excel, make the sheet yourself and print it at whatever size you like.
6/3/53 Posted by Annie M
9 of 10
I was going to give Annie points; why was her answer removed? People speak the truth but it doesn't always please everyone. I think it was just a suggestion.
7/3/53 Posted by kwankwan
10 of 10
Use Excel; it only takes a moment ^_^ Give it a try.
You get the multiplication tables, you learn something, and you can be proud of it too (you made it yourself).
12/3/53 Posted by SuperGoldFish
In my few years of programming, I've toyed with everything from Ruby to C++. I've done everything from just learning basic syntax (Ruby) to completing several major (for me) projects that stretched my abilities with the language. Given this variety (and the fact that truly learning a language never stops), when can I say I know (or have learned) a language?
I think the word you're looking for is "mastering" a language, "learned" in this context is a bit strange. – Pieter B Jun 29 '12 at 14:20
"Words are for meaning: when you've got the meaning, you can forget the words." - Zhuangzi – jfrankcarr Jun 29 '12 at 15:12
when you don't have to ask this question, you will have learned what you need to know ... – Jarrod Roberson Jun 29 '12 at 17:27
@JarrodRoberson Is this Zhuangzi who said that too? – jadkik94 Jul 4 '12 at 20:37
According to LinkedIn there is very low bar to put up a programming language on your skill resume... According to my LinkedIn people who've seen Java code before apparently feels comfortable listing it in their skills section. – Chris Andrè Dale Jul 6 '12 at 9:25
9 Answers
(accepted answer, 65 votes)
At what point can you say that you've "learned" a language like English or French? I think most people would accept that you've learned a language when you can use it to communicate. You don't have to know every word or understand every idiom, but you should know most of the (rather small) subset of the language that people use in daily conversation. Having "learned" a language to that extent, you can continue to learn more as you use the language daily and expose yourself to more and more of it. By the time you die, you still won't know all there is to know about the language, but nobody will say that you never "learned" the language.
So it is, I think, with computer languages. You can say that you "know" a language like C++ or Ruby when you can write useful programs in it. If you need to quantify the degree to which you know the language, use the same terms that you'd use for a human language. Say "I know a little C++" or "I know conversational C++" or "I'm fluent in C++" to mean that you know just enough to scrape by, enough to get useful things done, or that you're an expert, respectively.
The terms conversational and fluent are rather subjective and while appropriate to natural language, it is difficult to see what they mean when applied to a computer language. Although still subjective, functional and idomatic may work better. Functional implies that you know the syntax well enough to write useful constructs, while having an idiomatic knowledge implies that you not only know how to write in the language but also know it well enough to use the idioms of the language, such as using using in C# or b, a = a, b and generators in python. Otherwise, +1. – Mark Booth Jun 29 '12 at 15:16
@MarkBooth Any description short of a score on an exam will be somewhat subjective. The point is to use the familiarity that most people already have with levels of knowledge of human languages as a rough guide to knowledge of a computer language, and to do it in a friendly way that invites further investigation. Conversational C++ is shorthand for "I'm familiar with the basics, but you may need to help me with advanced, obscure, or tricky code." – Caleb Jun 29 '12 at 15:31
While I think you're mostly right, I think a lot of people wrongly assume they have learned a natural language when they still have a miserable level. And it applies also to people who say they've learned programming languages. – haylem Jul 1 '12 at 13:26
I think your answer is perfect, it even shows how much it's impossible to quantify language knowledge. Indeed, while fluent means you can talk about most anything, with about as much explanations as a native speaker would require, this does not address the fact that most native speakers don't possess a tenth of the language knowledge good old writers have (no, not harry potter or dan brown, real old style hard to read stuff). How would you address that, when in fact nobody has bothered addressing it for languages ? and when in fact it does strongly impact productivity in programming ? – Morg. Aug 23 '12 at 12:54
@Morg. What utter rubbish. Programming is as much about communicating with future programmers (including yourself) as it is with telling the computer what to do. Sure, simple write one code might not need to be communicated well, but in the real world making your intentions clear to future maintainers of the code is far more important than getting the computer to do the right thing. Anyway, these comments are no longer serving to help improve this answer, so I would suggest that we take this to Programmers Chat if you want to discuss it further. – Mark Booth Sep 5 '12 at 12:14
This is more of an opinion-based question. In strictly technical terms, "learned" would mean having complete knowledge of the language, as in you would be expected to know EVERYTHING about it.
However, in the world of programming I believe it's more about whether you can program fluently in the language to complete an objective or task. It probably also means that you're comfortable with the language. Sure, you could go out there and get a degree after years and years of college, but that's only to get noticed. You define when you can say you've learned a language.
You can rarely say you've learned a language. That implies you're done learning about it, which is... foolish.
Saying you know a language is fine when you can write code that isn't blatantly breaking idioms in the language (maybe after a week of de-rusting the knowledge) without referring to a syntax reference.
Idioms change, styles vary from one place to another, and indeed languages themselves change over time. If you can't say that you "know" or "have learned" a language like C++ until you're done learning about it, then there are very few people indeed who can claim to have learned C++. – Caleb Jun 29 '12 at 14:46
@Caleb exactly. – Mike Brown Jun 29 '12 at 14:55
If you learning like that, then you don't know even your native language, which is foolish too (at least for most of people) – superM Jun 29 '12 at 15:06
@Caleb, worse than that, many parts of c++ are better when ignored, as Carmack and Torvalds both pointed out - so .. having learned all of cpp is not exactly beneficial. – Morg. Aug 23 '12 at 13:05
My personal answer for this is when:
1. I know the basics to write a simple production-level program.
2. I've spent at least 3 straight hours solving a problem that seemed simple but wasn't.
From there on, I wouldn't call it learning, but mastery.
I think people are focusing on the finality of the word "learned": being perfect tense, it implies you're finished learning. And as everyone noted, programming languages are living entities just like spoken languages. For example, someone who had complete mastery of C# 2 would look at C# 3 with LINQ, lambdas, the var keyword, object/collection initializers, and extension methods and find that a lot has changed from what he learned for the previous version. The same is happening with C# 4 with the await and async keywords and other new language constructs.
And we're just talking syntax here. We forget that most languages are paired with a framework. C# has the .NET framework, Java has its own framework, as do Ruby, C++, PHP, and Perl. Think of the framework for a language as the vocabulary, and the language itself is just syntax. Knowing the language means you know how to make a sentence in the language. Knowing the framework means you can express yourself well in the language. Finally, once you know the syntax and vocabulary, you need to learn the idioms so that you write code like a "native speaker".
I'd say there are several levels of learning.
1. Hello World (you can write the most rudimentary programs.)
2. Proficient (you can write basic programs but need to consult online help regularly)
3. Fluent (you can write advanced programs with some reference needed).
4. Native speaker (you can write advanced programs with minimal online help, your code is indistinguishable from another native speaker's code).
Nitpick: await is from C# 5, the major new thing in C# 4 was dynamic. – svick Jul 5 '12 at 18:06
You're right...I keep losing track of which version of C# goes with which version of .NET – Mike Brown Jul 5 '12 at 21:12
It depends on who is asking
The person who asks probably has a certain idea of what level of skill is expected from someone who says that he has "learned a language."
Basically, I have encountered two archetypical scales by which people judge the answer to the question "have you learned X?"
For most nerds it is a trick question to weed out posers. This is already demonstrated by the other answers to your question ;). If you say you have learned it, you fail to see the scope of the task, which is implicitly "perfection", and, much worse, you fail in humility.
For most bosses, you "have learned it" if you can solve their problems, a criterion oftentimes already met if your code compiles. Answering his question in a humble way will lead to him assigning the task to a poser, who may also be a relative.
So, what's your audience?
In interviews, you know it if you know it. In the workplace, you know it if Google knows it. – asfallows Jul 6 '12 at 14:07
In interviews, it's "i know it" if the interviewer wears a tie, and it's "please define 'to know'" if he does not ;). To be fair, most interviewers ask if one "has experience with", to which i am more easily inclined to answer with "yes" – keppla Jul 12 '12 at 15:17
When you start noticing its flaws. Once you start learning a language, you are usually still busy learning how to do stuff, or too involved in it to notice where it fails (of course, this is unless you start learning it from a negative point of view, just to see how much worse it is compared to your favorite language so far). I'd say you don't truly know a language until you are acutely aware of the areas where it fails.
I can't agree with that. It may be true for the first language or two you learn — you may simply not know what to look for, so even the most bletcherous design failures are indistinguishable from the right way to do things. Once you've learned a few languages, you should start to be able to see language flaws soon after you start learning a new one. But then for a while in your development, you will erroneously equate "different" with "wrong." Until you learn a few languages of each major type (procedural, OO, functional, declarative...) your immediate judgements will be suspect. – Warren Young Jul 2 '12 at 23:56
At the point when your application or component can read mail, i.e. Zawinski's law, which states:
“Every program attempts to expand until it can read mail. Those programs which cannot so expand are replaced by ones which can.” Coined by Jamie Zawinski (who called it the “Law of Software Envelopment”) to express his belief that all truly useful programs experience pressure to evolve into toolkits and application platforms (the mailer thing, he says, is just a side effect of that).
This was updated when RSS became popular to "... can read RSS feeds" and probably could be updated these days to "... can read Twitter feeds". ;-)
I think you have learned a language when you don't see it anymore. You don't see the for loops and data accesses, but the workflow of the algorithm.
You don't play tricks with parameter passing, array processing, or magic macros. You don't think of your code as self-marketing material to show to your friends and teachers, but as a way to express your understanding of a problem, and your intent, to the computer in the most readable way.
You have habits that help you avoid lots of mistakes just by following them, like using {} code blocks and () in expressions even if you are absolutely sure that "it will be only one line" or "I am no fool and know operator precedence order".
You stop counting how many classes, patterns and framework APIs you know. That is what IntelliSense, the references and tutorials, and Google are for. But when you look at a problem, you immediately know what parts it can be split into, and with what tools and algorithms you will solve them.
What you do works, is elegant and small. You enjoy coding and easily find your bugs. Yes, masters also make bugs because they are human - but they find them quickly because their code structure leads them to the right place.
And you finally realize: you have learned to think, analyze and solve problems; compared to that, the actual language is secondary, just the current box of rules and tools.
Windows Azure Diagnostics: Initializing the Configuration and Using a Configuration File
Microsoft Windows Azure Development Cookbook
Initializing the configuration of Windows Azure Diagnostics
The Windows Azure Diagnostics module is imported into a role by the specification of an Import element with a moduleName attribute of Diagnostics in the Imports section of the service definition file (ServiceDefinition.csdef). This further requires the specification, in the service configuration file (ServiceConfiguration.cscfg), of a Windows Azure Storage Service account that can be used to access the instance configuration for diagnostics. This configuration is stored as an XML file in a blob, named for the instance, in a container named wad-control-container in the storage service account configured for diagnostics.
The Diagnostics Agent service is started automatically when a role instance starts, provided the diagnostics module has been imported into the role. Note that this is not true in Windows Azure SDK versions prior to v1.3, where the Diagnostics Agent must be explicitly started by invoking DiagnosticMonitor.Start().
On instance startup, the diagnostics configuration for the instance can be set as desired in the overridden RoleEntryPoint.OnStart() method. The general idea is to retrieve the default initial configuration using DiagnosticMonitor.GetDefaultInitialConfiguration() and modify it as necessary before saving it using DiagnosticMonitor.Start(). This name is something of a relic because, in Windows Azure SDK v1.3 and later, the Diagnostics Agent service is started automatically.
Another way to modify the diagnostics configuration for the instance is to use RoleInstanceDiagnosticManager.GetCurrentConfiguration() to retrieve the existing instance configuration from wad-control-container. This can be modified and then saved using RoleInstanceDiagnosticManager.SetCurrentConfiguration(). This method can be used both inside and outside the role instance. For example, it can be invoked remotely to request that an on-demand transfer be performed. An issue is that using this technique during instance startup violates the principle that the environment on startup is always the same, as the existing instance configuration may already have been modified. Note that it is not possible to modify the diagnostics configuration for an instance if there is a currently active on-demand transfer.
In this recipe, we will learn how to initialize programmatically the configuration of Windows Azure Diagnostics.
How to do it...
We are going to see how to initialize the configuration for Windows Azure Diagnostics using code. We do this as follows:
1. Use Visual Studio to create an empty cloud project.
2. Add a web role to the project (accept the default name of WebRole1).
3. Add the following assembly reference to the project:
System.Data.Services.Client
4. In the WebRole class, replace OnStart() with the following:
public override bool OnStart()
{
WadManagement wadManagement = new WadManagement();
wadManagement.InitializeConfiguration();
return base.OnStart();
}
5. In the Default.aspx file, replace the asp:Content element named BodyContent with the following:
<asp:Content ID="BodyContent" runat="server"
ContentPlaceHolderID="MainContent">
<div id="xmlInner">
<pre>
<asp:label id="xmlLabel" runat="server"/>
</pre>
</div>
</asp:Content>
6. Add the following using statements to the Default.aspx.cs file:
using Microsoft.WindowsAzure.ServiceRuntime;
7. In the Default.aspx.cs file, add the following private members to the _Default class:
private String deploymentId = RoleEnvironment.DeploymentId;
private String roleName =
RoleEnvironment.CurrentRoleInstance.Role.Name;
private String instanceId =
RoleEnvironment.CurrentRoleInstance.Id;
8. In the Default.aspx.cs file, replace Page_Load() with the following:
protected void Page_Load(object sender, EventArgs e)
{
WadManagement wad = new WadManagement();
String wadConfigurationForInstance =
wad.GetConfigurationBlob(
deploymentId, roleName, instanceId);
xmlLabel.Text =
Server.HtmlEncode(wadConfigurationForInstance);
}
9. Add a class named WadManagement to the project.
10. Add the following using statements to the WadManagement class:
using Microsoft.WindowsAzure;
using Microsoft.WindowsAzure.Diagnostics;
using Microsoft.WindowsAzure.Diagnostics.Management;
using Microsoft.WindowsAzure.ServiceRuntime;
using Microsoft.WindowsAzure.StorageClient;
11. Add the following private members to the WadManagement class:
private String wadConnectionString =
"Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString";
private String wadControlContainerName =
"wad-control-container";
private CloudStorageAccount cloudStorageAccount;
12. Add the following constructor to the WadManagement class:
public WadManagement()
{
cloudStorageAccount = CloudStorageAccount.Parse(
RoleEnvironment.GetConfigurationSettingValue(
wadConnectionString));
}
13. Add the following methods, retrieving the instance configuration blob from Windows Azure Storage, to the WadManagement class:
public String GetConfigurationBlob(
String deploymentId, String roleName, String instanceId)
{
DeploymentDiagnosticManager deploymentDiagnosticManager =
new DeploymentDiagnosticManager(
cloudStorageAccount, deploymentId);
String wadConfigurationBlobNameForInstance =
String.Format("{0}/{1}/{2}", deploymentId, roleName,
instanceId);
String wadConfigurationForInstance =
GetWadConfigurationForInstance(
wadConfigurationBlobNameForInstance);
return wadConfigurationForInstance;
}
private String GetWadConfigurationForInstance(
String wadConfigurationInstanceBlobName)
{
CloudBlobClient cloudBlobClient =
cloudStorageAccount.CreateCloudBlobClient();
CloudBlobContainer cloudBlobContainer =
cloudBlobClient.GetContainerReference(
wadControlContainerName);
CloudBlob cloudBlob = cloudBlobContainer.GetBlobReference(
wadConfigurationInstanceBlobName);
String wadConfigurationForInstance =
cloudBlob.DownloadText();
return wadConfigurationForInstance;
}
14. Add the following method, initializing the configuration of Windows Azure Diagnostics, to the WadManagement class:
public void InitializeConfiguration()
{
String eventLog = "Application!*";
String performanceCounter =
@"\Processor(_Total)\% Processor Time";
DiagnosticMonitorConfiguration dmc =
DiagnosticMonitor.GetDefaultInitialConfiguration();
dmc.DiagnosticInfrastructureLogs.BufferQuotaInMB = 100;
dmc.DiagnosticInfrastructureLogs.ScheduledTransferPeriod =
TimeSpan.FromHours(1);
dmc.DiagnosticInfrastructureLogs.
ScheduledTransferLogLevelFilter = LogLevel.Verbose;
dmc.WindowsEventLog.BufferQuotaInMB = 100;
dmc.WindowsEventLog.ScheduledTransferPeriod =
TimeSpan.FromHours(1);
dmc.WindowsEventLog.ScheduledTransferLogLevelFilter =
LogLevel.Verbose;
dmc.WindowsEventLog.DataSources.Add(eventLog);
dmc.Logs.BufferQuotaInMB = 100;
dmc.Logs.ScheduledTransferPeriod = TimeSpan.FromHours(1);
dmc.Logs.ScheduledTransferLogLevelFilter =
LogLevel.Verbose;
dmc.Directories.ScheduledTransferPeriod =
TimeSpan.FromHours(1);
PerformanceCounterConfiguration perfCounterConfiguration
= new PerformanceCounterConfiguration();
perfCounterConfiguration.CounterSpecifier =
performanceCounter;
perfCounterConfiguration.SampleRate =
System.TimeSpan.FromSeconds(10);
dmc.PerformanceCounters.DataSources.Add(
perfCounterConfiguration);
dmc.PerformanceCounters.BufferQuotaInMB = 100;
dmc.PerformanceCounters.ScheduledTransferPeriod =
TimeSpan.FromHours(1);
DiagnosticMonitor.Start(cloudStorageAccount, dmc);
}
How it works...
In steps 1 and 2, we create a cloud project with a web role. We add the required assembly reference in step 3.
In step 4, we modify OnStart(), so that it initializes the configuration of Windows Azure Diagnostics.
In step 5, we modify the default web page, so that it displays the content of the blob storing the instance configuration for Windows Azure Diagnostics. In step 6, we add the required using statement to Default.aspx.cs. In step 7, we add some private members to store the deployment ID, the role name, and the instance ID of the current instance. In step 8, we modify the Page_Load() event handler to retrieve the blob content and display it on the default web page.
In step 9, we add the WadManagement class that interacts with the Windows Azure Blob Service. In step 10, we add the required using statements. In step 11, we add some private members to contain the name of the connection string in the service configuration file, and the name of the blob container containing the instance configuration for Windows Azure Diagnostics. We also add a CloudStorageAccount instance, which we initialize in the constructor we add in step 12.
We then add, in step 13, the two methods we use to retrieve the content of the blob containing the instance configuration for Windows Azure Diagnostics. In GetConfigurationBlob(), we first create the name of the blob. We then pass this into the GetWadConfigurationForInstance() method, which invokes various Windows Azure Storage Client Library methods to retrieve the content of the blob.
In step 14, we add the method to initialize the configuration of Windows Azure Diagnostics for the instance. We first specify the names of the event log and performance counter we want to capture and persist. We then retrieve the default initial configuration and configure capture of the Windows Azure infrastructure logs, Windows Event Logs, basic logs, directories, and performance counters. For each of them, we specify a data buffer size of 100 MB and schedule an hourly transfer of logged data.
For Windows Event Logs, we specify that the Application!* event log should be captured locally and persisted to the storage service. The event log is specified using an XPath expression allowing the events to be filtered, if desired. We can add other event logs if desired. We configure the capture and persistence of only one performance counter—the \Processor(_Total)\% Processor Time. We can add other performance counters if desired. Two sections at the end of this recipe provide additional details on the configuration of event logs and performance counters.
We specify a transfer schedule for the directories data buffer. The Diagnostics Agent automatically inserts special directories into the configuration: crash dumps for all roles, and IIS logs and IIS failed request logs for web roles. The Diagnostics Agent does this because the actual location of the directories is not known until the instance is deployed. Note that even though we have configured a persistence schedule for crash dumps, they are not captured by default. We would need to invoke the CrashDumps.EnableCollection() method to enable the capture of crash dumps.
There's more...
We can also modify an existing diagnostics configuration for an instance. We do this by adding the following method to the WadManagement class, and then invoking it in the same way we invoked InitializeConfiguration():
public void ModifyConfiguration(
String deploymentId, String roleName, String instanceId)
{
String eventLog =
@"Application!*[System[Provider[@Name='.NET Runtime']]]";
String performanceCounter = @"\ASP.NET\Requests Rejected";
RoleInstanceDiagnosticManager ridm =
cloudStorageAccount.CreateRoleInstanceDiagnosticManager(
deploymentId, roleName, instanceId);
DiagnosticMonitorConfiguration dmc =
ridm.GetCurrentConfiguration();
Int32 countDataSources =
dmc.WindowsEventLog.DataSources.Count(
item => item == eventLog);
if (countDataSources == 0)
{
dmc.WindowsEventLog.DataSources.Add(eventLog);
dmc.WindowsEventLog.ScheduledTransferPeriod =
TimeSpan.FromHours(1);
}
countDataSources =
dmc.PerformanceCounters.DataSources.Count(
item => item.CounterSpecifier == performanceCounter);
if (countDataSources == 0)
{
PerformanceCounterConfiguration perfConfiguration =
new PerformanceCounterConfiguration()
{
CounterSpecifier = performanceCounter,
SampleRate = System.TimeSpan.FromHours(1)
};
dmc.PerformanceCounters.DataSources.Add(
perfConfiguration);
}
IDictionary<DataBufferName, OnDemandTransferInfo>
activeTransfers = ridm.GetActiveTransfers();
if (activeTransfers.Count == 0)
{
ridm.SetCurrentConfiguration(dmc);
}
}
This method can be used locally inside a role, or remotely, to modify the diagnostics configuration for the instance identified by the deploymentId, roleName, and instanceId with which the method is parameterized.
If the method is invoked remotely from an application, then the code to create the CloudStorageAccount must be modified to the following:
CloudStorageAccount cloudStorageAccount =
CloudStorageAccount.Parse(
ConfigurationManager.AppSettings["DataConnectionString"]);
This assumes that DataConnectionString specified in the following canonical format is an app.config file setting:
<Setting name="DataConnectionString"
   value="DefaultEndpointsProtocol=http;AccountName=ACCOUNT;AccountKey=KEY" />
We also need to add an assembly reference to System.Configuration.dll and the related using statement for the System.Configuration namespace.
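As an illustration only, the remote invocation might then look like the following, where the deployment ID, role name, and instance ID are placeholder values that would be taken, for example, from the Windows Azure Portal, and WadManagement is assumed to have a parameterless constructor:
// Illustrative invocation: all three identifiers are placeholders
WadManagement wadManagement = new WadManagement();
wadManagement.ModifyConfiguration(
    "256bffb1e1594d99a40d8dc31d179942",   // deployment ID (placeholder)
    "WebRole1",                           // role name (placeholder)
    "WebRole1_IN_0");                     // instance ID (placeholder)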
In the method, we initialize variables to specify the Windows Event Log and the performance counter we wish to configure. Note that we use an XPath expression to filter the event log to persist only events coming from the .NET Runtime source.
We create a RoleInstanceDiagnosticManager object for the instance and use it to retrieve its DiagnosticMonitorConfiguration. Having ensured that we have not already added the event log and performance counter, we add them to the diagnostics configuration for the instance.
Finally, we invoke RoleInstanceDiagnosticManager.SetCurrentConfiguration() to save the modified configuration to the diagnostics configuration for the instance stored in wad-control-container. We save the configuration only if there is no active on-demand transfer, as SetCurrentConfiguration() throws an exception if an on-demand transfer is in progress.
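If we want to see what is blocking the save, we can list the active transfers returned by GetActiveTransfers(); the RequestId property used below to identify each transfer is an assumption about OnDemandTransferInfo and is shown for illustration only:
// Sketch: report any in-progress on-demand transfers for the instance
foreach (KeyValuePair<DataBufferName, OnDemandTransferInfo> transfer
    in ridm.GetActiveTransfers())
{
    // RequestId is assumed to identify the transfer request
    Console.WriteLine("Active transfer of {0} (request {1})",
        transfer.Key, transfer.Value.RequestId);
}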
Configuring the Event Log data buffer
An XPath expression can be used to filter the Windows Event Log events that Windows Azure Diagnostics persists to the WADWindowsEventLogsTable table. This recipe contains the following two examples of filter expressions:
1. Application!*
2. Application!*[System[Provider[@Name='.NET Runtime']]]
The first persists all events, while the second persists only those events from the .NET Runtime source.
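For example, a filter restricting persistence to critical, error, and warning events from the System log could be added to the ModifyConfiguration() method as follows; the expression is illustrative and uses the standard event XPath level values (1, 2, and 3):
// Illustrative filter: critical (1), error (2), and warning (3) events only
String systemEvents =
    @"System!*[System[(Level=1 or Level=2 or Level=3)]]";
dmc.WindowsEventLog.DataSources.Add(systemEvents);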
Steve Marx has written a blog post in which he shows how to use the Event Viewer to generate a filter expression at the following URL:
http://blog.smarx.com/posts/capturing-filtered-windows-events-with-windows-azure-diagnostics
Configuring the performance counter data buffer
There are a large number of performance counters which Windows Azure Diagnostics can capture and persist to the WADPerformanceCountersTable table. It is also possible to add custom performance counters to an instance. The typeperf command can be used at the command prompt to generate a list of available performance counters. The following lists all performance counters on the current system:
C:\Users\Administrator>typeperf -q
The following command filters the list to only those performance counters for the ASP.NET object:
C:\Users\Administrator>typeperf -q ASP.NET
Note that there are also performance counters for objects, such as ASP.NET Applications.
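A counter identified with typeperf can then be added to the diagnostics configuration in the same way as in the ModifyConfiguration() method shown earlier; the counter name and sample rate below are illustrative:
// Illustrative: capture ASP.NET requests per second every 30 seconds
PerformanceCounterConfiguration aspNetCounter =
    new PerformanceCounterConfiguration()
    {
        CounterSpecifier = @"\ASP.NET Applications(__Total__)\Requests/Sec",
        SampleRate = TimeSpan.FromSeconds(30)
    };
dmc.PerformanceCounters.DataSources.Add(aspNetCounter);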
Using a configuration file with Windows Azure Diagnostics
Windows Azure Diagnostics stores the diagnostics configuration for an instance as an XML file in a blob, named for the instance, in a container named wad-control-container in the Windows Azure Storage Service account configured for diagnostics. When an instance is started for the first time, a default instance configuration is inserted in the container. This can be modified programmatically, either local to the instance or remotely using methods in the Windows Azure SDK.
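For example, the per-instance configuration blobs can be listed with the storage client library; this is a minimal sketch that assumes a cloudStorageAccount created as in the earlier recipe and simply prints the blob URIs (the blob names encode the deployment ID, role name, and instance ID):
// Sketch: list the diagnostics configuration blobs for all instances
CloudBlobClient blobClient = cloudStorageAccount.CreateCloudBlobClient();
CloudBlobContainer container =
    blobClient.GetContainerReference("wad-control-container");
foreach (IListBlobItem item in container.ListBlobs(
    new BlobRequestOptions() { UseFlatBlobListing = true }))
{
    Console.WriteLine(item.Uri);
}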
Local configuration typically occurs in the OnStart() method for the role and is used to further specify the information captured by Windows Azure Diagnostics and the schedule with which diagnostic data is persisted in the Windows Azure Storage Service. By default, some diagnostics data is captured, but none of it is persisted to the storage service. Remote configuration can also be used to modify the information captured. However, it is used normally to initiate an on-demand transfer of data from an instance when a problem must be investigated before the next scheduled transfer of data to the storage service.
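As a reminder of the local-configuration pattern, the following is a minimal sketch of such an OnStart() method; the connection-string setting name is the one the Visual Studio tooling adds for the Diagnostics module and may differ in other projects:
public override bool OnStart()
{
    // Start from the default initial configuration and add a transfer schedule
    DiagnosticMonitorConfiguration config =
        DiagnosticMonitor.GetDefaultInitialConfiguration();
    config.Logs.ScheduledTransferLogLevelFilter = LogLevel.Verbose;
    config.Logs.ScheduledTransferPeriod = TimeSpan.FromMinutes(5);
    DiagnosticMonitor.Start(
        "Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString", config);
    return base.OnStart();
}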
Windows Azure SDK v1.3 introduced a way to specify declaratively the initial configuration of Windows Azure Diagnostics for a role. This requires the placement of a file, named diagnostics.wadcfg, in a specific location in the role package. When an instance is started for the first time, the Diagnostic Agent reads the file and initializes the diagnostic configuration for the instance in wad-control-container with it, instead of using the default configuration.
During instance startup, the Diagnostics Agent uses the following sequence to search for a diagnostics configuration to use:
1. Diagnostic configuration for the instance in wad-control-container.
2. Configuration specified programmatically when the instance is started.
3. Configuration specified in diagnostics.wadcfg.
4. Default configuration.
The physical location of diagnostics.wadcfg in a hosted service depends on the role type. For a worker role, the location is %RoleRoot% while for a web role, the location is %RoleRoot%\bin. For a VM role, diagnostics.wadcfg must be in the following directory in the uploaded Guest OS image:
%ProgramFiles%\Windows Azure Integration Components\v1.0\Diagnostics
The information contained in diagnostics.wadcfg mirrors that stored in the instance configuration in wad-control-container and exposed through the DiagnosticsMonitorConfiguration class in the Windows Azure SDK.
The intended use of the diagnostics.wadcfg file is in situations where programmatic configuration is not possible. The primary use case is in a VM role where there is no RoleEntryPoint and no OnStart() method in which to perform programmatic configuration. Another example is when special logging is required during a startup task.
In this recipe, we will learn how to configure Windows Azure Diagnostics using the diagnostics.wadcfg file.
How to do it...
We are going to see how to use a configuration file to configure Windows Azure Diagnostics. We do this as follows:
1. Create a diagnostics.wadcfg file in the root directory of the role.
2. In the Visual Studio Properties for the diagnostics.wadcfg file, set the Build Action to Content and the Copy to Output Directory to Copy Always.
3. Insert the following root element in the file:
<DiagnosticMonitorConfiguration
   xmlns="http://schemas.microsoft.com/ServiceHosting/2010/10/DiagnosticsConfiguration"
   configurationChangePollInterval="PT1M"
   overallQuotaInMB="4096" />
4. Insert the following as a child element of the DiagnosticMonitorConfiguration element:
<DiagnosticInfrastructureLogs
bufferQuotaInMB="100"
scheduledTransferLogLevelFilter="Verbose"
scheduledTransferPeriod="PT1H"/>
5. Insert the following as a child element of the DiagnosticMonitorConfiguration element:
<WindowsEventLog
bufferQuotaInMB="100"
scheduledTransferLogLevelFilter="Verbose"
scheduledTransferPeriod="PT1H">
<DataSource name="Application!*"/>
</WindowsEventLog>
6. Insert the following as a child element of the DiagnosticMonitorConfiguration element:
<Logs
bufferQuotaInMB="100"
scheduledTransferLogLevelFilter="Verbose"
scheduledTransferPeriod="PT1H"/>
7. Insert the following as a child element of the DiagnosticMonitorConfiguration element:
<Directories
bufferQuotaInMB="1024"
scheduledTransferPeriod="PT1H">
<CrashDumps container="wad-crash-dumps"
directoryQuotaInMB="256"/>
<FailedRequestLogs container="wad-frq"
directoryQuotaInMB="256"/>
<IISLogs container="wad-iis-test"
directoryQuotaInMB="256"/>
<DataSources>
<DirectoryConfiguration
container="wad-custom"
directoryQuotaInMB="20">
<LocalResource name="CustomLoggingLocation"
relativePath="Logs"/>
</DirectoryConfiguration>
</DataSources>
</Directories>
8. Insert the following as a child element of the DiagnosticMonitorConfiguration element:
<PerformanceCounters
bufferQuotaInMB="100" scheduledTransferPeriod="PT20M">
<PerformanceCounterConfiguration
counterSpecifier="\Processor(_Total)\% Processor Time"
sampleRate="PT10S"/>
</PerformanceCounters>
9. Add the following as a child element of the WebRole or WorkerRole element in the service definition file, ServiceDefinition.csdef:
<LocalResources>
<LocalStorage name="CustomLoggingLocation"
sizeInMB="20" cleanOnRoleRecycle="false"/>
</LocalResources>
How it works...
In steps 1 and 2, we create the diagnostics.wadcfg file and set the Visual Studio build action. The appropriate location for the file depends on the role. The Visual Studio tooling ensures that the file is copied to the correct location when the project is built.
In step 3, we create the root element of the file and set the maximum size of the diagnostics data buffer to 4 GB. The configurationChangePollInterval specifies the interval at which the Diagnostics Agent polls the storage service for diagnostic configuration changes. This is provided in ISO 8601 format. For example, with a configurationChangePollInterval of PT1M, the P indicates duration, the T indicates time, and the 1M indicates one minute. The same format is used for other intervals and periods in the configuration file.
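If we are unsure of a duration string, the XmlConvert class in System.Xml can round-trip ISO 8601 durations and TimeSpan values; this is just an illustrative check:
// Illustrative check of ISO 8601 durations
TimeSpan pollInterval = System.Xml.XmlConvert.ToTimeSpan("PT1M");
Console.WriteLine(pollInterval);                                          // 00:01:00
Console.WriteLine(System.Xml.XmlConvert.ToString(TimeSpan.FromHours(1))); // PT1H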
In step 4, we configure the diagnostics infrastructure logs into which the Diagnostic Agent logs data about itself. We specify that this data is stored locally in a 100 MB data buffer and that the information is persisted to the storage service once an hour (PT1H). We also filter the persistence of data, so that all events with a level of verbose or higher are transferred.
In step 5, we configure the capture and persistence of the Windows Event Logs. We specify that all verbose or higher events in the Application event log should be transferred once an hour. We can add more DataSource elements to specify the capture and persistence of additional Windows Event Logs. In step 6, we configure the capture and persistence of basic logs from the Windows Azure Diagnostics trace listener.
In step 7, we configure the capture and persistence of data located in directories on the local file system. For crash dumps, IIS failed request logs, and IIS logs, we specify the Windows Azure Blob Service container in which the data is persisted, but we don't specify the directory in which the data is located because that is not known until the hosted service starts. The Diagnostics Agent fixes the configuration of these logs when it writes the diagnostics configuration for the instance to wad-control-container.
For custom directories, we must specify the name of a local resource, configured in the service definition file, to which we write the logs. We use the relativePath attribute to specify a directory under the root path of the local resource. We also specify the Windows Azure Blob Service container into which the logs are persisted. We can add more DirectoryConfiguration elements to specify the capture and persistence of files in other directories.
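The programmatic equivalent of this custom directory entry uses the DirectoryConfiguration class; the following sketch assumes it runs inside the role (so that RoleEnvironment can resolve the local resource) and that dmc is a DiagnosticMonitorConfiguration obtained as in the earlier recipe:
// Sketch: add the custom log directory to the diagnostics configuration
DirectoryConfiguration customLogs = new DirectoryConfiguration()
{
    Container = "wad-custom",
    DirectoryQuotaInMB = 20,
    Path = System.IO.Path.Combine(
        RoleEnvironment.GetLocalResource("CustomLoggingLocation").RootPath,
        "Logs")
};
dmc.Directories.DataSources.Add(customLogs);
dmc.Directories.ScheduledTransferPeriod = TimeSpan.FromHours(1);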
In step 8, we configure the capture and persistence of performance counters data. We specify that the % processor time should be captured every 10 seconds (PT10S) and persisted to the storage service once an hour. We can add more PerformanceCounterConfiguration elements to specify the capture and persistence of other performance counters.
In step 9, we add the definition for the local resource, used in custom logging, to the service definition file.
There's more...
Andy Cross has written a blog post in which he describes how to perform XML validation on the diagnostics.wadcfg file at the following URL:
http://blog.bareweb.eu/2011/02/file-based-diagnostics-config-with-intellisense-in-azure-sdk-1-3/
Summary
In this article we covered:
• Initializing the configuration of Windows Azure Diagnostics
• Using a configuration file with Windows Azure Diagnostics
From 083edd022de8ff57c1e538a0cc71ec50b8dec806 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kim=20Nguy=E1=BB=85n?= Date: Fri, 20 Feb 2015 14:11:30 +0100 Subject: [PATCH] Seal the representation of SortedList.Make(X).t (by making the type private). Expose Var.Set as a SortedList.S --- compile/compile.ml | 20 +++--- depend | 2 +- misc/custom.ml | 14 ++-- runtime/value.ml | 6 +- types/patterns.ml | 26 ++++---- types/sortedList.ml | 155 ++++++++++++++++++++++++++++++++----------- types/sortedList.mli | 67 ++++++++++--------- types/types.ml | 47 +++++++------ types/var.ml | 5 -- types/var.mli | 18 +---- typing/typed.ml | 2 +- typing/typer.ml | 10 +-- 12 files changed, 216 insertions(+), 156 deletions(-) diff --git a/compile/compile.ml b/compile/compile.ml index 58aa65c4..6aa8f6b3 100644 --- a/compile/compile.ml +++ b/compile/compile.ml @@ -76,13 +76,13 @@ let enter_global_cu cu env x = let rec domain = function |Identity -> Var.Set.empty |List l -> Types.Tallying.domain l - |Comp (s1,s2) -> Var.Set.union (domain s1) (domain s2) + |Comp (s1,s2) -> Var.Set.cup (domain s1) (domain s2) |Sel(_,_,sigma) -> (domain sigma) let rec codomain = function | Identity -> Var.Set.empty | List(l) -> Types.Tallying.codomain l - | Comp(s1,s2) -> Var.Set.union (codomain s1) (codomain s2) + | Comp(s1,s2) -> Var.Set.cup (codomain s1) (codomain s2) | Sel(_,_,sigma) -> (codomain sigma) let fresharg = @@ -111,7 +111,7 @@ let rec comp s1 s2 = match s1, s2 with | res -> comp s3 (comp res s6)) (* If a variable in the image of s2 is in the domain of s1 we can't simplify *) - | _, _ when not (Var.Set.is_empty (Var.Set.inter (domain s1) (codomain s2))) + | _, _ when not (Var.Set.is_empty (Var.Set.cap (domain s1) (codomain s2))) -> Comp(s1, s2) | List(_), List(_) | Sel(_), List(_) -> @@ -135,8 +135,8 @@ and compile_aux env te = function let is_mono x = if Var.Set.is_empty ts then true else let from_xi = try IdMap.assoc x env.xi with Not_found -> Var.Set.empty in - let d = Var.Set.inter from_xi (domain(env.sigma)) in - Var.Set.is_empty (Var.Set.inter ts d) + let d = Var.Set.cap from_xi (domain(env.sigma)) in + Var.Set.is_empty (Var.Set.cap ts d) in if is_mono x then Var (v) else TVar(v,env.sigma) | Typed.Subst(e,sl) -> @@ -193,15 +193,15 @@ and compile_abstr env a = List.fold_left(fun acc (t1,t2) -> let ts1 = Types.all_vars t1 in let ts2 = Types.all_vars t2 in - let tu = Var.Set.union ts1 ts2 in - Var.Set.union acc tu + let tu = Var.Set.cup ts1 ts2 in + Var.Set.cup acc tu ) Var.Set.empty a.Typed.fun_iface in if Var.Set.is_empty vars then true else if env.sigma = Identity then false else let d = domain(env.sigma) in - Var.Set.is_empty (Var.Set.inter d vars) + Var.Set.is_empty (Var.Set.cap d vars) in let (slots,nb_slots,fun_env) = (* we add a nameless empty slot for the argument *) @@ -258,7 +258,7 @@ and compile_branches env (brs : Typed.branches) = (* p_i / t_i -> br.Typed.br_pat / br.Typed.br_type *) and compile_branch env br = - let env = List.fold_left enter_local env (Patterns.fv br.Typed.br_pat) in + let env = List.fold_left enter_local env (IdSet.get (Patterns.fv br.Typed.br_pat)) in let env = { env with xi = IdMap.merge (fun _ v2 -> v2) env.xi br.Typed.br_vars_poly @@ -278,7 +278,7 @@ let compile_expr env e = let compile_let_decl env decl = let pat = decl.Typed.let_pat in let e,lsize = compile_expr env decl.Typed.let_body in - let env = enter_globals env (Patterns.fv pat) in + let env = enter_globals env (IdSet.get (Patterns.fv pat)) in let te = decl.Typed.let_body.Typed.exp_typ in let comp = Patterns.Compile.make_branches 
te [ pat, () ] in diff --git a/depend b/depend index ac1e4868..c989e228 100644 --- a/depend +++ b/depend @@ -371,7 +371,7 @@ types/intervals.cmi : misc/custom.cmo types/chars.cmi : misc/custom.cmo types/atoms.cmi : misc/ns.cmi misc/encodings.cmi misc/custom.cmo types/normal.cmi : -types/var.cmi : misc/custom.cmo +types/var.cmi : types/sortedList.cmi misc/custom.cmo types/boolVar.cmi : types/var.cmi misc/custom.cmo types/types.cmi : types/var.cmi misc/ns.cmi types/intervals.cmi \ types/ident.cmo misc/custom.cmo types/chars.cmi types/boolVar.cmi \ diff --git a/misc/custom.ml b/misc/custom.ml index 235af461..fb6238df 100644 --- a/misc/custom.ml +++ b/misc/custom.ml @@ -108,20 +108,20 @@ module Array(X : T) = struct end module List(X : T) = struct - module Elem = X - type t = X.t list - let dump = dump_list X.dump - let check l = List.iter X.check l + module Elem : T with type t = X.t = X + type t = Elem.t list + let dump = dump_list Elem.dump + let check l = List.iter Elem.check l let rec equal l1 l2 = (l1 == l2) || match (l1,l2) with - | x1::l1, x2::l2 -> (X.equal x1 x2) && (equal l1 l2) + | x1::l1, x2::l2 -> (Elem.equal x1 x2) && (equal l1 l2) | _ -> false let rec hash accu = function | [] -> 1 + accu - | x::l -> hash (17 * accu + X.hash x) l + | x::l -> hash (17 * accu + Elem.hash x) l let hash l = hash 1 l @@ -129,7 +129,7 @@ module List(X : T) = struct if l1 == l2 then 0 else match (l1,l2) with | x1::l1, x2::l2 -> - let c = X.compare x1 x2 in if c <> 0 then c + let c = Elem.compare x1 x2 in if c <> 0 then c else compare l1 l2 | [],_ -> -1 | _ -> 1 diff --git a/runtime/value.ml b/runtime/value.ml index 3b7aa2b0..34a7ef88 100644 --- a/runtime/value.ml +++ b/runtime/value.ml @@ -27,13 +27,13 @@ and t = let rec domain = function | Identity | Mono -> Var.Set.empty | List(l) -> Types.Tallying.domain l - | Comp(s1,s2) -> Var.Set.union (domain s1) (domain s2) + | Comp(s1,s2) -> Var.Set.cup (domain s1) (domain s2) | Sel(_,_,sigma) -> (domain sigma) let rec codomain = function | Identity | Mono -> Var.Set.empty | List(l) -> Types.Tallying.codomain l - | Comp(s1,s2) -> Var.Set.union (codomain s1) (codomain s2) + | Comp(s1,s2) -> Var.Set.cup (codomain s1) (codomain s2) | Sel(_,_,sigma) -> (codomain sigma) (* Comp for Value.sigma but simplify if possible. 
*) @@ -54,7 +54,7 @@ let rec comp s1 s2 = match s1, s2 with | res -> comp s3 (comp res s6)) (* If a variable in the image of s2 is in the domain of s1 we can't simplify *) - | _, _ when not (Var.Set.is_empty (Var.Set.inter (domain s1) (codomain s2))) + | _, _ when not (Var.Set.is_empty (Var.Set.cap (domain s1) (codomain s2))) -> Comp(s1, s2) | List(_), List(_) | Sel(_), List(_) -> diff --git a/types/patterns.ml b/types/patterns.ml index 822ad18c..34f68adc 100644 --- a/types/patterns.ml +++ b/types/patterns.ml @@ -91,7 +91,7 @@ let pp_node ppf node = print node.descr (Types.id node.accept) Types.Print.pp_type (Types.descr node.accept) - pp_fv node.fv + pp_fv (node.fv :> Id.t list) let counter = ref 0 @@ -507,9 +507,9 @@ module Normal = struct type t = NodeSet.t * Types.t * IdSet.t (* pl,t; t <= \accept{pl} *) - let check (pl,t,xs) = + let check ((pl,t,xs) : t) = List.iter (fun p -> assert(Types.subtype t (Types.descr p.accept))) - (NodeSet.get pl) + (pl :> Node.t list) let print ppf (pl,t,xs) = Format.fprintf ppf "@[(pl=%a;t=%a;xs=%a)@]" NodeSet.dump pl Types.Print.pp_type t @@ -523,11 +523,11 @@ module Normal = struct let equal x y = compare x y == 0 - let first_label (pl,t,xs) = + let first_label ((pl,t,xs) : t) = List.fold_left (fun l p -> Label.min l (first_label (descr p))) (Types.Record.first_label t) - pl + (pl :> Node.t list) end @@ -706,9 +706,9 @@ module Normal = struct let factorize t0 (pl,t,xs) = let t0 = if Types.subtype t t0 then t else Types.cap t t0 in - let vs_var = facto Factorize.var t0 xs pl in + let vs_var = facto Factorize.var t0 xs (NodeSet.get pl) in let xs = IdSet.diff xs vs_var in - let vs_nil = facto Factorize.nil t0 xs pl in + let vs_nil = facto Factorize.nil t0 xs (NodeSet.get pl) in let xs = IdSet.diff xs vs_nil in (vs_var,vs_nil,(pl,t,xs)) @@ -748,7 +748,7 @@ module Normal = struct | (t,res,pl)::tl -> aux_check tl s accu (Types.diff t s) res pl in aux_check [] Types.empty ResultMap.empty (Types.cap t any_basic) - IdMap.empty (List.map descr pl) + IdMap.empty (List.map descr (NodeSet.get pl)) (* let prod_tests (pl,t,xs) = @@ -943,7 +943,7 @@ module Compile = struct Format.fprintf ppf " pat %a@." 
Normal.Nnf.print p; ) disp.pl - let first_lab t reqs = + let first_lab t (reqs : Normal.Nnf.t array) = let aux l req = Label.min l (Normal.Nnf.first_label req) in let lab = Array.fold_left aux (Types.Record.first_label t) reqs in @@ -953,7 +953,7 @@ module Compile = struct let compute_actions = ref (fun _ -> assert false) - let dispatcher t pl : dispatcher = + let dispatcher t (pl : Normal.Nnf.t array) : dispatcher = try DispMap.find (t,pl) !dispatchers with Not_found -> let lab = first_lab t pl in @@ -1105,7 +1105,7 @@ module Compile = struct (* Collect all subrequests *) let aux reqs (req,_) = let (_,_,((_,tr,xs) as r')) as req' = - if facto then Normal.factorize t req else [],[],req in + if facto then Normal.factorize t req else IdSet.empty,IdSet.empty,req in factorized := NfMap.add req req' !factorized; if IdSet.is_empty xs && Types.subtype t tr then reqs @@ -1216,8 +1216,8 @@ module Compile = struct if IdSet.mem var x || IdSet.mem nil x then has_facto := true else (assert (IdMap.assoc x res = !i); incr i) - ) xs; - Match (List.length xs, (var,nil,xs,e)) + ) (IdSet.get xs); + Match (IdSet.length xs, (var,nil,xs,e)) | [] -> r | _ -> assert false in diff --git a/types/sortedList.ml b/types/sortedList.ml index bea60636..30851903 100644 --- a/types/sortedList.ml +++ b/types/sortedList.ml @@ -1,21 +1,98 @@ +module type S = +sig + module Elem : Custom.T + include Custom.T with type t = private Elem.t list + external get: t -> Elem.t list = "%identity" + + val singleton: Elem.t -> t + val iter: (Elem.t -> unit) -> t -> unit + val filter: (Elem.t -> bool) -> t -> t + val exists: (Elem.t -> bool) -> t -> bool + val fold: ('a -> Elem.t -> 'a) -> 'a -> t -> 'a + val pick: t -> Elem.t option + val choose: t -> Elem.t + val length: t -> int + + val empty: t + val is_empty: t -> bool + val from_list : Elem.t list -> t + val add: Elem.t -> t -> t + val remove: Elem.t -> t -> t + val disjoint: t -> t -> bool + val cup: t -> t -> t + val split: t -> t -> t * t * t + (* split l1 l2 = (l1 \ l2, l1 & l2, l2 \ l1) *) + val cap: t -> t -> t + val diff: t -> t -> t + val subset: t -> t -> bool + val map: (Elem.t -> Elem.t) -> t -> t + val mem: t -> Elem.t -> bool + + module Map: sig + type 'a map + external get: 'a map -> (Elem.t * 'a) list = "%identity" + val add: Elem.t -> 'a -> 'a map -> 'a map + val mem: Elem.t -> 'a map -> bool + val length: 'a map -> int + val domain: 'a map -> t + val restrict: 'a map -> t -> 'a map + val empty: 'a map + val fold: (Elem.t -> 'a -> 'b -> 'b) -> 'a map -> 'b -> 'b + val iter: ('a -> unit) -> 'a map -> unit + val iteri: (Elem.t -> 'a -> unit) -> 'a map -> unit + val filter: (Elem.t -> 'a -> bool) -> 'a map -> 'a map + val is_empty: 'a map -> bool + val singleton: Elem.t -> 'a -> 'a map + val assoc_remove: Elem.t -> 'a map -> 'a * 'a map + val remove: Elem.t -> 'a map -> 'a map + val merge: ('a -> 'a -> 'a ) -> 'a map -> 'a map -> 'a map + val combine: ('a -> 'c) -> ('b -> 'c) -> ('a -> 'b -> 'c) -> + 'a map -> 'b map -> 'c map + val cap: ('a -> 'a -> 'a ) -> 'a map -> 'a map -> 'a map + val sub: ('a -> 'a -> 'a ) -> 'a map -> 'a map -> 'a map + + val merge_elem: 'a -> 'a map -> 'a map -> 'a map + val union_disj: 'a map -> 'a map -> 'a map + val diff: 'a map -> t -> 'a map + val from_list: ('a -> 'a -> 'a ) -> (Elem.t * 'a) list -> 'a map + val from_list_disj: (Elem.t * 'a) list -> 'a map + + val map_from_slist: (Elem.t -> 'a) -> t -> 'a map + val collide: ('a -> 'b -> unit) -> 'a map -> 'b map -> unit + val may_collide: ('a -> 'b -> unit) -> exn -> 'a map -> 'b map -> 
unit + val map: ('a -> 'b) -> 'a map -> 'b map + val mapi: (Elem.t -> 'a -> 'b) -> 'a map -> 'b map + val constant: 'a -> t -> 'a map + val num: int -> t -> int map + val map_to_list: ('a -> 'b) -> 'a map -> 'b list + val mapi_to_list: (Elem.t -> 'a -> 'b) -> 'a map -> 'b list + val assoc: Elem.t -> 'a map -> 'a + val assoc_present: Elem.t -> 'a map -> 'a + val compare: ('a -> 'a -> int) -> 'a map -> 'a map -> int + val hash: ('a -> int) -> 'a map -> int + val equal: ('a -> 'a -> bool) -> 'a map -> 'a map -> bool + end + module MakeMap(Y : Custom.T) : sig + include Custom.T with type t = Y.t Map.map + end +end + module Make(X : Custom.T) = struct include Custom.List(X) let rec check = function - | x::(y::_ as tl) -> X.check x; assert (X.compare x y < 0); check tl - | [x] -> X.check x; + | x::(y::_ as tl) -> Elem.check x; assert (Elem.compare x y < 0); check tl + | [x] -> Elem.check x; | _ -> () - type elem = X.t - let rec equal l1 l2 = (l1 == l2) || match (l1,l2) with - | x1::l1, x2::l2 -> (X.equal x1 x2) && (equal l1 l2) + | x1::l1, x2::l2 -> (Elem.equal x1 x2) && (equal l1 l2) | _ -> false let rec hash accu = function | [] -> 1 + accu - | x::l -> hash (17 * accu + X.hash x) l + | x::l -> hash (17 * accu + Elem.hash x) l let hash l = hash 1 l @@ -23,7 +100,7 @@ module Make(X : Custom.T) = struct if l1 == l2 then 0 else match (l1,l2) with | x1::l1, x2::l2 -> - let c = X.compare x1 x2 in if c <> 0 then c + let c = Elem.compare x1 x2 in if c <> 0 then c else compare l1 l2 | [],_ -> -1 | _ -> 1 @@ -35,7 +112,7 @@ module Make(X : Custom.T) = struct let exists = List.exists let fold = List.fold_left - external get: t -> elem list = "%identity" + external get: t -> Elem.t list = "%identity" let singleton x = [ x ] let pick = function x::_ -> Some x | _ -> None @@ -49,7 +126,7 @@ module Make(X : Custom.T) = struct if l1 == l2 then l1 == [] else match (l1,l2) with | (t1::q1, t2::q2) -> - let c = X.compare t1 t2 in + let c = Elem.compare t1 t2 in if c < 0 then disjoint q1 l2 else if c > 0 then disjoint l1 q2 else false @@ -59,7 +136,7 @@ module Make(X : Custom.T) = struct if l1 == l2 then l1 else match (l1,l2) with | (t1::q1, t2::q2) -> - let c = X.compare t1 t2 in + let c = Elem.compare t1 t2 in if c = 0 then t1::(cup q1 q2) else if c < 0 then t1::(cup q1 l2) else t2::(cup l1 q2) @@ -71,7 +148,7 @@ module Make(X : Custom.T) = struct let rec split l1 l2 = match (l1,l2) with | (t1::q1, t2::q2) -> - let c = X.compare t1 t2 in + let c = Elem.compare t1 t2 in if c = 0 then let (l1,i,l2) = split q1 q2 in (l1,t1::i,l2) else if c < 0 then let (l1,i,l2) = split q1 l2 in (t1::l1,i,l2) else let (l1,i,l2) = split l1 q2 in (l1,i,t2::l2) @@ -82,7 +159,7 @@ module Make(X : Custom.T) = struct if l1 == l2 then [] else match (l1,l2) with | (t1::q1, t2::q2) -> - let c = X.compare t1 t2 in + let c = Elem.compare t1 t2 in if c = 0 then diff q1 q2 else if c < 0 then t1::(diff q1 l2) else diff l1 q2 @@ -94,7 +171,7 @@ module Make(X : Custom.T) = struct if l1 == l2 then l1 else match (l1,l2) with | (t1::q1, t2::q2) -> - let c = X.compare t1 t2 in + let c = Elem.compare t1 t2 in if c = 0 then t1::(cap q1 q2) else if c < 0 then cap q1 l2 else cap l1 q2 @@ -105,12 +182,12 @@ module Make(X : Custom.T) = struct (l1 == l2) || match (l1,l2) with | (t1::q1, t2::q2) -> - let c = X.compare t1 t2 in + let c = Elem.compare t1 t2 in if c = 0 then ( (* inlined: subset q1 q2 *) (q1 == q2) || match (q1,q2) with | (t1::qq1, t2::qq2) -> - let c = X.compare t1 t2 in + let c = Elem.compare t1 t2 in if c = 0 then subset qq1 qq2 else if c < 0 
then false else subset q1 qq2 @@ -140,12 +217,12 @@ module Make(X : Custom.T) = struct match l with | [] -> false | t::q -> - let c = X.compare x t in + let c = Elem.compare x t in (c = 0) || ((c > 0) && (mem q x)) module Map = struct - type 'a map = (X.t * 'a) list - external get: 'a map -> (elem * 'a) list = "%identity" + type 'a map = (Elem.t * 'a) list + external get: 'a map -> (Elem.t * 'a) list = "%identity" let empty = [] let is_empty l = l = [] let singleton x y = [ (x,y) ] @@ -169,7 +246,7 @@ module Make(X : Custom.T) = struct let rec assoc_remove_aux v r = function | ((x,y) as a)::l -> - let c = X.compare x v in + let c = Elem.compare x v in if c = 0 then (r := Some y; l) else if c < 0 then a :: (assoc_remove_aux v r l) else raise Not_found @@ -184,7 +261,7 @@ module Make(X : Custom.T) = struct original list ? *) let rec remove v = function | (((x,y) as a)::rem) as l-> - let c = X.compare x v in + let c = Elem.compare x v in if c = 0 then rem else if c < 0 then a :: (remove v rem) else l @@ -193,7 +270,7 @@ module Make(X : Custom.T) = struct let rec merge f l1 l2 = match (l1,l2) with | ((x1,y1) as t1)::q1, ((x2,y2) as t2)::q2 -> - let c = X.compare x1 x2 in + let c = Elem.compare x1 x2 in if c = 0 then (x1,(f y1 y2))::(merge f q1 q2) else if c < 0 then t1::(merge f q1 l2) else t2::(merge f l1 q2) @@ -203,7 +280,7 @@ module Make(X : Custom.T) = struct let rec combine f1 f2 f12 l1 l2 = match (l1,l2) with | (x1,y1)::q1, (x2,y2)::q2 -> - let c = X.compare x1 x2 in + let c = Elem.compare x1 x2 in if c = 0 then (x1,(f12 y1 y2))::(combine f1 f2 f12 q1 q2) else if c < 0 then (x1,f1 y1)::(combine f1 f2 f12 q1 l2) else (x2, f2 y2)::(combine f1 f2 f12 l1 q2) @@ -213,7 +290,7 @@ module Make(X : Custom.T) = struct let rec cap f l1 l2 = match (l1,l2) with | (x1,y1)::q1, (x2,y2)::q2 -> - let c = X.compare x1 x2 in + let c = Elem.compare x1 x2 in if c = 0 then (x1,(f y1 y2))::(cap f q1 q2) else if c < 0 then cap f q1 l2 else cap f l1 q2 @@ -222,7 +299,7 @@ module Make(X : Custom.T) = struct let rec sub f l1 l2 = match (l1,l2) with | ((x1,y1) as t1)::q1, (x2,y2)::q2 -> - let c = X.compare x1 x2 in + let c = Elem.compare x1 x2 in if c = 0 then (x1,(f y1 y2))::(sub f q1 q2) else if c < 0 then t1::(sub f q1 l2) else sub f l1 q2 @@ -234,7 +311,7 @@ module Make(X : Custom.T) = struct let rec union_disj l1 l2 = match (l1,l2) with | ((x1,y1) as t1)::q1, ((x2,y2) as t2)::q2 -> - let c = X.compare x1 x2 in + let c = Elem.compare x1 x2 in if c = 0 then failwith "SortedList.Map.union_disj" else if c < 0 then t1::(union_disj q1 l2) else t2::(union_disj l1 q2) @@ -247,13 +324,13 @@ module Make(X : Custom.T) = struct match l with | [] -> false | (t,_)::q -> - let c = X.compare x t in + let c = Elem.compare x t in (c = 0) || ((c > 0) && (mem x q)) let rec diff l1 l2 = match (l1,l2) with | (((x1,y1) as t1)::q1, x2::q2) -> - let c = X.compare x1 x2 in + let c = Elem.compare x1 x2 in if c = 0 then diff q1 q2 else if c < 0 then t1::(diff q1 l2) else diff l1 q2 @@ -262,7 +339,7 @@ module Make(X : Custom.T) = struct let rec restrict l1 l2 = match (l1,l2) with | (((x1,y1) as t1)::q1, x2::q2) -> - let c = X.compare x1 x2 in + let c = Elem.compare x1 x2 in if c = 0 then t1::(restrict q1 q2) else if c < 0 then restrict q1 l2 else restrict l1 q2 @@ -306,7 +383,7 @@ module Make(X : Custom.T) = struct let rec may_collide f exn l1 l2 = match (l1,l2) with - | (x1,y1)::l1, (x2,y2)::l2 when X.compare x1 x2 = 0 -> + | (x1,y1)::l1, (x2,y2)::l2 when Elem.compare x1 x2 = 0 -> f y1 y2; may_collide f exn l1 l2 | [], [] -> () | _ -> 
raise exn @@ -335,7 +412,7 @@ module Make(X : Custom.T) = struct let rec assoc v = function | (x,y)::l -> - let c = X.compare x v in + let c = Elem.compare x v in if c = 0 then y else if c < 0 then assoc v l else raise Not_found @@ -344,7 +421,7 @@ module Make(X : Custom.T) = struct let rec assoc_present v = function | [(_,y)] -> y | (x,y)::l -> - let c = X.compare x v in + let c = Elem.compare x v in if c = 0 then y else assoc_present v l | [] -> assert false @@ -352,7 +429,7 @@ module Make(X : Custom.T) = struct if l1 == l2 then 0 else match (l1,l2) with | (x1,y1)::l1, (x2,y2)::l2 -> - let c = X.compare x1 x2 in if c <> 0 then c + let c = Elem.compare x1 x2 in if c <> 0 then c else let c = f y1 y2 in if c <> 0 then c else compare f l1 l2 | [],_ -> -1 @@ -360,21 +437,21 @@ module Make(X : Custom.T) = struct let rec hash f = function | [] -> 1 - | (x,y)::l -> X.hash x + 17 * (f y) + 257 * (hash f l) + | (x,y)::l -> Elem.hash x + 17 * (f y) + 257 * (hash f l) let rec equal f l1 l2 = (l1 == l2) || match (l1,l2) with | (x1,y1)::l1, (x2,y2)::l2 -> - (X.equal x1 x2) && (f y1 y2) && (equal f l1 l2) + (Elem.equal x1 x2) && (f y1 y2) && (equal f l1 l2) | _ -> false let rec check f = function | (x,a)::((y,b)::_ as tl) -> - X.check x; f a; - assert (X.compare x y < 0); check f tl - | [x,a] -> X.check x; f a + Elem.check x; f a; + assert (Elem.compare x y < 0); check f tl + | [x,a] -> Elem.check x; f a | _ -> () end (* Map *) @@ -392,12 +469,14 @@ module Make(X : Custom.T) = struct let check l = Map.check Y.check l let dump ppf l = List.iter (fun (x,y) -> - Format.fprintf ppf "(%a->%a)" X.dump x Y.dump y) l + Format.fprintf ppf "(%a->%a)" Elem.dump x Y.dump y) l end end (* Make(X : Custom.T) *) + + module type FiniteCofinite = sig type elem type s = private Finite of elem list | Cofinite of elem list diff --git a/types/sortedList.mli b/types/sortedList.mli index 8ddf751c..c1d474ad 100644 --- a/types/sortedList.mli +++ b/types/sortedList.mli @@ -1,24 +1,23 @@ -module Make(X : Custom.T) : +module type S = sig - include Custom.T with type t = X.t list - module Elem : Custom.T with type t = X.t + module Elem : Custom.T + include Custom.T with type t = private Elem.t list + external get: t -> Elem.t list = "%identity" - external get: t -> X.t list = "%identity" - - val singleton: X.t -> t - val iter: (X.t -> unit) -> t -> unit - val filter: (X.t -> bool) -> t -> t - val exists: (X.t -> bool) -> t -> bool - val fold: ('a -> X.t -> 'a) -> 'a -> t -> 'a - val pick: t -> X.t option - val choose: t -> X.t + val singleton: Elem.t -> t + val iter: (Elem.t -> unit) -> t -> unit + val filter: (Elem.t -> bool) -> t -> t + val exists: (Elem.t -> bool) -> t -> bool + val fold: ('a -> Elem.t -> 'a) -> 'a -> t -> 'a + val pick: t -> Elem.t option + val choose: t -> Elem.t val length: t -> int val empty: t val is_empty: t -> bool - val from_list : X.t list -> t - val add: X.t -> t -> t - val remove: X.t -> t -> t + val from_list : Elem.t list -> t + val add: Elem.t -> t -> t + val remove: Elem.t -> t -> t val disjoint: t -> t -> bool val cup: t -> t -> t val split: t -> t -> t * t * t @@ -26,26 +25,26 @@ sig val cap: t -> t -> t val diff: t -> t -> t val subset: t -> t -> bool - val map: (X.t -> X.t) -> t -> t - val mem: t -> X.t -> bool + val map: (Elem.t -> Elem.t) -> t -> t + val mem: t -> Elem.t -> bool module Map: sig type 'a map - external get: 'a map -> (X.t * 'a) list = "%identity" - val add: X.t -> 'a -> 'a map -> 'a map - val mem: X.t -> 'a map -> bool + external get: 'a map -> (Elem.t * 'a) list = "%identity" 
+ val add: Elem.t -> 'a -> 'a map -> 'a map + val mem: Elem.t -> 'a map -> bool val length: 'a map -> int val domain: 'a map -> t val restrict: 'a map -> t -> 'a map val empty: 'a map - val fold: (X.t -> 'a -> 'b -> 'b) -> 'a map -> 'b -> 'b + val fold: (Elem.t -> 'a -> 'b -> 'b) -> 'a map -> 'b -> 'b val iter: ('a -> unit) -> 'a map -> unit - val iteri: (X.t -> 'a -> unit) -> 'a map -> unit - val filter: (X.t -> 'a -> bool) -> 'a map -> 'a map + val iteri: (Elem.t -> 'a -> unit) -> 'a map -> unit + val filter: (Elem.t -> 'a -> bool) -> 'a map -> 'a map val is_empty: 'a map -> bool - val singleton: X.t -> 'a -> 'a map - val assoc_remove: X.t -> 'a map -> 'a * 'a map - val remove: X.t -> 'a map -> 'a map + val singleton: Elem.t -> 'a -> 'a map + val assoc_remove: Elem.t -> 'a map -> 'a * 'a map + val remove: Elem.t -> 'a map -> 'a map val merge: ('a -> 'a -> 'a ) -> 'a map -> 'a map -> 'a map val combine: ('a -> 'c) -> ('b -> 'c) -> ('a -> 'b -> 'c) -> 'a map -> 'b map -> 'c map @@ -55,20 +54,20 @@ sig val merge_elem: 'a -> 'a map -> 'a map -> 'a map val union_disj: 'a map -> 'a map -> 'a map val diff: 'a map -> t -> 'a map - val from_list: ('a -> 'a -> 'a ) -> (X.t * 'a) list -> 'a map - val from_list_disj: (X.t * 'a) list -> 'a map + val from_list: ('a -> 'a -> 'a ) -> (Elem.t * 'a) list -> 'a map + val from_list_disj: (Elem.t * 'a) list -> 'a map - val map_from_slist: (X.t -> 'a) -> t -> 'a map + val map_from_slist: (Elem.t -> 'a) -> t -> 'a map val collide: ('a -> 'b -> unit) -> 'a map -> 'b map -> unit val may_collide: ('a -> 'b -> unit) -> exn -> 'a map -> 'b map -> unit val map: ('a -> 'b) -> 'a map -> 'b map - val mapi: (X.t -> 'a -> 'b) -> 'a map -> 'b map + val mapi: (Elem.t -> 'a -> 'b) -> 'a map -> 'b map val constant: 'a -> t -> 'a map val num: int -> t -> int map val map_to_list: ('a -> 'b) -> 'a map -> 'b list - val mapi_to_list: (X.t -> 'a -> 'b) -> 'a map -> 'b list - val assoc: X.t -> 'a map -> 'a - val assoc_present: X.t -> 'a map -> 'a + val mapi_to_list: (Elem.t -> 'a -> 'b) -> 'a map -> 'b list + val assoc: Elem.t -> 'a map -> 'a + val assoc_present: Elem.t -> 'a map -> 'a val compare: ('a -> 'a -> int) -> 'a map -> 'a map -> int val hash: ('a -> int) -> 'a map -> int val equal: ('a -> 'a -> bool) -> 'a map -> 'a map -> bool @@ -78,6 +77,8 @@ sig end end +module Make(X : Custom.T) : S with module Elem = X and type t = private X.t list + module type FiniteCofinite = sig type elem type s = private Finite of elem list | Cofinite of elem list diff --git a/types/types.ml b/types/types.ml index 2b19a9d9..7deb0aa3 100644 --- a/types/types.ml +++ b/types/types.ml @@ -1005,9 +1005,9 @@ let get_variables main_memo temp_memo t = acc end in - (Var.Set.union tvpos tpos, - Var.Set.union tvneg tneg, - Var.Set.union tvars vars) + (Var.Set.cup tvpos tpos, + Var.Set.cup tvneg tneg, + Var.Set.cup tvars vars) in get_variables true Var.Set.(empty,empty,empty) t @@ -1023,7 +1023,7 @@ let get_variables = let check_var = let aux t = let tvpos, tvneg, tvars = get_variables t in - match Var.Set.(cardinal tvpos, cardinal tvneg, cardinal tvars) with + match Var.Set.(length tvpos, length tvneg, length tvars) with 1, 0, 1 -> let v = Var.Set.choose tvpos in if equiv (var v) t then `Pos v else `NotVar | 0, 1, 1 -> let v = Var.Set.choose tvneg in @@ -1049,7 +1049,7 @@ let all_vars t = let _, _, s = get_variables t in s let all_tlv t = - let p , n, _ = get_variables t in Var.Set.union p n + let p , n, _ = get_variables t in Var.Set.cup p n let is_closed delta t = Var.Set.(is_empty (diff (all_vars t) 
delta)) @@ -1843,7 +1843,7 @@ module Print = struct try let res = VarTable.fold (fun ((v1, v2) as k) tt acc -> - if Var.Set.(not (is_empty (inter v1 v2))) || is_empty tt then acc + if Var.Set.(not (is_empty (cap v1 v2))) || is_empty tt then acc else if Key.is_empty k && subtype any tt then raise Not_found else (k, tt) :: acc @@ -1866,7 +1866,7 @@ module Print = struct | i :: ll -> let factp, factn = List.fold_left (fun (accp, accn) (vp, vn) -> - Var.Set.(inter accp vp, inter accn vn)) + Var.Set.(cap accp vp, cap accn vn)) i ll in let nl = @@ -2636,7 +2636,7 @@ module Positive = struct |`Xml of v * v |`Record of bool * (bool * Ns.Label.t * v) list ] - and v = { mutable def : rhs; mutable node : node option } + and v = { mutable def : rhs; mutable node : node option; } module MemoHash = Hashtbl.Make( struct type t = v @@ -2697,7 +2697,7 @@ module Positive = struct n (* We shadow the corresponding definitions in the outer module *) - let forward () = { def = `Cup []; node = None } + let forward () = { def = `Cup []; node = None; } let def v d = v.def <- d let cons d = let v = forward () in def v d; v let ty d = cons (`Type d) @@ -2781,7 +2781,6 @@ module Positive = struct @@ decompose_kind Abstract.any abstract (BoolAbstracts.get t.abstract) [] in node_t.def <- (cup descr_t).def; node_t - in decompose_type t @@ -2883,7 +2882,7 @@ module Positive = struct let substitute_free delta t = let h = Hashtbl.create 17 in let subst d = - if Var.Set.mem d delta then var d else + if Var.Set.mem delta d then var d else try Hashtbl.find h d with Not_found -> @@ -2895,7 +2894,7 @@ module Positive = struct let substitute_kind delta kind t = let subst d = - if Var.Set.mem d delta then var d else + if Var.Set.mem delta d then var d else var (Var.set_kind kind d) in apply_subst ~subst:subst t @@ -2923,7 +2922,7 @@ module Positive = struct if ni == 0 then acc else pretty ni acc in let x = Var.mk (pretty !idx "") in - if Var.Set.mem x delta then (incr idx; freshvar idx) else x + if Var.Set.mem delta x then (incr idx; freshvar idx) else x in let vars = Hashtbl.create 17 in let memo = Memo.create 17 in @@ -2939,7 +2938,7 @@ module Positive = struct let () = Memo.add memo (pos,v) () in match v.def with |`Type d -> () - |`Variable d when Var.Set.mem d delta || (not (is_internal d) && not pos) -> + |`Variable d when Var.Set.mem delta d || (not (is_internal d) && not pos) -> Hashtbl.replace vars d v |`Variable d -> begin try @@ -3042,7 +3041,7 @@ module Tallying = struct with Not_found -> inf, sup in - if Var.Set.mem v delta then map + if Var.Set.mem delta v then map else VarMap.add v (new_i, new_s) map let inter delta map1 map2 = VarMap.fold (add delta) map1 map2 @@ -3246,8 +3245,8 @@ module Tallying = struct (* check if there exists a toplevel variable : fun (pos,neg) *) let toplevel delta single norm_rec mem p n = let _compare delta v1 v2 = - let monov1 = Var.Set.mem v1 delta in - let monov2 = Var.Set.mem v2 delta in + let monov1 = Var.Set.mem delta v1 in + let monov2 = Var.Set.mem delta v1 in if monov1 == monov2 then Var.compare v1 v2 else @@ -3310,7 +3309,7 @@ module Tallying = struct begin (* if there is only one variable then is it A <= 0 or 1 <= A *) let (v,p) = extract_variable t in - if Var.Set.mem v delta then CS.unsat (* if it is monomorphic, unsat *) + if Var.Set.mem delta v then CS.unsat (* if it is monomorphic, unsat *) else (* otherwise, create a single constraint according to its polarity *) let s = if p then (Pos (v,empty)) else (Neg (any,v)) in @@ -3332,7 +3331,7 @@ module Tallying = struct let acc = 
aux single_record normrec acc (BoolRec.get t.record) in let acc = (* Simplify the constraints on that type *) CS.S.filter - (fun m -> CS.M.for_all (fun v (s, t) -> not (Var.Set.mem v delta) || + (fun m -> CS.M.for_all (fun v (s, t) -> not (Var.Set.mem delta v) || let x = var v in subtype s x && subtype x t ) m) acc @@ -3489,7 +3488,7 @@ module Tallying = struct * means that the constraint is of the form (alpha,beta). *) if is_var t then begin let (beta,_) = extract_variable t in - if Var.Set.mem beta delta then aux alpha (s, t) acc + if Var.Set.mem delta beta then aux alpha (s, t) acc else let acc1 = aux beta (empty,any) acc in (* alpha <= beta --> { empty <= alpha <= beta ; empty <= beta <= any } *) @@ -3565,7 +3564,7 @@ module Tallying = struct let rec dom = function |I -> Var.Set.empty |S si -> CS.E.fold (fun v _ acc -> Var.Set.add v acc) si Var.Set.empty - |A (si,sj) -> Var.Set.union (dom si) (dom sj) + |A (si,sj) -> Var.Set.cup (dom si) (dom sj) (* composition of two symbolic substitution sets sigmaI, sigmaJ . Cartesian product *) @@ -3585,11 +3584,11 @@ module Tallying = struct let filter t si = vsi := get si; vst := all_vars t; - not(Var.Set.is_empty (Var.Set.inter !vst !vsi)) + not(Var.Set.is_empty (Var.Set.cap !vst !vsi)) in let filterdiff t si sj = let vsj = get sj in - not(Var.Set.is_empty (Var.Set.inter !vst (Var.Set.diff !vsi vsj))) + not(Var.Set.is_empty (Var.Set.cap !vst (Var.Set.diff !vsi vsj))) in let rec aux t = function |I -> t @@ -3610,7 +3609,7 @@ module Tallying = struct let codomain ll = List.fold_left (fun acc e -> CS.E.fold (fun _ v acc -> - Var.Set.union (all_vars v) acc + Var.Set.cup (all_vars v) acc ) e acc ) Var.Set.empty ll diff --git a/types/var.ml b/types/var.ml index 909d7bf5..d9c5b6e2 100644 --- a/types/var.ml +++ b/types/var.ml @@ -31,11 +31,6 @@ module Set = struct let dump ppf s = Utils.pp_list ~sep:";" ~delim:("{","}") V.dump ppf (get s) let pp ppf s = Utils.pp_list ~sep:";" ~delim:("{","}") V.pp ppf (get s) let printf = pp Format.std_formatter - let union = cup - let inter = cap - let cardinal = length - let mem t v = mem v t - let fold = fold end type 'a var_or_atom = [ `Atm of 'a | `Var of t ] diff --git a/types/var.mli b/types/var.mli index 8220e1a0..6e443f20 100644 --- a/types/var.mli +++ b/types/var.mli @@ -17,23 +17,9 @@ val is_internal : t -> bool *) module Set : sig - include Custom.T - val dump : Format.formatter -> t -> unit + include SortedList.S with type Elem.t = var val pp : Format.formatter -> t -> unit - val printf : t -> unit - val is_empty : t -> bool - val empty : t - val singleton : var -> t - val union : t -> t -> t - val diff : t -> t -> t - val mem : var -> t -> bool - val add : var -> t -> t - val inter : t -> t -> t - val subset : t -> t -> bool - val cardinal : t -> int - val from_list : var list -> t - val fold : ('a -> var -> 'a) -> 'a -> t -> 'a - val choose : t -> var + val dump : Format.formatter -> t -> unit end type 'a var_or_atom = [ `Atm of 'a | `Var of t ] diff --git a/typing/typed.ml b/typing/typed.ml index 2961a5af..1e574b65 100644 --- a/typing/typed.ml +++ b/typing/typed.ml @@ -184,7 +184,7 @@ module Print = struct and pp_v ppf (id, name) = Format.fprintf ppf "(%d,%s)" (Upool.int id) (Encodings.Utf8.to_string name) - and pp_fv ppf fv = Utils.pp_list pp_v ppf fv + and pp_fv ppf fv = Utils.pp_list pp_v ppf (IdSet.get fv) and pp_vars_poly ppf m = let pp_aux ppf (x,s) = Format.fprintf ppf "%a : %a" Ident.print x Var.Set.pp s in diff --git a/typing/typer.ml b/typing/typer.ml index bf4f6fc3..eca61fc4 100644 --- 
a/typing/typer.ml +++ b/typing/typer.ml @@ -614,7 +614,7 @@ module IType = struct ("This definition yields an empty type for " ^ (Ident.to_string v)); let vars_rhs = Types.all_vars t_rhs in - if List.exists (fun x -> not (Var.Set.mem (Var.mk (U.to_string x)) vars_rhs)) args then + if List.exists (fun x -> not (Var.Set.mem vars_rhs (Var.mk (U.to_string x)) )) args then raise_loc_generic loc (Printf.sprintf "Definition of type %s contains unbound type variables" (Ident.to_string v)); @@ -958,7 +958,7 @@ and branches env b = let ploc = p.loc in let p = pat env p in let fvp = Patterns.fv p in - let (fv2,e) = expr (enter_values_dummy fvp env) noloc e in + let (fv2,e) = expr (enter_values_dummy (fvp :> Id.t list) env) noloc e in let br_loc = merge_loc ploc e.Typed.exp_loc in (match Fv.pick (Fv.diff fvp fv2) with | None -> () @@ -1004,7 +1004,7 @@ and select_from_where env loc e from where = let p = pat !env p in let fvp = Patterns.fv p in let (fv2,e) = expr !env noloc e in - env := enter_values_dummy fvp !env; + env := enter_values_dummy (fvp :> Id.t list) !env; all_fv := Fv.cup (Fv.diff fv2 !bound_fv) !all_fv; bound_fv := Fv.cup fvp !bound_fv; (ploc,p,fvp,e) in @@ -1160,7 +1160,7 @@ and type_check' loc env ed constr precise = match ed with let delta_intf = List.fold_left (fun acc (t1, t2) -> - Var.Set.(union acc (union (Types.all_vars t1) (Types.all_vars t2))) + Var.Set.(cup acc (cup (Types.all_vars t1) (Types.all_vars t2))) ) env.delta a.fun_iface in @@ -1525,7 +1525,7 @@ let rec unused_branches b = (fun x -> let x = Ident.to_string x in if (String.compare x "$$$" = 0) then raise Exit else x) - (IdSet.get br.br_vars_empty) in + (br.br_vars_empty :> Id.t list) in let l = String.concat "," l in "The following variables always match the empty sequence: " ^ l -- GitLab
Computing (FOLDOC) dictionary
two-valued logic
logic (Commonly known as "Boolean algebra") A mathematical
system concerning the two truth values, TRUE and FALSE and
the functions AND, OR, NOT. Two-valued logic is one of
the cornerstones of logic and is also fundamental in the design of digital electronics.
The term "Boolean" is used here with its common meaning -
two-valued, though strictly Boolean algebra is more general
than this.
Boolean functions are usually represented by truth tables
where "0" represents "false" and "1" represents "true". E.g.:
A | B | A AND B
--+---+--------
0 | 0 | 0
0 | 1 | 0
1 | 0 | 0
1 | 1 | 1
This can be given more compactly using "x" to mean "don't
care" (either true or false):
A | B | A AND B
--+---+--------
0 | x | 0
x | 0 | 0
1 | 1 | 1
Similarly:
A | NOT A
--+------
0 | 1
1 | 0

A | B | A OR B
--+---+--------
0 | 0 | 0
x | 1 | 1
1 | x | 1
Other functions such as XOR, NAND, NOR or functions of
more than two inputs can be constructed using combinations of
AND, OR, and NOT. AND and OR can be constructed from each
other using DeMorgan's Theorem:
NAME
perllocale - Perl locale handling (internationalization and localization)
DESCRIPTION
Perl supports language-specific notions of data such as "is this a letter", "what is the uppercase equivalent of this letter", and "which of these letters comes first". These are important issues, especially for languages other than English--but also for English: it would be naïve to imagine that A-Za-z defines all the "letters" needed to write in English. Perl is also aware that some character other than '.' may be preferred as a decimal point, and that output date representations may be language-specific. The process of making an application take account of its users' preferences in such matters is called internationalization (often abbreviated as i18n); telling such an application about a particular set of preferences is known as localization (l10n).
Perl can understand language-specific data via the standardized (ISO C, XPG4, POSIX 1.c) method called "the locale system". The locale system is controlled per application using one pragma, one function call, and several environment variables.
NOTE: This feature is new in Perl 5.004, and does not apply unless an application specifically requests it--see "Backward compatibility". The one exception is that write() now always uses the current locale - see "NOTES".
PREPARING TO USE LOCALES
If Perl applications are to understand and present your data correctly according to a locale of your choice, all of the following must be true:
If you want a Perl application to process and present your data according to a particular locale, the application code should include the use locale pragma (see "The use locale pragma") where appropriate, and at least one of the following must be true:
USING LOCALES
The use locale pragma
By default, Perl ignores the current locale. The use locale pragma tells Perl to use the current locale for some operations:
LC_COLLATE, LC_CTYPE, and so on, are discussed further in "LOCALE CATEGORIES".
The default behavior is restored with the no locale pragma, or upon reaching the end of block enclosing use locale.
The string result of any operation that uses locale information is tainted, as it is possible for a locale to be untrustworthy. See "SECURITY".
The setlocale function
You can switch locales as often as you wish at run time with the POSIX::setlocale() function:
# This functionality not usable prior to Perl 5.004
require 5.004;
# Import locale-handling tool set from POSIX module.
# This example uses: setlocale -- the function call
# LC_CTYPE -- explained below
use POSIX qw(locale_h);
# query and save the old locale
$old_locale = setlocale(LC_CTYPE);
setlocale(LC_CTYPE, "fr_CA.ISO8859-1");
# LC_CTYPE now in locale "French, Canada, codeset ISO 8859-1"
setlocale(LC_CTYPE, "");
# LC_CTYPE now reset to default defined by LC_ALL/LC_CTYPE/LANG
# environment variables. See below for documentation.
# restore the old locale
setlocale(LC_CTYPE, $old_locale);
The first argument of setlocale() gives the category, the second the locale. The category tells in what aspect of data processing you want to apply locale-specific rules. Category names are discussed in "LOCALE CATEGORIES" and "ENVIRONMENT". The locale is the name of a collection of customization information corresponding to a particular combination of language, country or territory, and codeset. Read on for hints on the naming of locales: not all systems name locales as in the example.
If no second argument is provided and the category is something else than LC_ALL, the function returns a string naming the current locale for the category. You can use this value as the second argument in a subsequent call to setlocale().
If no second argument is provided and the category is LC_ALL, the result is implementation-dependent. It may be a string of concatenated locales names (separator also implementation-dependent) or a single locale name. Please consult your setlocale(3) for details.
If a second argument is given and it corresponds to a valid locale, the locale for the category is set to that value, and the function returns the now-current locale value. You can then use this in yet another call to setlocale(). (In some implementations, the return value may sometimes differ from the value you gave as the second argument--think of it as an alias for the value you gave.)
As the example shows, if the second argument is an empty string, the category's locale is returned to the default specified by the corresponding environment variables. Generally, this results in a return to the default that was in force when Perl started up: changes to the environment made by the application after startup may or may not be noticed, depending on your system's C library.
If the second argument does not correspond to a valid locale, the locale for the category is not changed, and the function returns undef.
For further information about the categories, consult setlocale(3).
Finding locales
For locales available in your system, consult also setlocale(3) to see whether it leads to the list of available locales (search for the SEE ALSO section). If that fails, try the following command lines:
locale -a
nlsinfo
ls /usr/lib/nls/loc
ls /usr/lib/locale
ls /usr/lib/nls
ls /usr/share/locale
and see whether they list something resembling these
en_US.ISO8859-1 de_DE.ISO8859-1 ru_RU.ISO8859-5
en_US.iso88591 de_DE.iso88591 ru_RU.iso88595
en_US de_DE ru_RU
en de ru
english german russian
english.iso88591 german.iso88591 russian.iso88595
english.roman8 russian.koi8r
Sadly, even though the calling interface for setlocale() has been standardized, names of locales and the directories where the configuration resides have not been. The basic form of the name is language_territory.codeset, but the latter parts after language are not always present. The language and country are usually from the standards ISO 3166 and ISO 639, the two-letter abbreviations for the countries and the languages of the world, respectively. The codeset part often mentions some ISO 8859 character set, the Latin codesets. For example, ISO 8859-1 is the so-called "Western European codeset" that can be used to encode most Western European languages adequately. Again, there are several ways to write even the name of that one standard. Lamentably.
Two special locales are worth particular mention: "C" and "POSIX". Currently these are effectively the same locale: the difference is mainly that the first one is defined by the C standard, the second by the POSIX standard. They define the default locale in which every program starts in the absence of locale information in its environment. (The default default locale, if you will.) Its language is (American) English and its character codeset ASCII.
NOTE: Not all systems have the "POSIX" locale (not all systems are POSIX-conformant), so use "C" when you need explicitly to specify this default locale.
LOCALE PROBLEMS
You may encounter the following warning message at Perl startup:
perl: warning: Setting locale failed.
perl: warning: Please check that your locale settings:
LC_ALL = "En_US",
LANG = (unset)
are supported and installed on your system.
perl: warning: Falling back to the standard locale ("C").
This means that your locale settings had LC_ALL set to "En_US" and LANG exists but has no value. Perl tried to believe you but could not. Instead, Perl gave up and fell back to the "C" locale, the default locale that is supposed to work no matter what. This usually means your locale settings were wrong, they mention locales your system has never heard of, or the locale installation in your system has problems (for example, some system files are broken or missing). There are quick and temporary fixes to these problems, as well as more thorough and lasting fixes.
Temporarily fixing locale problems
The two quickest fixes are either to render Perl silent about any locale inconsistencies or to run Perl under the default locale "C".
Perl's moaning about locale problems can be silenced by setting the environment variable PERL_BADLANG to a zero value, for example "0". This method really just sweeps the problem under the carpet: you tell Perl to shut up even when Perl sees that something is wrong. Do not be surprised if later something locale-dependent misbehaves.
Perl can be run under the "C" locale by setting the environment variable LC_ALL to "C". This method is perhaps a bit more civilized than the PERL_BADLANG approach, but setting LC_ALL (or other locale variables) may affect other programs as well, not just Perl. In particular, external programs run from within Perl will see these changes. If you make the new settings permanent (read on), all programs you run see the changes. See ENVIRONMENT for the full list of relevant environment variables and "USING LOCALES" for their effects in Perl. Effects in other programs are easily deducible. For example, the variable LC_COLLATE may well affect your sort program (or whatever the program that arranges `records' alphabetically in your system is called).
You can test out changing these variables temporarily, and if the new settings seem to help, put those settings into your shell startup files. Consult your local documentation for the exact details. In Bourne-like shells (sh, ksh, bash, zsh):
LC_ALL=en_US.ISO8859-1
export LC_ALL
This assumes that we saw the locale "en_US.ISO8859-1" using the commands discussed above. We decided to try that instead of the above faulty locale "En_US"--and in Cshish shells (csh, tcsh)
setenv LC_ALL en_US.ISO8859-1
or if you have the "env" application you can do in any shell
env LC_ALL=en_US.ISO8859-1 perl ...
If you do not know what shell you have, consult your local helpdesk or the equivalent.
Permanently fixing locale problems
The slower but superior fixes are when you may be able to yourself fix the misconfiguration of your own environment variables. The mis(sing)configuration of the whole system's locales usually requires the help of your friendly system administrator.
First, see earlier in this document about "Finding locales". That tells how to find which locales are really supported--and more importantly, installed--on your system. In our example error message, environment variables affecting the locale are listed in the order of decreasing importance (and unset variables do not matter). Therefore, having LC_ALL set to "En_US" must have been the bad choice, as shown by the error message. First try fixing locale settings listed first.
Second, if using the listed commands you see something exactly (prefix matches do not count and case usually counts) like "En_US" without the quotes, then you should be okay because you are using a locale name that should be installed and available in your system. In this case, see "Permanently fixing your system's locale configuration".
Permanently fixing your system's locale configuration
This is when you see something like:
perl: warning: Please check that your locale settings:
LC_ALL = "En_US",
LANG = (unset)
are supported and installed on your system.
but then cannot see that "En_US" listed by the above-mentioned commands. You may see things like "en_US.ISO8859-1", but that isn't the same. In this case, try running under a locale that you can list and which somehow matches what you tried. The rules for matching locale names are a bit vague because standardization is weak in this area. See again the "Finding locales" about general rules.
Fixing system locale configuration
Contact a system administrator (preferably your own) and report the exact error message you get, and ask them to read this same documentation you are now reading. They should be able to check whether there is something wrong with the locale configuration of the system. The "Finding locales" section is unfortunately a bit vague about the exact commands and places because these things are not that standardized.
The localeconv function
The POSIX::localeconv() function allows you to get particulars of the locale-dependent numeric formatting information specified by the current LC_NUMERIC and LC_MONETARY locales. (If you just want the name of the current locale for a particular category, use POSIX::setlocale() with a single parameter--see "The setlocale function".)
use POSIX qw(locale_h);
# Get a reference to a hash of locale-dependent info
$locale_values = localeconv();
# Output sorted list of the values
for (sort keys %$locale_values) {
printf "%-20s = %s\n", $_, $locale_values->{$_}
}
localeconv() takes no arguments, and returns a reference to a hash. The keys of this hash are variable names for formatting, such as decimal_point and thousands_sep. The values are the corresponding, er, values. See "localeconv" in POSIX for a longer example listing the categories an implementation might be expected to provide; some provide more and others fewer. You don't need an explicit use locale, because localeconv() always observes the current locale.
Here's a simple-minded example program that rewrites its command-line parameters as integers correctly formatted in the current locale:
# See comments in previous example
require 5.004;
use POSIX qw(locale_h);
# Get some of locale's numeric formatting parameters
my ($thousands_sep, $grouping) =
@{localeconv()}{'thousands_sep', 'grouping'};
# Apply defaults if values are missing
$thousands_sep = ',' unless $thousands_sep;
# grouping and mon_grouping are packed lists
# of small integers (characters) telling the
# grouping (thousand_seps and mon_thousand_seps
# being the group dividers) of numbers and
# monetary quantities. The integers' meanings:
# 255 means no more grouping, 0 means repeat
# the previous grouping, 1-254 means use that
# as the current grouping. Grouping goes from
# right to left (low to high digits). In the
# below we cheat slightly by never using anything
# else than the first grouping (whatever that is).
if ($grouping) {
@grouping = unpack("C*", $grouping);
} else {
@grouping = (3);
}
# Format command line params for current locale
for (@ARGV) {
$_ = int; # Chop non-integer part
1 while
s/(\d)(\d{$grouping[0]}($|$thousands_sep))/$1$thousands_sep$2/;
print "$_";
}
print "\n";
I18N::Langinfo
Another interface for querying locale-dependent information is the I18N::Langinfo::langinfo() function, available at least in UNIX-like systems and VMS.
The following example will import the langinfo() function itself and three constants to be used as arguments to langinfo(): a constant for the abbreviated first day of the week (the numbering starts from Sunday = 1) and two more constants for the affirmative and negative answers for a yes/no question in the current locale.
use I18N::Langinfo qw(langinfo ABDAY_1 YESSTR NOSTR);
my ($abday_1, $yesstr, $nostr) = map { langinfo($_) } (ABDAY_1, YESSTR, NOSTR);
print "$abday_1? [$yesstr/$nostr] ";
In other words, in the "C" (or English) locale the above will probably print something like:
Sun? [yes/no]
See I18N::Langinfo for more information.
LOCALE CATEGORIES
The following subsections describe basic locale categories. Beyond these, some combination categories allow manipulation of more than one basic category at a time. See "ENVIRONMENT" for a discussion of these.
Category LC_COLLATE: Collation
In the scope of use locale, Perl looks to the LC_COLLATE environment variable to determine the application's notions on collation (ordering) of characters. For example, 'b' follows 'a' in Latin alphabets, but where do 'á' and 'å' belong? And while 'color' follows 'chocolate' in English, what about in Spanish?
The following collations all make sense and you may meet any of them if you "use locale".
A B C D E a b c d e
A a B b C c D d E e
a A b B c C d D e E
a b c d e A B C D E
Here is a code snippet to tell what "word" characters are in the current locale, in that locale's order:
use locale;
print +(sort grep /\w/, map { chr } 0..255), "\n";
Compare this with the characters that you see and their order if you state explicitly that the locale should be ignored:
no locale;
print +(sort grep /\w/, map { chr } 0..255), "\n";
This machine-native collation (which is what you get unless use locale has appeared earlier in the same block) must be used for sorting raw binary data, whereas the locale-dependent collation of the first example is useful for natural text.
As noted in "USING LOCALES", cmp compares according to the current collation locale when use locale is in effect, but falls back to a char-by-char comparison for strings that the locale says are equal. You can use POSIX::strcoll() if you don't want this fall-back:
use POSIX qw(strcoll);
$equal_in_locale =
!strcoll("space and case ignored", "SpaceAndCaseIgnored");
$equal_in_locale will be true if the collation locale specifies a dictionary-like ordering that ignores space characters completely and which folds case.
If you have a single string that you want to check for "equality in locale" against several others, you might think you could gain a little efficiency by using POSIX::strxfrm() in conjunction with eq:
use POSIX qw(strxfrm);
$xfrm_string = strxfrm("Mixed-case string");
print "locale collation ignores spaces\n"
if $xfrm_string eq strxfrm("Mixed-casestring");
print "locale collation ignores hyphens\n"
if $xfrm_string eq strxfrm("Mixedcase string");
print "locale collation ignores case\n"
if $xfrm_string eq strxfrm("mixed-case string");
strxfrm() takes a string and maps it into a transformed string for use in char-by-char comparisons against other transformed strings during collation. "Under the hood", locale-affected Perl comparison operators call strxfrm() for both operands, then do a char-by-char comparison of the transformed strings. By calling strxfrm() explicitly and using a non locale-affected comparison, the example attempts to save a couple of transformations. But in fact, it doesn't save anything: Perl magic (see "Magic Variables" in perlguts) creates the transformed version of a string the first time it's needed in a comparison, then keeps this version around in case it's needed again. An example rewritten the easy way with cmp runs just about as fast. It also copes with null characters embedded in strings; if you call strxfrm() directly, it treats the first null it finds as a terminator. don't expect the transformed strings it produces to be portable across systems--or even from one revision of your operating system to the next. In short, don't call strxfrm() directly: let Perl do it for you.
Note: use locale isn't shown in some of these examples because it isn't needed: strcoll() and strxfrm() exist only to generate locale-dependent results, and so always obey the current LC_COLLATE locale.
Category LC_CTYPE: Character Types
In the scope of use locale, Perl obeys the LC_CTYPE locale setting. This controls the application's notion of which characters are alphabetic. This affects Perl's \w regular expression metanotation, which stands for alphanumeric characters--that is, alphabetic, numeric, and characters such as the underscore. (Consult perlre for more information about regular expressions.) Thanks to LC_CTYPE, depending on your locale setting, characters like 'æ', 'ð', 'ß', and 'ø' may be understood as \w characters.
The LC_CTYPE locale also provides the map used in transliterating characters between lower and uppercase. This affects the case-mapping functions--lc(), lcfirst, uc(), and ucfirst(); case-mapping interpolation with \l, \L, \u, or \U in double-quoted strings and s/// substitutions; and case-independent regular expression pattern matching using the i modifier.
Finally, LC_CTYPE affects the POSIX character-class test functions--isalpha(), islower(), and so on. For example, if you move from the "C" locale to a 7-bit Scandinavian one, you may find--possibly to your surprise--that "|" moves from the ispunct() class to isalpha().
Note: A broken or malicious LC_CTYPE locale definition may result in clearly ineligible characters being considered to be alphanumeric by your application. For strict matching of (mundane) letters and digits--for example, in command strings--locale-aware applications should use \w inside a no locale block. See "SECURITY".
Category LC_NUMERIC: Numeric Formatting
In the scope of use locale, Perl obeys the LC_NUMERIC locale information, which controls an application's idea of how numbers should be formatted for human readability by the printf(), sprintf(), and write() functions. String-to-numeric conversion by the POSIX::strtod() function is also affected. In most implementations the only effect is to change the character used for the decimal point--perhaps from '.' to ','. These functions aren't aware of such niceties as thousands separation and so on. (See "The localeconv function" if you care about these things.)
Output produced by print() is also affected by the current locale: it depends on whether use locale or no locale is in effect, and corresponds to what you'd get from printf() in the "C" locale. The same is true for Perl's internal conversions between numeric and string formats:
use POSIX qw(strtod);
use locale;
$n = 5/2; # Assign numeric 2.5 to $n
$a = " $n"; # Locale-dependent conversion to string
print "half five is $n\n"; # Locale-dependent output
printf "half five is %g\n", $n; # Locale-dependent output
print "DECIMAL POINT IS COMMA\n"
if $n == (strtod("2,5"))[0]; # Locale-dependent conversion
See also I18N::Langinfo and RADIXCHAR.
Category LC_MONETARY: Formatting of monetary amounts
The C standard defines the LC_MONETARY category, but no function that is affected by its contents. (Those with experience of standards committees will recognize that the working group decided to punt on the issue.) Consequently, Perl takes no notice of it. If you really want to use LC_MONETARY, you can query its contents--see "The localeconv function"--and use the information that it returns in your application's own formatting of currency amounts. However, you may well find that the information, voluminous and complex though it may be, still does not quite meet your requirements: currency formatting is a hard nut to crack.
See also I18N::Langinfo and CRNCYSTR.
LC_TIME
Output produced by POSIX::strftime(), which builds a formatted human-readable date/time string, is affected by the current LC_TIME locale. Thus, in a French locale, the output produced by the %B format element (full month name) for the first month of the year would be "janvier". Here's how to get a list of long month names in the current locale:
use POSIX qw(strftime);
for (0..11) {
$long_month_name[$_] =
strftime("%B", 0, 0, 0, 1, $_, 96);
}
Note: use locale isn't needed in this example: as a function that exists only to generate locale-dependent results, strftime() always obeys the current LC_TIME locale.
See also I18N::Langinfo and ABDAY_1..ABDAY_7, DAY_1..DAY_7, ABMON_1..ABMON_12, and MON_1..MON_12.
Other categories
The remaining locale category, LC_MESSAGES (possibly supplemented by others in particular implementations) is not currently used by Perl--except possibly to affect the behavior of library functions called by extensions outside the standard Perl distribution and by the operating system and its utilities. Note especially that the string value of $! and the error messages given by external utilities may be changed by LC_MESSAGES. If you want to have portable error codes, use %!. See Errno.
SECURITY
Although the main discussion of Perl security issues can be found in perlsec, a discussion of Perl's locale handling would be incomplete if it did not draw your attention to locale-dependent security issues. Locales--particularly on systems that allow unprivileged users to build their own locales--are untrustworthy. A malicious (or just plain broken) locale can make a locale-aware application give unexpected results. Here are a few possibilities:
Such dangers are not peculiar to the locale system: any aspect of an application's environment which may be modified maliciously presents similar challenges. Similarly, they are not specific to Perl: any programming language that allows you to write programs that take account of their environment exposes you to these issues.
Perl cannot protect you from all possibilities shown in the examples--there is no substitute for your own vigilance--but, when use locale is in effect, Perl uses the tainting mechanism (see perlsec) to mark string results that become locale-dependent, and which may be untrustworthy in consequence. Here is a summary of the tainting behavior of operators and functions that may be affected by the locale:
Three examples illustrate locale-dependent tainting. The first program, which ignores its locale, won't run: a value taken directly from the command line may not be used to name an output file when taint checks are enabled.
#!/usr/local/bin/perl -T
# Run with taint checking
# Command line sanity check omitted...
$tainted_output_file = shift;
open(F, ">$tainted_output_file")
or warn "Open of $tainted_output_file failed: $!\n";
The program can be made to run by "laundering" the tainted value through a regular expression: the second example--which still ignores locale information--runs, creating the file named on its command line if it can.
#!/usr/local/bin/perl -T
$tainted_output_file = shift;
$tainted_output_file =~ m%[\w/]+%;
$untainted_output_file = $&;
open(F, ">$untainted_output_file")
or warn "Open of $untainted_output_file failed: $!\n";
Compare this with a similar but locale-aware program:
#!/usr/local/bin/perl -T
$tainted_output_file = shift;
use locale;
$tainted_output_file =~ m%[\w/]+%;
$localized_output_file = $&;
open(F, ">$localized_output_file")
or warn "Open of $localized_output_file failed: $!\n";
This third program fails to run because $& is tainted: it is the result of a match involving \w while use locale is in effect.
ENVIRONMENT
PERL_BADLANG
A string that can suppress Perl's warning about failed locale settings at startup. Failure can occur if the locale support in the operating system is lacking (broken) in some way--or if you mistyped the name of a locale when you set up your environment. If this environment variable is absent, or has a value that does not evaluate to integer zero--that is, "0" or ""-- Perl will complain about locale setting failures.
NOTE: PERL_BADLANG only gives you a way to hide the warning message. The message tells about some problem in your system's locale support, and you should investigate what the problem is.
The following environment variables are not specific to Perl: They are part of the standardized (ISO C, XPG4, POSIX 1.c) setlocale() method for controlling an application's opinion on data.
LC_ALL
LC_ALL is the "override-all" locale environment variable. If set, it overrides all the rest of the locale environment variables.
LANGUAGE
NOTE: LANGUAGE is a GNU extension, it affects you only if you are using the GNU libc. This is the case if you are using e.g. Linux. If you are using "commercial" UNIXes you are most probably not using GNU libc and you can ignore LANGUAGE.
However, in the case you are using LANGUAGE: it affects the language of informational, warning, and error messages output by commands (in other words, it's like LC_MESSAGES) but it has higher priority than LC_ALL. Moreover, it's not a single value but instead a "path" (":"-separated list) of languages (not locales). See the GNU gettext library documentation for more information.
LC_CTYPE
In the absence of LC_ALL, LC_CTYPE chooses the character type locale. In the absence of both LC_ALL and LC_CTYPE, LANG chooses the character type locale.
LC_COLLATE
In the absence of LC_ALL, LC_COLLATE chooses the collation (sorting) locale. In the absence of both LC_ALL and LC_COLLATE, LANG chooses the collation locale.
LC_MONETARY
In the absence of LC_ALL, LC_MONETARY chooses the monetary formatting locale. In the absence of both LC_ALL and LC_MONETARY, LANG chooses the monetary formatting locale.
LC_NUMERIC
In the absence of LC_ALL, LC_NUMERIC chooses the numeric format locale. In the absence of both LC_ALL and LC_NUMERIC, LANG chooses the numeric format.
LC_TIME
In the absence of LC_ALL, LC_TIME chooses the date and time formatting locale. In the absence of both LC_ALL and LC_TIME, LANG chooses the date and time formatting locale.
LANG
LANG is the "catch-all" locale environment variable. If it is set, it is used as the last resort after the overall LC_ALL and the category-specific LC_....
NOTES
Backward compatibility
Versions of Perl prior to 5.004 mostly ignored locale information, generally behaving as if something similar to the "C" locale were always in force, even if the program environment suggested otherwise (see "The setlocale function"). By default, Perl still behaves this way for backward compatibility. If you want a Perl application to pay attention to locale information, you must use the use locale pragma (see "The use locale pragma") to instruct it to do so.
Versions of Perl from 5.002 to 5.003 did use the LC_CTYPE information if available; that is, \w did understand what were the letters according to the locale environment variables. The problem was that the user had no control over the feature: if the C library supported locales, Perl used them.
I18N:Collate obsolete
In versions of Perl prior to 5.004, per-locale collation was possible using the I18N::Collate library module. This module is now mildly obsolete and should be avoided in new applications. The LC_COLLATE functionality is now integrated into the Perl core language: One can use locale-specific scalar data completely normally with use locale, so there is no longer any need to juggle with the scalar references of I18N::Collate.
Sort speed and memory use impacts
Comparing and sorting by locale is usually slower than the default sorting; slow-downs of two to four times have been observed. It will also consume more memory: once a Perl scalar variable has participated in any string comparison or sorting operation obeying the locale collation rules, it will take 3-15 times more memory than before. (The exact multiplier depends on the string's contents, the operating system and the locale.) These downsides are dictated more by the operating system's implementation of the locale system than by Perl.
write() and LC_NUMERIC
Formats are the only part of Perl that unconditionally use information from a program's locale; if a program's environment specifies an LC_NUMERIC locale, it is always used to specify the decimal point character in formatted output. Formatted output cannot be controlled by use locale because the pragma is tied to the block structure of the program, and, for historical reasons, formats exist outside that block structure.
Freely available locale definitions
There is a large collection of locale definitions at ftp://dkuug.dk/i18n/WG15-collection . You should be aware that it is unsupported, and is not claimed to be fit for any purpose. If your system allows installation of arbitrary locales, you may find the definitions useful as they are, or as a basis for the development of your own locales.
I18n and l10n
"Internationalization" is often abbreviated as i18n because its first and last letters are separated by eighteen others. (You may guess why the internalin ... internaliti ... i18n tends to get abbreviated.) In the same way, "localization" is often abbreviated to l10n.
An imperfect standard
Internationalization, as defined in the C and POSIX standards, can be criticized as incomplete, ungainly, and having too large a granularity. (Locales apply to a whole process, when it would arguably be more useful to have them apply to a single thread, window group, or whatever.) They also have a tendency, like standards groups, to divide the world into nations, when we all know that the world can equally well be divided into bankers, bikers, gamers, and so on. But, for now, it's the only standard we've got. This may be construed as a bug.
Unicode and UTF-8
The support of Unicode is new starting from Perl version 5.6, and more fully implemented in the version 5.8. See perluniintro and perlunicode for more details.
Usually locale settings and Unicode do not affect each other, but there are exceptions, see "Locales" in perlunicode for examples.
BUGS
Broken systems
In certain systems, the operating system's locale support is broken and cannot be fixed or used by Perl. Such deficiencies can and will result in mysterious hangs and/or Perl core dumps when the use locale is in effect. When confronted with such a system, please report in excruciating detail to <[email protected]>, and complain to your vendor: bug fixes may exist for these problems in your operating system. Sometimes such bug fixes are called an operating system upgrade.
SEE ALSO
I18N::Langinfo, perluniintro, perlunicode, open, "isalnum" in POSIX, "isalpha" in POSIX, "isdigit" in POSIX, "isgraph" in POSIX, "islower" in POSIX, "isprint" in POSIX, "ispunct" in POSIX, "isspace" in POSIX, "isupper" in POSIX, "isxdigit" in POSIX, "localeconv" in POSIX, "setlocale" in POSIX, "strcoll" in POSIX, "strftime" in POSIX, "strtod" in POSIX, "strxfrm" in POSIX.
HISTORY
Jarkko Hietaniemi's original perli18n.pod heavily hacked by Dominic Dunlop, assisted by the perl5-porters. Prose worked over a bit by Tom Christiansen.
Last update: Thu Jun 11 08:44:13 MDT 1998
Showing entries with tag "ini".
Found 2 entries
Perl: hash to ini
I'm a big fan of .ini files because they're human readable, and also very machine readable. I wrote a quick function to convert a hashref to a simple .ini string.
my $str = hash_to_ini({ data => { 'name' => 'scott', animal => 'dog' }});
sub hash_to_ini {
my $x = $_[0];
my $ret = '';
foreach my $key (sort(keys(%$x))) {
my $val = $x->{$key};
if (ref($val) eq "HASH") {
$ret .= "[$key]\n";
foreach my $k (sort(keys(%$val))) { $ret .= "$k = " . $val->{$k} . "\n"; }
} else { $ret .= "$key = $val\n" }
}
$ret =~ s/\n\[/\n\n[/; # Add a space between sections
return $ret;
}
I also wrote a version in PHP
function hash_to_ini($x) {
$ret = '';
foreach (array_keys($x) as $key) {
$val = $x[$key];
if (is_array($val)) {
$ret .= "[$key]\n";
foreach (array_keys($val) as $k) { $ret .= "$k = " . $val[$k] . "\n"; }
} else { $ret .= "$key = $val\n"; }
}
$ret = preg_replace("/\n\[/", "\n\n[", $ret);
return $ret;
}
Note: see also parse_ini()
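Purely as an aside (not part of the original post), Python's standard configparser module can do roughly the same dict-to-.ini conversion; note that, unlike the Perl version, every top-level key has to be a section:

# Rough Python equivalent of hash_to_ini() using the standard library.
import configparser, io

def hash_to_ini(data):
    cp = configparser.ConfigParser()
    cp.read_dict(data)            # nested dicts become [sections]
    buf = io.StringIO()
    cp.write(buf)                 # writes "key = value" lines
    return buf.getvalue()

print(hash_to_ini({"data": {"name": "scott", "animal": "dog"}}))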
Perl: Simple .ini parser
I wrote a simple .ini parsing function in Perl.
my $hash_ref = parse_ini("/tmp/config.ini");
sub parse_ini {
open (my $INI, "<", $_[0]) or return undef;
my $ret = {};
my $section = "_";
while (my $line = readline($INI)) {
if ($line =~ /^\[(.+?)\]/) { # Section heading
$section = $1;
} elsif ($line =~ /^(\w.*?)\s*=\s*"?(.*?)"?\s*$/) { # Key/Value pair
$ret->{$section}->{$1} = $2;
}
}
return $ret;
}
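And, as another aside, the read direction has a rough Python equivalent in configparser as well (note it rejects keys that appear before the first [section], which the Perl version files under "_"):

# Rough Python equivalent of parse_ini() using the standard library.
import configparser

def parse_ini(path):
    cp = configparser.ConfigParser()
    if not cp.read(path):         # returns [] if the file can't be read
        return None
    return {section: dict(cp[section]) for section in cp.sections()}

config = parse_ini("/tmp/config.ini")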
=== NOTE ===
This is mainly handy for Trolls, in a certain view it helps Orc players. I did remove the month because it is not important to me; more over, you can check month when you log in your account. I added variables for colours since <+attr> and <+noattr> commands are bugged in PowTTY. It is just a way to make colours work in actions. If you do use Powwow you can of course use the commands to make it easier.

--- ACTIONS ---
#action >+dtime ^$1 on $2, the $3 of $4, Year $5 of the Third Age.={#if (\$1?"pm") {#if (\$1=="12pm") {#(@-33=12)}| #else {#(@-33=%\$1+12)}}| #else {#if (\$1=="12am") {#(@-33=0)}| #else {#(@-33=%\$1)}}|#(@-34=(:?($months.<($months?\$4))-1)%12+1)|#print ("TIME: "+\$1+" \-\=\|NIGHT: "+$night:@-34+"pm DAWN: "+$dawn:@-34+"am\|\=\-")|#print ("------------------------------------------------------------------")}}
#action >+wtime ^$2, the $3 of $4, Year $5 of the Third Age.={#(@-34=(:?($months.<($months?\$4))-1)%12+1)|#print ("TIME: "+\$1+" \-\=\|NIGHT: "+$night:@-34+"pm DAWN: "+$dawn:@-34+"am\|\=\-")|#print ("------------------------------------------------------------------")}}
#action >+calendar According to the $1 calendar, it is:={#print (" ")|#print (" ")|#print ("==================================================================")}
#action >+istar The present Tan Istar, Lord of the Istari Council is $1.={#print ("------------------------------------------------------------------")}
#action >+rebootmume Last reboot &1={#print ("==================================================================")|#print (" ")}
#action >+moon You can see a &1 ($2) &3.={#print (""+$colour_attribute_underline+"MOON"+$colour_attribute_ansioff+": $1 ($2)")}

--- VARIABLES ---
#($colour_attribute_ansioff = "\033[0m")
#($colour_attribute_blink = "\033[5m")
#($colour_attribute_bold = "\033[1m")
#($colour_attribute_inverse = "\033[7m")
#($colour_attribute_underline = "\033[4m")
#($colour_background_black = "\033[40m")
#($colour_background_blue = "\033[44m")
#($colour_background_cyan = "\033[46m")
#($colour_background_green = "\033[42m")
#($colour_background_magenta = "\033[45m")
#($colour_background_red = "\033[41m")
#($colour_background_white = "\033[47m")
#($colour_background_yellow = "\033[43m")
#($colour_high_black = "\033[1m\033[30m")
#($colour_high_blue = "\033[1m\033[34m")
#($colour_high_cyan = "\033[1m\033[36m")
#($colour_high_green = "\033[1m\033[32m")
#($colour_high_magenta = "\033[1m\033[35m")
#($colour_high_red = "\033[1m\033[31m")
#($colour_high_white = "\033[1m\033[37m")
#($colour_high_yellow = "\033[1m\033[33m")
#($colour_normal_black = "\033[30m")
#($colour_normal_blue = "\033[34m")
#($colour_normal_cyan = "\033[36m")
#($colour_normal_green = "\033[32m")
#($colour_normal_magenta = "\033[35m")
#($colour_normal_red = "\033[31m")
#($colour_normal_white = "\033[37m")
#($colour_normal_yellow = "\033[33m")
#($dawn = "7 7 6 5 4 5 6 7 7 8 9 8")
#($months = "Astron Thrimidge Forelithe Afterlithe Wedmath Halimath Winterfilth Blotmath Foreyule Afteryule Solmath Rethe Astron Thrimidge Forelithe Afterlithe Wedmath Halimath Winterfilth Blotmath Foreyule Afteryule Solmath Rethe Gwirith Lothron Norui Cerveth Urui Ivanneth Narbeleth Hithui Birithron Narwain Ninui Gwaeron Gwirith Lothron N\363rui Cerv\351th \332rui Ivanneth Narbeleth Hithui Birithron Narwain N\355nui Gwaeron")
#($night = "7 8 8 9 10 9 8 8 7 6 5 6")

--- MARKS ---
#mark Dawn &=cyan on black
#mark 11am=white on black
#mark 10am=white on black
#mark 9am=white on black
#mark 8am=white on black
#mark 7am=white on black
#mark 6am=white on black
#mark 5am=white on black
#mark 4am=white on black
#mark 3am=white on black
#mark 2am=white on black
#mark 1am=white on black
#mark 12pm=white on black
#mark 11pm=cyan on black
#mark 10pm=cyan on black
#mark 9pm=cyan on black
#mark 8pm=cyan on black
#mark 7pm=cyan on black
#mark 6pm=cyan on black
#mark 5pm=cyan on black
#mark 4pm=cyan on black
#mark 3pm=cyan on black
#mark 2pm=cyan on black
#mark 1pm=cyan on black
#mark 12am=cyan on black
#mark TIME:=magenta on black
#mark DAWN:=yellow on black
#mark NIGHT:=red on black
BootstrapVue — Tooltips and Popover Directives
To make good looking Vue apps, we need to style our components.
To make our lives easier, we can use components with styles built-in.
In this article, we look at how to use the v-b-tooltip and v-b-popover directives.
v-b-tooltip Directive
We can use the html modifier to make the v-b-tooltip directive render HTML.
For example, we can write:
<template>
<div id="app">
<b-button v-b-tooltip.html title="Hello <b>World!</b>">html tooltip</b-button>
</div>
</template>
<script>
export default {
name: "App"
};
</script>
We have the b-button component with the v-b-tooltip directive.
It has the html modifier added to it so that we can render HTML that’s set as the value of title .
Hiding and Showing Tooltip with $root Events
We can hide and show all tooltips by emitting events on this.$root .
For example, we can write:
this.$root.$emit('bv::hide::tooltip')
to hide a tooltip.
Also, we can write:
this.$root.$emit('bv::hide::tooltip', 'trigger-button-id')
to hide a tooltip by its ID.
To show a tooltip, we can write:
this.$root.$emit('bv::show::tooltip', 'trigger-button-id')
to show a tooltip by ID.
Disabling and Enabling Tooltip with $root Events
Likewise, we can disable or enable tooltips with $root events.
For example, we can write:
this.$root.$emit('bv::disable::tooltip')
to disable all tooltips.
Also, we can write:
this.$root.$emit('bv::disable::tooltip', 'trigger-button-id')
to disable a tooltip with a particular ID.
To enable it, we can write:
this.$root.$emit('bv::enable::tooltip', 'trigger-button-id')
Listening to Tooltip Changes with $root Events
We can listen to tooltip events if we add a listener in the mounted hook.
For example, we can write:
export default {
mounted() {
this.$root.$on('bv::tooltip::show', bvEvent => {
console.log('bvEvent:', bvEvent)
})
}
}
Hover
We can use the v-b-hover directive to run a callback on hover.
For example, we can write:
<div v-b-hover="callback">content</div>
Then the callback is run when we hover over the div.
We can write a callback as follows:
<template>
<div id="app">
<div v-b-hover="callback">content</div>
</div>
</template>
<script>
export default {
name: "App",
methods: {
callback(hovered){
console.log(hovered)
}
}
};
</script>
The callback takes a hovered parameter, which is true when we hover over the div and false otherwise.
Popovers
We can add a popover to our app with the v-b-popover directive.
For example, we can write:
<template>
<div id="app">
<b-button v-b-popover.hover="'popover content'" title="Popover">Hover Me</b-button>
</div>
</template>
<script>
export default {
name: "App"
};
</script>
We set the title prop to set the title.
The content is the value of the v-b-popover directive.
The hover modifier makes it show when we hover over the button.
Popover Positioning
We can change the positioning of the popover.
The possible values for positioning are top, topleft, topright, right, righttop, rightbottom, bottom, bottomleft, bottomright, left, lefttop, and leftbottom .
For example, we can write:
<template>
<div id="app">
<b-button v-b-popover.hover.bottomright="'popover content'" title="Popover">Hover Me</b-button>
</div>
</template>
<script>
export default {
name: "App"
};
</script>
Then the popover will be shown on the bottom right because of the bottomright modifier.
Triggers
We can trigger the showing of the popover on events other than hover.
We can show it on click, hover, or focus.
To change the event that triggers the display of the popover, we can change the modifier of v-b-popover .
For example, we can write:
<template>
<div id="app">
<b-button v-b-popover.click="'popover content'" title="Popover">Click Me</b-button>
</div>
</template>
<script>
export default {
name: "App"
};
</script>
Now the popover is displayed when we click the button because of the click modifier.
Dismiss Popover on Next Click
We can add the blur and click modifiers to dismiss the popover on the next click.
For example, we can write:
<template>
<div id="app">
<b-button v-b-popover.click.blur="'popover content'" title="Popover">Click Me</b-button>
</div>
</template>
<script>
export default {
name: "App"
};
</script>
Now when we click the Click Me button, the popover will toggle on and off.
Heading and Content
We can change the heading and content with an object we set as the value of v-b-popover .
For example, we can write:
<template>
<div id="app">
<b-button v-b-popover="options">Hover Me</b-button>
</div>
</template>
<script>
export default {
name: "App",
data() {
return {
options: {
title: "This is the <b>title</b>",
content: "This is the <i>content<i>",
html: true
}
};
}
};
</script>
The title and content are set in the options object instead of in the props.
html lets us display HTML content.
Conclusion
We can trigger root events to enable/disable or toggle on and off tooltips.
Also, we can add popovers to display messages.
By John Au-Yeung
Web developer specializing in React, Vue, and front end development.
DBConnect input error: No output.format defined for stanza
pembleton
Path Finder
Hello,
I have been using the DBConnect app for splunk for a few weeks, indexing just fine. The problem came up last week, when I noticed events were not up to date.
I checked dbx.log for errors and found the following error for all inputs:
Error:Scheduler - Error while reading
stanza=[dbmon-tail://DatabaseName/InputName]:
com.splunk.config.SplunkConfigurationException: No output.format defined for stanza.
This error occurs even though output.format=kv EXISTS in the stanza in inputs.conf!
I tried restarting Splunk unsuccessfully.
When I used the CLI command:
splunk.exe cmd btool inputs list dbmon-tail://DatabaseName/InputName
the output showed me that all inputs are automatically disabled (disable=1).
How can I prevent this from happening?
Help please!
lukejadamec
Super Champion
Check for a conflicting inputs.conf file.
Changes to inputs.conf will be made in the app that initiated the change, so if you enter manager from launcher and make a change to a dbx input the stanza will be written to launcher/local/inputs.conf, but if you change it from db connect it will go to dbx/local/inputs.conf.
lukejadamec
Super Champion
Did you restart splunk?
wbfoxii
Communicator
Having the same problem. I looked and indeed there was a stanza in the launcher/local/ directory. I deleted it and then used the DBX app to disable/enable. Still getting the same error. Checked "btool inputs list" and I don't see a conflict.
A Guide to Making a WordPress Site Pseudo-Static
WordPress is an excellent open-source content management system (CMS) and has become one of the world's most popular website-building tools. As an open-source CMS, WordPress is highly customizable, and making its URLs pseudo-static can improve a site's SEO and user experience. This article is a guide to making a WordPress site's URLs pseudo-static.
1. What is pseudo-static rewriting?
Pseudo-static rewriting converts dynamic URLs into static-looking URLs. For SEO, it makes it easier for search engines to index the site's content and helps improve its ranking. For user experience, it also speeds up page loading so visitors can reach the content faster.
Pseudo-static is not true static generation: it simply uses URL rewriting to turn the dynamically generated URLs of a WordPress site into static-looking URLs, so the whole site appears cleaner and more consistent and presents a static facade, while the page content is still generated dynamically.
For example, an original dynamic URL might be http://www.example.com/index.php?cat=1&id=2 , while the pseudo-static URL would be http://www.example.com/category/1/post/2/.
2. Advantages of pseudo-static URLs in WordPress
2.1 Better SEO
Search engines like static HTML pages: the link structure is clear and readable, and rankings are easier to optimize. If your site uses pseudo-static URLs, its pages are easier for search engines to index, which helps improve the site's SEO.
2.2 Better user experience
As mentioned above, pseudo-static rewriting can improve page load speed, which matters especially under heavy concurrent traffic. Pseudo-static URLs are also easier for human visitors to recognize and remember, which improves the experience: a user can reach an article by typing its address directly into the address bar.
3. Ways to implement pseudo-static URLs in WordPress
In a standard WordPress configuration, URLs come in only one form: dynamic URLs. We therefore need a suitable tool to convert WordPress's dynamic URLs into pseudo-static ones. Here are several common approaches.
3.1 Using a plugin
WordPress has a very rich ecosystem of third-party plugins, and it is not hard to find one that handles pseudo-static rewriting. Yoast SEO is a very widely used SEO plugin; its advanced settings include URL rewriting options that can be used for this.
Specifically: go to the WordPress admin area, open the Yoast SEO plugin, go to Advanced settings, open URL rewriting, enable "remove the category base URL" and "remove the tag base URL", and save the settings.
3.2 Editing the .htaccess file
The .htaccess file is a common configuration file for the Apache web server; it controls the server's configuration and behavior. Editing .htaccess can enable pseudo-static URLs.
Specifically:
1) Find the .htaccess file in your site's root directory (it is usually a hidden file, so look for it in your FTP client).
2) Back up the .htaccess file (it is best to make a backup before modifying it).
3) Edit the rules in the .htaccess file manually, or add them if they are missing. The following rules enable pseudo-static URLs:
# BEGIN WordPress
RewriteEngine On
RewriteBase /
RewriteRule ^index.php$ - [L]
RewriteCond %{REQUEST_FILENAME} !-f
RewriteCond %{REQUEST_FILENAME} !-d
RewriteRule . /index.php [L]
# END WordPress
4) After making the changes, upload the .htaccess file.
3.3 Nginx configuration
Unlike Apache, Nginx's rewrite rules have to be written in the Nginx configuration file.
Specifically:
1) Open the main Nginx configuration file, nginx.conf.
2) Find the server block and add the following code to the location / {} section:
location / {
if (!-e $request_filename) {
rewrite ^(.*)$ /index.php?q=$1 last;
break;
}
}
3) Save the configuration file and reload nginx so the configuration takes effect.
Each of the three methods above has its own pros and cons; different sites can choose whichever method suits their situation.
4. Things to watch out for
To make pseudo-static rewriting work smoothly, keep the following points in mind.
4.1 Back up your files
Whichever method you use, back up your files before making changes, so you can recover quickly if something goes wrong.
4.2 Test and verify
After enabling pseudo-static URLs, test the site to make sure everything still works and that pages load quickly.
4.3 Plan the SEO migration
If the site is already live, or other sites link to it, plan the SEO migration carefully after switching to pseudo-static URLs to avoid losing SEO value; in particular, changing URLs can affect search engine rankings.
4.4 Avoid breaking the existing SEO structure
When enabling pseudo-static URLs, pay attention to the existing SEO framework and structure and try not to disturb it, so you do not lose what you already have.
5. Summary
Pseudo-static URLs can improve a site's SEO and user experience, make the site easier for search engines to index, and speed up access and page loading. This article covered the ways to enable pseudo-static URLs in WordPress and the points to watch out for; hopefully it helps when you build a site and optimize it for SEO.
Python Quiz
The Python practice quiz is designed to check your understanding of one of the most common programming languages. The practice quiz covers the basics of Python as well as a few advanced topics. However, the quiz is primarily intended for people who are new to the language and have learned by reading the tutorials on the website.
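As an illustration only (not part of the quiz itself), a few of the basics the questions touch on -- casting, type checks, and file modes -- look like this in practice:

# A few of the basics the quiz touches on.
a = "2"
n = int(a)             # cast string to int -> 2
s = str(n)             # cast int back to string -> "2"
print(type(n))         # <class 'int'>

with open("test.txt", "w") as f:   # "w" truncates/creates the file immediately
    f.write("hello")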
1. In Python, what is one function to output content to the console?
2. Which of the following statements is NOT true about Python?
3. What symbol can you use to comment out one line of code?
4. What can you put before and after a section to make all of the lines a comment?
5. How do you create a variable “a” that is equal to 2?
6. How would you cast the string variable “a” that is equal to “2” into the integer 2?
7. How would you cast the int variable “a” that is equal to 2 into the string “2”?
8. Which of the following is not a valid assignment operator?
9. Which one of the following is a valid Python if statement
10. What keyword would you use to add an alternative condition to an if statement?
11. Which of the following is a valid way to start a function in Python?
12. Which of the following is a valid for loop in Python?
13. Which of the following is a valid way to start a while loop in Python?
14. If you have a variable “example”, how do you check to see what type of variable you are working with?
15. How can you replace all of the letter a in a string variable “example” with the letter b?
16. Which of the following is a valid list in Python?
17. Which of the following is a valid tuple in Python?
18. Which of the following is a valid dictionary in Python?
19. How would you print the second item in the list variable “example”?
20. What would the statement “print(‘%.2f’ % 123.444)” print out?
21. What is the keyword used after the try statement to handle exceptions?
22. What is the proper way to open a file that you intend to read from?
23. What is the proper way to open a file that you plan to write to?
24. If you had a statement like, “f = open(“test.txt”,”w”)”, what would happen to the file as soon as that statement is executed?
25. What is the difference between a class and an object in Python?
Converting data XML sequential rows of data to tables in another XML
Hi
I have the following XML, which I want to convert into separate tables: when a row with header cells is found, it starts a new table, and all following rows become that table's data until another header row is found. The XML looks like below.
<?xml version="1.0" encoding="UTF-8"?>
<root>
<table>
<row>
<header>
<paragraph>Rank</paragraph>
</header>
<header>
<paragraph>Name</paragraph>
</header>
</row>
<row>
<cell>
<paragraph>1</paragraph>
</cell>
<cell>
<paragraph>knko</paragraph>
</cell>
</row>
<row>
<header>
<paragraph>2</paragraph>
</header>
<header>
<paragraph>sample</paragraph>
</header>
</row>
<row>
<header>
<paragraph>3</paragraph>
</header>
<header>
<paragraph>third</paragraph>
</header>
</row>
<row>
<header>
<paragraph>Leading</paragraph>
</header>
<header>
<paragraph>Flight</paragraph>
</header>
</row>
<row>
<header>
<paragraph>300</paragraph>
</header>
<header>
<paragraph>junkname</paragraph>
</header>
</row>
<row>
<header>
<paragraph>400</paragraph>
</header>
<header>
<paragraph>Currency</paragraph>
</header>
</row>
</table>
</root>
I wanted to convert this into 2 tables like below
<root>
<table>
<tr>
<th>Rank</th>
<th>Name</th>
</tr>
<tr xmlns="">
<td>1</td>
<td>knko</td>
</tr>
<tr xmlns="">
<td>2</td>
<td>sample</td>
</tr>
<tr xmlns="">
<td>3</td>
<td>third</td>
</tr>
</table>
<table>
<tr>
<th>Leading</th>
<th>Flight</th>
</tr>
<tr xmlns="">
<td>300</td>
<td>junkname</td>
</tr>
<tr xmlns="">
<td>400</td>
<td>Currency</td>
</tr>
</table>
</root>
I am new to XSLT; thanks in advance for your help.
Regards
Sri
You can do this by using PHP to get the XML data and place it into a JSON string; then you can use JavaScript/jQuery to create the tables.
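If an XSLT answer is not strictly required, here is a rough Python/ElementTree sketch of the grouping idea instead (not from the original thread). Note that in the sample above even the data rows use <header> elements, so is_header_row() below is only a placeholder heuristic (first cell is non-numeric) that you would replace with whatever rule really marks a heading row in your data; the input/output file names are also just examples.

# Group <row> elements into <table>s: a "header" row starts a new table,
# every other row becomes a <tr> of <td>s in the current table.
import xml.etree.ElementTree as ET

def is_header_row(row):
    # Placeholder rule: treat the row as a heading if its first cell
    # is not a number.  Replace with the rule that fits your real data.
    first = row.find("./*/paragraph")
    return first is not None and not (first.text or "").strip().isdigit()

src = ET.parse("input.xml").getroot()        # example input file name
out = ET.Element("root")
table = None

for row in src.find("table").findall("row"):
    texts = [p.text for p in row.findall("./*/paragraph")]
    if table is None or is_header_row(row):
        table = ET.SubElement(out, "table")
        tag = "th"
    else:
        tag = "td"
    tr = ET.SubElement(table, "tr")
    for t in texts:
        ET.SubElement(tr, tag).text = t

ET.ElementTree(out).write("output.xml")      # example output file name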
Possible Duplicate:
What can I do if I forgot my Windows password?
I have a friend who bought a new Win7 computer, and he set it up with a new (admin) account and password, and now when he tries to log in with the password he set he gets "The password you have typed is incorrect, check spelling/caps etc..."
Since it seems to have set a different password on that account, and that is the only account on the machine, there is no way to log in, so standard password-recovery tools like Cain&Abel or Ophcrack won't work.
Are there any ways to reset a password without being logged in?
EDIT: Duplicate realized, see @Paul 's comment. Voting to close.
EDIT #2: It was a misspelled password; he was able to recreate the misspelling and log in to change the password back
If your installation doesn't have drive encryption enabled, you can use this tool to "clear" the passwd for the account, it can be downloaded as a LiveCD
Offline Windows Password & Registry Editor, Bootdisk / CD http://pogostick.net/~pnh/ntpasswd/bootdisk.html
What are the different types of Storage areas in Windows Azure?
BLOB: A BLOB (binary large object) is a component that allows you to store a large amount of content or binary data, such as photos, audio, and video files. BLOBs have a capacity of up to 200 terabytes and may be accessed using REST APIs (a minimal upload sketch follows this list).
Table: Tables store structured, non-relational data as entities with properties, spread across machines in the cloud.
Queue: A queue's primary purpose is to facilitate communication between Web and Worker Role instances. Queues store messages that a consumer may retrieve.
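As a rough illustration only (not part of the original answer), this is one way to reach Blob storage over REST from Python; the account, container, blob name, and SAS token below are placeholders you would replace with your own.

# Upload a small blob via the Blob service REST API using a pre-generated
# SAS URL (placeholder values -- substitute your own account/container/token).
import requests

sas_url = ("https://myaccount.blob.core.windows.net/"
           "mycontainer/hello.txt?sv=...&sig=...")   # hypothetical SAS URL

resp = requests.put(
    sas_url,
    data=b"Hello, Blob storage",
    headers={"x-ms-blob-type": "BlockBlob"},  # required header for Put Blob
)
resp.raise_for_status()  # expect 201 Created on success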
types of templates
What is a template example?
What is a Template Example? Template examples are ready-made web design layouts that can be used to quickly create a website. These templates can be customized to match the look and feel of the website, making them an attractive and easy-to-use solution for web developers. In this article, we will examine what a template example ...
Why is it called template?
What is a Template and Why Is It Called a Template? When it comes to creating digital content, templates are an essential part of the process. But what are templates and why is it called a template? In this article, we'll answer these questions ...
What are the two types of templates?
Two Types of Templates When it comes to web development, templates are a popular tool that helps developers create dynamic websites. However, not all templates are created equal. In this article, we'll explore the two main types of templates and how they differ. Static Templates Static templates are the simplest type of templates. They are ...
Why is it called template?
Template is a term used to refer to a set of instructions used to create a document or web page. It is an important tool used by many businesses to save time and money when creating documents or websites. But why is it called template? Template is derived from the Latin word "templum" which means ...
What is a template used for?
Templates are an incredibly useful tool that can be used to help you save time and energy when creating documents, projects, and other components of your workflow. They are especially beneficial for those who need to create multiple versions of the same document or project quickly and efficiently. A template is a pre-defined document, project, ...
What are the two types of templates?
Templates are pre-made documents that can be used to create new documents quickly and easily. They are especially useful for creating many of the same type of document and are used across many different industries from marketing to accounting. There are two main types of templates: static and dynamic. Static templates are the most basic ...
How to Start SQL Server in Single User Mode?
There can be certain scenarios when one needs to connect to an SQL Server Instance in a Single User Mode by using the Startup Option -m. For example, the need could be to recover a damaged system database such as Master, Model, MSDB etc or you may want to change the server configuration options.
Steps to Start SQL Server in Single User Mode
In this article we will take a look at steps which one needs to follow to Start SQL Server in Single User Mode.
Advantages of Starting SQL Server in Single User Mode
The advantage of starting SQL Server in single-user mode is that it will enable any member of the server’s Local Administrators Group to connect to the instance of SQL Server as a member of SYSADMIN (SA) fixed server role. For more information, we recommend reading Steps to Connect to SQL Server When System Administrators Are Locked Out.
Common Issues Encountered by DBAs when they start SQL Server in Single User Mode
As a Best Practice, stop the SQL Server Agent service from SQL Server Configuration Manager before connecting to an instance of SQL Server in single-user mode; otherwise, you will be blocked, as the SQL Server Agent service will use the only available connection.
How to Manage SQL Server in Single User Mode
When SQL Server is in Single User Mode a DBA should execute TSQL commands either by using SQLCMD or by using Query Editor of SQL Server Management Studio. For detailed information on supported SQL Server Startup option read Different Startup Options for SQL Server Database Engine Service.
Read the following step by step guide to learn How to Connect to SQL Server When System Administrators Are Locked Out.
How to Connect to SQL Server in Single User Mode in a Clustered Installation
In clustered environment when SQL Server is started in single user mode, the cluster resource DLL uses up the available single connection thereby preventing any other connection to SQL Server. Follow the below steps to resolve this issue.
1. From SQL Server Advanced Properties, remove the -m startup parameter
2. Using Failover Cluster Manager, take the SQL Server Resource Offline
3. Identify the current owner of Cluster Group and run the following command from the command prompt:
Net Start MSSQLSERVER /m
4. Using Failover Cluster Manager verify that the SQL Server Resource is still Offline
5. Using Command Prompt connect to SQL Server instance using SQLCMD and then execute the following command to connect to the instance.
SQLCMD -E -S <servername>
6. Once you have completed the activities close the command prompt and then bring the SQL Server and other resources online using Failover Cluster Manager.
Ashish Mehta
Ashish Kumar Mehta is a database manager, trainer and technical author. He has more than a decade of IT experience in database administration, performance tuning, database development and technical training on Microsoft SQL Server from SQL Server 2000 to SQL Server 2014. Ashish has authored more than 325 technical articles on SQL Server across leading SQL Server technology portals. Over the last few years, he has also developed and delivered many successful projects in database infrastructure; data warehouse and business intelligence; database migration; and upgrade projects for companies such as Hewlett-Packard, Microsoft, Cognizant and Centrica PLC, UK. He holds an engineering degree in computer science and industry standard certifications from Microsoft including MCITP Database Administrator 2005/2008, MCDBA SQL Server 2000 and MCTS .NET Framework 2.0 Web Applications.
Find HCF of 188 and 230 by Euclid’s game.
1 Answer
Best answer
By Euclid's game, HCF (a, b) = HCF (a – b, b) if a > b.
Here HCF (188, 230) = HCF (188, 230 – 188) because 230 > 188
= HCF (188, 42) = HCF (146, 42)
= HCF (104, 42) = HCF (62, 42)
= HCF (42, 20) = HCF (22, 20)
= HCF (20,2) = HCF (18, 2) = 2
∴ HCF (230, 188) = 2
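The same repeated-subtraction idea, written as a small Python check (added here only as an illustration):

# Euclid's game: repeatedly replace the larger number by the difference.
def hcf(a, b):
    while a != b:
        if a > b:
            a = a - b
        else:
            b = b - a
    return a

print(hcf(188, 230))  # prints 2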
Tip of the Day
Language: Web
Expertise: All
Jun 29, 2000
Scrolling DIVs
These days, most Web developers are discouraged from using frames (with good reason). However, users still like the functionality they provide. FRAME and IFRAME are handy scrolling containers, but you may not know that scrolling DIVs can accomplish most of the same things—without the headaches of frames.
Frames are most commonly used to keep some elements stationary while scrolling others. For example, an ASP page might produce a large table that would make the user scroll far past the navigation bars. The table could be placed in a scrolling DIV, where it will scroll just like it was in an IFRAME!
<div style="HEIGHT:100px; WIDTH:50%; OVERFLOW:auto">
<table border=1 width=100%>
<tr><td>stuff</td>
<td>more stuff</td></tr>
<!-- many more table rows -->
<tr><td>stuff</td>
<td>more stuff</td></tr>
</table>
</div>
The CSS-2 attribute OVERFLOW:auto scrolls the element when needed; scrollbars are hidden when they are not needed. OVERFLOW:scroll forces scrollbars to appear. The OVERFLOW attribute is described here: http://www.w3.org/TR/REC-CSS2/visufx.html#overflow-clipping. The HEIGHT attribute must then be set so the DIV will run out of space and begin to scroll.
Scrolling DIVs can be used instead of framesets to keep navigation bars stationary while content scrolls:
<body>
<table width=100% height=100% border=1>
<tr><td colspan=2 height=20% valign=top>
I'm the banner and global navigation.</td></tr>
<tr><td width=20% valign=top>
I'm the left nav bar.</td>
<td width=90% valign=top>
<div id=divContent style="HEIGHT: 100%; OVERFLOW: auto">
<P>I'm the content. I scroll if I need to.</P>
</div></td></tr>
</table>
</body>
I frequently use a scrolling DIV surrounded by a table cell instead of a read-only TEXTAREA:
<table border=1 cellspacing=0 cellpadding=0 width=50%>
<tr><td>
<div style="HEIGHT: 70px; WIDTH: 100%; OVERFLOW: auto">
<INPUT id=checkbox1 name=checkbox1 type=checkbox>Choose Me<BR>
<!-- more checkboxes -->
<INPUT id=checkboxN name=checkboxN type=checkbox>Choose Me<BR>
</div>
</td></tr>
</table>
Of course, almost anything can go inside a DIV, like these checkboxes.
Note: OVERFLOW is supported by IE4+, but not by Navigator.
Chris McCann
Added friend PointSetArrayEditor.
[u/mrichter/AliRoot.git] / EVE / Reve / TrackEditors.cxx
// $Header$

#include "TrackEditors.h"
#include <Reve/Track.h>

#include <TVirtualPad.h>
#include <TColor.h>

#include <TGLabel.h>
#include <TGButton.h>
#include <TGNumberEntry.h>
#include <TGColorSelect.h>
#include <TGDoubleSlider.h>

using namespace Reve;

//______________________________________________________________________
// TrackListEditor
//

ClassImp(TrackListEditor)

TrackListEditor::TrackListEditor(const TGWindow *p, Int_t id, Int_t width, Int_t height,
                                 UInt_t options, Pixel_t back) :
  TGedFrame(p, id, width, height, options | kVerticalFrame, back)
{
  fTC = 0;
  MakeTitle("TrackList");

  // --- Limits

  {
    TGHorizontalFrame* f = new TGHorizontalFrame(this);
    TGLabel *l = new TGLabel(f, "Max R:");
    f->AddFrame(l, new TGLayoutHints(kLHintsLeft | kLHintsCenterY, 25, 2, 1, 1));
    fMaxR = new TGNumberEntry(f, 0., 6, -1,
                              TGNumberFormat::kNESRealOne, TGNumberFormat::kNEAPositive,
                              TGNumberFormat::kNELLimitMinMax, 0.1, 2000.0);
    fMaxR->GetNumberEntry()->SetToolTipText("Maximum radius to which the tracks will be drawn.");
    f->AddFrame(fMaxR, new TGLayoutHints(kLHintsLeft, 1, 1, 1, 1));
    fMaxR->Associate(f);
    fMaxR->Connect("ValueSet(Long_t)", "Reve::TrackListEditor", this, "DoMaxR()");
    AddFrame(f, new TGLayoutHints(kLHintsTop, 1, 1, 1, 1));
  }

  {
    TGHorizontalFrame* f = new TGHorizontalFrame(this);
    TGLabel *l = new TGLabel(f, "Max Z:");
    f->AddFrame(l, new TGLayoutHints(kLHintsLeft | kLHintsCenterY, 24, 2, 1, 1));
    fMaxZ = new TGNumberEntry(f, 0., 6, -1,
                              TGNumberFormat::kNESRealOne, TGNumberFormat::kNEAPositive,
                              TGNumberFormat::kNELLimitMinMax, 0.1, 2000.0);
    fMaxZ->GetNumberEntry()->SetToolTipText("Maximum z-coordinate to which the tracks will be drawn.");
    f->AddFrame(fMaxZ, new TGLayoutHints(kLHintsLeft, 1, 1, 1, 1));
    fMaxZ->Associate(f);
    fMaxZ->Connect("ValueSet(Long_t)", "Reve::TrackListEditor", this, "DoMaxZ()");
    AddFrame(f, new TGLayoutHints(kLHintsTop, 1, 1, 1, 1));
  }

  {
    TGHorizontalFrame* f = new TGHorizontalFrame(this);
    TGLabel *l = new TGLabel(f, "Max Orbits:");
    f->AddFrame(l, new TGLayoutHints(kLHintsTop | kLHintsCenterY, 0, 2, 1, 1));
    fMaxOrbits = new TGNumberEntry(f, 0., 6, -1,
                                   TGNumberFormat::kNESRealOne, TGNumberFormat::kNEAPositive,
                                   TGNumberFormat::kNELLimitMinMax, 0.1, 100.0);
    fMaxOrbits->GetNumberEntry()->SetToolTipText("Maximal angular path of tracks' orbits (1 ~ 2Pi).");
    f->AddFrame(fMaxOrbits, new TGLayoutHints(kLHintsLeft, 1, 1, 1, 1));
    fMaxOrbits->Associate(f);
    fMaxOrbits->Connect("ValueSet(Long_t)", "Reve::TrackListEditor", this, "DoMaxOrbits()");
    AddFrame(f, new TGLayoutHints(kLHintsTop, 1, 1, 1, 1));
  }

  {
    TGHorizontalFrame* f = new TGHorizontalFrame(this);
    TGLabel *l = new TGLabel(f, "Min Angle:");
    f->AddFrame(l, new TGLayoutHints(kLHintsTop | kLHintsCenterY, 3, 2, 1, 1));
    fMinAng = new TGNumberEntry(f, 0., 6, -1,
                                TGNumberFormat::kNESRealOne, TGNumberFormat::kNEAPositive,
                                TGNumberFormat::kNELLimitMinMax, 1, 180.0);
    fMinAng->GetNumberEntry()->SetToolTipText("Minimal angular step between two helix points.");
    f->AddFrame(fMinAng, new TGLayoutHints(kLHintsLeft, 1, 1, 1, 1));
    fMinAng->Associate(f);
    fMinAng->Connect("ValueSet(Long_t)", "Reve::TrackListEditor", this, "DoMinAng()");
    AddFrame(f, new TGLayoutHints(kLHintsTop, 1, 1, 1, 1));
  }

  {
    TGHorizontalFrame* f = new TGHorizontalFrame(this);
    TGLabel *l = new TGLabel(f, "Delta:");
    f->AddFrame(l, new TGLayoutHints(kLHintsTop | kLHintsCenterY, 32, 2, 1, 1));
    fDelta = new TGNumberEntry(f, 0., 6, -1,
                               TGNumberFormat::kNESRealOne, TGNumberFormat::kNEAPositive,
                               TGNumberFormat::kNELLimitMinMax, 0.001, 100.0);
    fDelta->GetNumberEntry()->SetToolTipText("Maximal error at the mid-point of the line connecting to helix points.");
    f->AddFrame(fDelta, new TGLayoutHints(kLHintsLeft, 1, 1, 1, 1));
    fDelta->Associate(f);
    fDelta->Connect("ValueSet(Long_t)", "Reve::TrackListEditor", this, "DoDelta()");
    AddFrame(f, new TGLayoutHints(kLHintsTop, 1, 1, 1, 1));
  }

  // --- Rendering control

  fRnrTracks = new TGCheckButton(this, "Render tracks");
  AddFrame(fRnrTracks, new TGLayoutHints(kLHintsTop, 3, 1, 1, 0));
  fRnrTracks->Connect
    ("Toggled(Bool_t)",
     "Reve::TrackListEditor", this, "DoRnrTracks()");

  fRnrMarkers = new TGCheckButton(this, "Render markers");
  AddFrame(fRnrMarkers, new TGLayoutHints(kLHintsTop, 3, 1, 1, 0));
  fRnrMarkers->Connect
    ("Toggled(Bool_t)",
     "Reve::TrackListEditor", this, "DoRnrMarkers()");

  // --- Kinematics fitting

  fFitDaughters = new TGCheckButton(this, "Fit daughters");
  AddFrame(fFitDaughters, new TGLayoutHints(kLHintsTop, 3, 1, 1, 0));
  fFitDaughters->Connect("Toggled(Bool_t)","Reve::TrackListEditor", this, "DoFitDaughters()");

  fFitDecay = new TGCheckButton(this, "Fit decay");
  AddFrame(fFitDecay, new TGLayoutHints(kLHintsTop, 3, 1, 1, 0));
  fFitDecay->Connect("Toggled(Bool_t)","Reve::TrackListEditor", this, "DoFitDecay()");

  // --- Selectors

  TGDoubleHSlider* hs = new TGDoubleHSlider(this);
  hs->SetRange(0.1, 10);
  hs->SetPosition(0.1, 10);
  hs->Resize(260, 20);
  AddFrame(hs, new TGLayoutHints(kLHintsLeft, 0, 5));
  hs->Connect("PositionChanged()", "Reve::TrackListEditor",
              this, "DoPtScroll()");


  // Register the editor.
  TClass *cl = TrackList::Class();
  TGedElement *ge = new TGedElement;
  ge->fGedFrame = this;
  ge->fCanvas = 0;
  cl->GetEditorList()->Add(ge);
}

TrackListEditor::~TrackListEditor()
{}

/**************************************************************************/

void TrackListEditor::SetModel(TVirtualPad* pad, TObject* obj, Int_t )
{
  fModel = 0;
  fPad   = 0;

  if (!obj || !obj->InheritsFrom(TrackList::Class()) || obj->InheritsFrom(TVirtualPad::Class())) {
    SetActive(kFALSE);
    return;
  }

  fModel = obj;
  fPad   = pad;

  fTC = dynamic_cast<TrackList*>(fModel);

  fMaxR->SetNumber(fTC->GetMaxR());
  fMaxZ->SetNumber(fTC->GetMaxZ());
  fMaxOrbits->SetNumber(fTC->GetMaxOrbs());
  fMinAng->SetNumber(fTC->GetMinAng());
  fDelta->SetNumber(fTC->GetDelta());

  fRnrTracks->SetState(fTC->GetRnrTracks() ? kButtonDown : kButtonUp);
  fRnrMarkers->SetState(fTC->GetRnrMarkers() ? kButtonDown : kButtonUp);

  fFitDaughters->SetState(fTC->GetFitDaughters() ? kButtonDown : kButtonUp);
  fFitDecay->SetState(fTC->GetFitDecay() ? kButtonDown : kButtonUp);

  SetActive();
}

/**************************************************************************/

void TrackListEditor::DoMaxR()
{
  Double_t maxr = fMaxR->GetNumber();
  fTC->SetMaxR(maxr);
  Update();
}

void TrackListEditor::DoMaxZ()
{
  fTC->SetMaxZ(fMaxZ->GetNumber());
  Update();
}

void TrackListEditor::DoMaxOrbits()
{
  fTC->SetMaxOrbs(fMaxOrbits->GetNumber());
  Update();
}

void TrackListEditor::DoMinAng()
{
  fTC->SetMinAng(fMinAng->GetNumber());
  Update();
}

void TrackListEditor::DoDelta()
{
  fTC->SetDelta(fDelta->GetNumber());
  Update();
}

/**************************************************************************/

void TrackListEditor::DoRnrTracks()
{
  fTC->SetRnrTracks(fRnrTracks->IsOn());
  Update();
}

void TrackListEditor::DoRnrMarkers()
{
  fTC->SetRnrMarkers(fRnrMarkers->IsOn());
  Update();
}

/**************************************************************************/

void TrackListEditor::DoFitDaughters()
{
  fTC->SetFitDaughters(fFitDaughters->IsOn());
  Update();
}

void TrackListEditor::DoFitDecay()
{
  fTC->SetFitDecay(fFitDecay->IsOn());
  Update();
}

/**************************************************************************/

void TrackListEditor::DoPtScroll()
{
  TGDoubleHSlider* hs = (TGDoubleHSlider*)gTQSender;

  Double_t min = hs->GetMinPosition(), max = hs->GetMaxPosition();
  printf("Track pt range: min=%f max=%f\n", min, max);
  fTC->SelectByPt(min, max);
}
Boost C++ Libraries
boost/test/impl/execution_monitor.ipp
// (C) Copyright Gennadiy Rozental 2001-2008.
// (C) Copyright Beman Dawes and Ullrich Koethe 1995-2001.
// Use, modification, and distribution are subject to the
// Boost Software License, Version 1.0. (See accompanying file
// http://www.boost.org/LICENSE_1_0.txt)
// See http://www.boost.org/libs/test for the library home page.
//
// File : $RCSfile$
//
// Version : $Revision: 54633 $
//
// Description : provides execution monitor implementation for all supported
// configurations, including Microsoft structured exception based, unix signals
// based and special workarounds for borland
//
// Note that when testing requirements or user wishes preclude use of this
// file as a separate compilation unit, it may be included as a header file.
//
// Header dependencies are deliberately restricted to reduce coupling to other
// boost libraries.
// ***************************************************************************
#ifndef BOOST_TEST_EXECUTION_MONITOR_IPP_012205GER
#define BOOST_TEST_EXECUTION_MONITOR_IPP_012205GER
// Boost.Test
#include <boost/test/detail/config.hpp>
#include <boost/test/detail/workaround.hpp>
#include <boost/test/execution_monitor.hpp>
#include <boost/test/debug.hpp>
// Boost
#include <boost/cstdlib.hpp> // for exit codes
#include <boost/config.hpp> // for workarounds
// STL
#include <string> // for std::string
#include <new> // for std::bad_alloc
#include <typeinfo> // for std::bad_cast, std::bad_typeid
#include <exception> // for std::exception, std::bad_exception
#include <stdexcept> // for std exception hierarchy
#include <cstring> // for C string API
#include <cassert> // for assert
#include <cstddef> // for NULL
#include <cstdio> // for vsnprintf
#include <cstdarg> // for varargs
#ifdef BOOST_NO_STDC_NAMESPACE
namespace std { using ::strerror; using ::strlen; using ::strncat; }
#endif
// to use vsnprintf
#if defined(__SUNPRO_CC) || defined(__SunOS)
# include <stdio.h>
# include <stdarg.h>
using std::va_list;
#endif
// to use vsnprintf
#if defined(__QNXNTO__)
# include <stdio.h>
#endif
#if defined(_WIN32) && !defined(BOOST_DISABLE_WIN32) && \
(!defined(__COMO__) && !defined(__MWERKS__) && !defined(__GNUC__) || \
BOOST_WORKAROUND(__MWERKS__, >= 0x3000))
# define BOOST_SEH_BASED_SIGNAL_HANDLING
# include <windows.h>
# if defined(__MWERKS__) || (defined(_MSC_VER) && !defined(UNDER_CE))
# include <eh.h>
# endif
# if defined(__BORLANDC__) && __BORLANDC__ >= 0x560 || defined(__MWERKS__)
# include <stdint.h>
# endif
# if defined(__BORLANDC__) && __BORLANDC__ < 0x560
typedef unsigned uintptr_t;
# endif
# if BOOST_WORKAROUND(_MSC_VER, < 1300 ) || defined(UNDER_CE)
typedef void* uintptr_t;
# endif
// for the FP control routines
#include <float.h>
#ifndef EM_INVALID
#define EM_INVALID _EM_INVALID
#endif
#ifndef EM_DENORMAL
#define EM_DENORMAL _EM_DENORMAL
#endif
#ifndef EM_ZERODIVIDE
#define EM_ZERODIVIDE _EM_ZERODIVIDE
#endif
#ifndef EM_OVERFLOW
#define EM_OVERFLOW _EM_OVERFLOW
#endif
#ifndef EM_UNDERFLOW
#define EM_UNDERFLOW _EM_UNDERFLOW
#endif
#ifndef MCW_EM
#define MCW_EM _MCW_EM
#endif
# if !defined(NDEBUG) && defined(_MSC_VER) && !defined(UNDER_CE)
# include <crtdbg.h>
# define BOOST_TEST_CRT_HOOK_TYPE _CRT_REPORT_HOOK
# define BOOST_TEST_CRT_ASSERT _CRT_ASSERT
# define BOOST_TEST_CRT_ERROR _CRT_ERROR
# define BOOST_TEST_CRT_SET_HOOK(H) _CrtSetReportHook(H)
# else
# define BOOST_TEST_CRT_HOOK_TYPE void*
# define BOOST_TEST_CRT_ASSERT 2
# define BOOST_TEST_CRT_ERROR 1
# define BOOST_TEST_CRT_SET_HOOK(H) (void*)(H)
# endif
# if !BOOST_WORKAROUND(_MSC_VER, >= 1400 ) || defined(UNDER_CE)
typedef void* _invalid_parameter_handler;
inline _invalid_parameter_handler
_set_invalid_parameter_handler( _invalid_parameter_handler arg )
{
return arg;
}
# endif
# if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x0564)) || defined(UNDER_CE)
namespace { void _set_se_translator( void* ) {} }
# endif
#elif defined(BOOST_HAS_SIGACTION)
# define BOOST_SIGACTION_BASED_SIGNAL_HANDLING
# include <unistd.h>
# include <signal.h>
# include <setjmp.h>
# if defined(__FreeBSD__)
# ifndef SIGPOLL
# define SIGPOLL SIGIO
# endif
# if (__FreeBSD_version < 70100)
# define ILL_ILLADR 0 // ILL_RESAD_FAULT
# define ILL_PRVOPC ILL_PRIVIN_FAULT
# define ILL_ILLOPN 2 // ILL_RESOP_FAULT
# define ILL_COPROC ILL_FPOP_FAULT
# define BOOST_TEST_LIMITED_SIGNAL_DETAILS
# define BOOST_TEST_IGNORE_SIGCHLD
# endif
# endif
# if !defined(__CYGWIN__) && !defined(__QNXNTO__)
# define BOOST_TEST_USE_ALT_STACK
# endif
# if defined(SIGPOLL) && !defined(__CYGWIN__) && \
!(defined(macintosh) || defined(__APPLE__) || defined(__APPLE_CC__)) && \
!defined(__NetBSD__) && \
!defined(__QNXNTO__)
# define BOOST_TEST_CATCH_SIGPOLL
# endif
# ifdef BOOST_TEST_USE_ALT_STACK
# define BOOST_TEST_ALT_STACK_SIZE SIGSTKSZ
# endif
#else
# define BOOST_NO_SIGNAL_HANDLING
#endif
#ifndef UNDER_CE
#include <errno.h>
#endif
#include <boost/test/detail/suppress_warnings.hpp>
//____________________________________________________________________________//
namespace boost {
// ************************************************************************** //
// ************** report_error ************** //
// ************************************************************************** //
namespace detail {
#ifdef __BORLANDC__
# define BOOST_TEST_VSNPRINTF( a1, a2, a3, a4 ) std::vsnprintf( (a1), (a2), (a3), (a4) )
#elif BOOST_WORKAROUND(_MSC_VER, <= 1310) || \
BOOST_WORKAROUND(__MWERKS__, BOOST_TESTED_AT(0x3000)) || \
defined(UNDER_CE)
# define BOOST_TEST_VSNPRINTF( a1, a2, a3, a4 ) _vsnprintf( (a1), (a2), (a3), (a4) )
#else
# define BOOST_TEST_VSNPRINTF( a1, a2, a3, a4 ) vsnprintf( (a1), (a2), (a3), (a4) )
#endif
static void
report_error( execution_exception::error_code ec, char const* format, ... )
{
static const int REPORT_ERROR_BUFFER_SIZE = 512;
static char buf[REPORT_ERROR_BUFFER_SIZE];
va_list args;
va_start( args, format );
BOOST_TEST_VSNPRINTF( buf, sizeof(buf)-1, format, args );
buf[sizeof(buf)-1] = 0;
va_end( args );
throw execution_exception( ec, buf );
}
//____________________________________________________________________________//
template<typename Tr,typename Functor>
inline int
do_invoke( Tr const& tr, Functor const& F )
{
return tr ? (*tr)( F ) : F();
}
//____________________________________________________________________________//
} // namespace detail
#if defined(BOOST_SIGACTION_BASED_SIGNAL_HANDLING)
// ************************************************************************** //
// ************** Sigaction based signal handling ************** //
// ************************************************************************** //
namespace detail {
// ************************************************************************** //
// ************** boost::detail::system_signal_exception ************** //
// ************************************************************************** //
class system_signal_exception {
public:
// Constructor
system_signal_exception()
: m_sig_info( 0 )
, m_context( 0 )
{}
// Access methods
void operator()( siginfo_t* i, void* c )
{
m_sig_info = i;
m_context = c;
}
void report() const;
private:
// Data members
siginfo_t* m_sig_info; // system signal detailed info
void* m_context; // signal context
};
//____________________________________________________________________________//
void
system_signal_exception::report() const
{
if( !m_sig_info )
return; // no error actually occur?
switch( m_sig_info->si_code ) {
case SI_USER:
report_error( execution_exception::system_error,
"signal: generated by kill() (or family); uid=%d; pid=%d",
(int)m_sig_info->si_uid, (int)m_sig_info->si_pid );
break;
case SI_QUEUE:
report_error( execution_exception::system_error,
"signal: sent by sigqueue()" );
break;
case SI_TIMER:
report_error( execution_exception::system_error,
"signal: the expiration of a timer set by timer_settimer()" );
break;
case SI_ASYNCIO:
report_error( execution_exception::system_error,
"signal: generated by the completion of an asynchronous I/O request" );
break;
case SI_MESGQ:
report_error( execution_exception::system_error,
"signal: generated by the the arrival of a message on an empty message queue" );
break;
default:
break;
}
switch( m_sig_info->si_signo ) {
case SIGILL:
switch( m_sig_info->si_code ) {
#ifndef BOOST_TEST_LIMITED_SIGNAL_DETAILS
case ILL_ILLOPC:
report_error( execution_exception::system_fatal_error,
"signal: illegal opcode; address of failing instruction: 0x%08lx",
m_sig_info->si_addr );
break;
case ILL_ILLTRP:
report_error( execution_exception::system_fatal_error,
"signal: illegal trap; address of failing instruction: 0x%08lx",
m_sig_info->si_addr );
break;
case ILL_PRVREG:
report_error( execution_exception::system_fatal_error,
"signal: privileged register; address of failing instruction: 0x%08lx",
m_sig_info->si_addr );
break;
case ILL_BADSTK:
report_error( execution_exception::system_fatal_error,
"signal: internal stack error; address of failing instruction: 0x%08lx",
m_sig_info->si_addr );
break;
#endif
case ILL_ILLOPN:
report_error( execution_exception::system_fatal_error,
"signal: illegal operand; address of failing instruction: 0x%08lx",
m_sig_info->si_addr );
break;
case ILL_ILLADR:
report_error( execution_exception::system_fatal_error,
"signal: illegal addressing mode; address of failing instruction: 0x%08lx",
m_sig_info->si_addr );
break;
case ILL_PRVOPC:
report_error( execution_exception::system_fatal_error,
"signal: privileged opcode; address of failing instruction: 0x%08lx",
m_sig_info->si_addr );
break;
case ILL_COPROC:
report_error( execution_exception::system_fatal_error,
"signal: co-processor error; address of failing instruction: 0x%08lx",
m_sig_info->si_addr );
break;
default:
report_error( execution_exception::system_fatal_error,
"signal: SIGILL, si_code: %d (illegal instruction; address of failing instruction: 0x%08lx)",
m_sig_info->si_addr, m_sig_info->si_code );
break;
}
break;
case SIGFPE:
switch( m_sig_info->si_code ) {
case FPE_INTDIV:
report_error( execution_exception::system_error,
"signal: integer divide by zero; address of failing instruction: 0x%08lx",
m_sig_info->si_addr );
break;
case FPE_INTOVF:
report_error( execution_exception::system_error,
"signal: integer overflow; address of failing instruction: 0x%08lx",
m_sig_info->si_addr );
break;
case FPE_FLTDIV:
report_error( execution_exception::system_error,
"signal: floating point divide by zero; address of failing instruction: 0x%08lx",
m_sig_info->si_addr );
break;
case FPE_FLTOVF:
report_error( execution_exception::system_error,
"signal: floating point overflow; address of failing instruction: 0x%08lx",
m_sig_info->si_addr );
break;
case FPE_FLTUND:
report_error( execution_exception::system_error,
"signal: floating point underflow; address of failing instruction: 0x%08lx",
m_sig_info->si_addr );
break;
case FPE_FLTRES:
report_error( execution_exception::system_error,
"signal: floating point inexact result; address of failing instruction: 0x%08lx",
m_sig_info->si_addr );
break;
case FPE_FLTINV:
report_error( execution_exception::system_error,
"signal: invalid floating point operation; address of failing instruction: 0x%08lx",
m_sig_info->si_addr );
break;
case FPE_FLTSUB:
report_error( execution_exception::system_error,
"signal: subscript out of range; address of failing instruction: 0x%08lx",
m_sig_info->si_addr );
break;
default:
report_error( execution_exception::system_error,
"signal: SIGFPE, si_code: %d (errnoneous arithmetic operations; address of failing instruction: 0x%08lx)",
m_sig_info->si_addr, m_sig_info->si_code );
break;
}
break;
case SIGSEGV:
switch( m_sig_info->si_code ) {
#ifndef BOOST_TEST_LIMITED_SIGNAL_DETAILS
case SEGV_MAPERR:
report_error( execution_exception::system_fatal_error,
"memory access violation at address: 0x%08lx: no mapping at fault address",
m_sig_info->si_addr );
break;
case SEGV_ACCERR:
report_error( execution_exception::system_fatal_error,
"memory access violation at address: 0x%08lx: invalid permissions",
m_sig_info->si_addr );
break;
#endif
default:
report_error( execution_exception::system_fatal_error,
"signal: SIGSEGV, si_code: %d (memory access violation at address: 0x%08lx)",
m_sig_info->si_addr, m_sig_info->si_code );
break;
}
break;
case SIGBUS:
switch( m_sig_info->si_code ) {
#ifndef BOOST_TEST_LIMITED_SIGNAL_DETAILS
case BUS_ADRALN:
report_error( execution_exception::system_fatal_error,
"memory access violation at address: 0x%08lx: invalid address alignment",
m_sig_info->si_addr );
break;
case BUS_ADRERR:
report_error( execution_exception::system_fatal_error,
"memory access violation at address: 0x%08lx: non-existent physical address",
m_sig_info->si_addr );
break;
case BUS_OBJERR:
report_error( execution_exception::system_fatal_error,
"memory access violation at address: 0x%08lx: object specific hardware error",
m_sig_info->si_addr );
break;
#endif
default:
report_error( execution_exception::system_fatal_error,
"signal: SIGSEGV, si_code: %d (memory access violation at address: 0x%08lx)",
m_sig_info->si_addr, m_sig_info->si_code );
break;
}
break;
case SIGCHLD:
switch( m_sig_info->si_code ) {
#ifndef BOOST_TEST_LIMITED_SIGNAL_DETAILS
case CLD_EXITED:
report_error( execution_exception::system_error,
"child has exited; pid: %d; uid: %d; exit value: %d",
(int)m_sig_info->si_pid, (int)m_sig_info->si_uid, (int)m_sig_info->si_status );
break;
case CLD_KILLED:
report_error( execution_exception::system_error,
"child was killed; pid: %d; uid: %d; exit value: %d",
(int)m_sig_info->si_pid, (int)m_sig_info->si_uid, (int)m_sig_info->si_status );
break;
case CLD_DUMPED:
report_error( execution_exception::system_error,
"child terminated abnormally; pid: %d; uid: %d; exit value: %d",
(int)m_sig_info->si_pid, (int)m_sig_info->si_uid, (int)m_sig_info->si_status );
break;
case CLD_TRAPPED:
report_error( execution_exception::system_error,
"traced child has trapped; pid: %d; uid: %d; exit value: %d",
(int)m_sig_info->si_pid, (int)m_sig_info->si_uid, (int)m_sig_info->si_status );
break;
case CLD_STOPPED:
report_error( execution_exception::system_error,
"child has stopped; pid: %d; uid: %d; exit value: %d",
(int)m_sig_info->si_pid, (int)m_sig_info->si_uid, (int)m_sig_info->si_status );
break;
case CLD_CONTINUED:
report_error( execution_exception::system_error,
"stopped child had continued; pid: %d; uid: %d; exit value: %d",
(int)m_sig_info->si_pid, (int)m_sig_info->si_uid, (int)m_sig_info->si_status );
break;
#endif
default:
report_error( execution_exception::system_error,
"signal: SIGCHLD, si_code: %d (child process has terminated; pid: %d; uid: %d; exit value: %d)",
(int)m_sig_info->si_pid, (int)m_sig_info->si_uid, (int)m_sig_info->si_status, m_sig_info->si_code );
break;
}
break;
#if defined(BOOST_TEST_CATCH_SIGPOLL)
case SIGPOLL:
switch( m_sig_info->si_code ) {
#ifndef BOOST_TEST_LIMITED_SIGNAL_DETAILS
case POLL_IN:
report_error( execution_exception::system_error,
"data input available; band event %d",
(int)m_sig_info->si_band );
break;
case POLL_OUT:
report_error( execution_exception::system_error,
"output buffers available; band event %d",
(int)m_sig_info->si_band );
break;
case POLL_MSG:
report_error( execution_exception::system_error,
"input message available; band event %d",
(int)m_sig_info->si_band );
break;
case POLL_ERR:
report_error( execution_exception::system_error,
"i/o error; band event %d",
(int)m_sig_info->si_band );
break;
case POLL_PRI:
report_error( execution_exception::system_error,
"high priority input available; band event %d",
(int)m_sig_info->si_band );
break;
#if defined(POLL_ERR) && defined(POLL_HUP) && (POLL_ERR - POLL_HUP)
case POLL_HUP:
report_error( execution_exception::system_error,
"device disconnected; band event %d",
(int)m_sig_info->si_band );
break;
#endif
#endif
default:
report_error( execution_exception::system_error,
"signal: SIGPOLL, si_code: %d (asynchronous I/O event occured; band event %d)",
(int)m_sig_info->si_band, m_sig_info->si_code );
break;
}
break;
#endif
case SIGABRT:
report_error( execution_exception::system_error,
"signal: SIGABRT (application abort requested)" );
break;
case SIGALRM:
report_error( execution_exception::timeout_error,
"signal: SIGALRM (timeout while executing function)" );
break;
default:
report_error( execution_exception::system_error, "unrecognized signal" );
}
}
//____________________________________________________________________________//
// ************************************************************************** //
// ************** boost::detail::signal_action ************** //
// ************************************************************************** //
// Forward declaration
extern "C" {
static void execution_monitor_jumping_signal_handler( int sig, siginfo_t* info, void* context );
static void execution_monitor_attaching_signal_handler( int sig, siginfo_t* info, void* context );
}
class signal_action {
typedef struct sigaction* sigaction_ptr;
public:
//Constructor
signal_action();
signal_action( int sig, bool install, bool attach_dbg, char* alt_stack );
~signal_action();
private:
// Data members
int m_sig;
bool m_installed;
struct sigaction m_new_action;
struct sigaction m_old_action;
};
//____________________________________________________________________________//
signal_action::signal_action()
: m_installed( false )
{}
//____________________________________________________________________________//
signal_action::signal_action( int sig, bool install, bool attach_dbg, char* alt_stack )
: m_sig( sig )
, m_installed( install )
{
if( !install )
return;
std::memset( &m_new_action, 0, sizeof(struct sigaction) );
BOOST_TEST_SYS_ASSERT( ::sigaction( m_sig , sigaction_ptr(), &m_new_action ) != -1 );
if( m_new_action.sa_sigaction || m_new_action.sa_handler ) {
m_installed = false;
return;
}
m_new_action.sa_flags |= SA_SIGINFO;
m_new_action.sa_sigaction = attach_dbg ? &execution_monitor_attaching_signal_handler
: &execution_monitor_jumping_signal_handler;
BOOST_TEST_SYS_ASSERT( sigemptyset( &m_new_action.sa_mask ) != -1 );
#ifdef BOOST_TEST_USE_ALT_STACK
if( alt_stack )
m_new_action.sa_flags |= SA_ONSTACK;
#endif
BOOST_TEST_SYS_ASSERT( ::sigaction( m_sig, &m_new_action, &m_old_action ) != -1 );
}
//____________________________________________________________________________//
signal_action::~signal_action()
{
if( m_installed )
::sigaction( m_sig, &m_old_action , sigaction_ptr() );
}
//____________________________________________________________________________//
// ************************************************************************** //
// ************** boost::detail::signal_handler ************** //
// ************************************************************************** //
class signal_handler {
public:
// Constructor
explicit signal_handler( bool catch_system_errors, int timeout, bool attach_dbg, char* alt_stack );
// Destructor
~signal_handler();
// access methods
static sigjmp_buf& jump_buffer()
{
assert( !!s_active_handler );
return s_active_handler->m_sigjmp_buf;
}
static system_signal_exception& sys_sig()
{
assert( !!s_active_handler );
return s_active_handler->m_sys_sig;
}
private:
// Data members
signal_handler* m_prev_handler;
int m_timeout;
signal_action m_ILL_action;
signal_action m_FPE_action;
signal_action m_SEGV_action;
signal_action m_BUS_action;
signal_action m_CHLD_action;
signal_action m_POLL_action;
signal_action m_ABRT_action;
signal_action m_ALRM_action;
sigjmp_buf m_sigjmp_buf;
system_signal_exception m_sys_sig;
static signal_handler* s_active_handler;
};
// !! need to be placed in thread specific storage
typedef signal_handler* signal_handler_ptr;
signal_handler* signal_handler::s_active_handler = signal_handler_ptr();
//____________________________________________________________________________//
signal_handler::signal_handler( bool catch_system_errors, int timeout, bool attach_dbg, char* alt_stack )
: m_prev_handler( s_active_handler )
, m_timeout( timeout )
, m_ILL_action ( SIGILL , catch_system_errors, attach_dbg, alt_stack )
, m_FPE_action ( SIGFPE , catch_system_errors, attach_dbg, alt_stack )
, m_SEGV_action( SIGSEGV, catch_system_errors, attach_dbg, alt_stack )
, m_BUS_action ( SIGBUS , catch_system_errors, attach_dbg, alt_stack )
#ifndef BOOST_TEST_IGNORE_SIGCHLD
, m_CHLD_action( SIGCHLD, catch_system_errors, attach_dbg, alt_stack )
#endif
#ifdef BOOST_TEST_CATCH_SIGPOLL
, m_POLL_action( SIGPOLL, catch_system_errors, attach_dbg, alt_stack )
#endif
, m_ABRT_action( SIGABRT, catch_system_errors, attach_dbg, alt_stack )
, m_ALRM_action( SIGALRM, timeout > 0 , attach_dbg, alt_stack )
{
s_active_handler = this;
if( m_timeout > 0 ) {
::alarm( 0 );
::alarm( timeout );
}
#ifdef BOOST_TEST_USE_ALT_STACK
if( alt_stack ) {
stack_t sigstk;
std::memset( &sigstk, 0, sizeof(stack_t) );
BOOST_TEST_SYS_ASSERT( ::sigaltstack( 0, &sigstk ) != -1 );
if( sigstk.ss_flags & SS_DISABLE ) {
sigstk.ss_sp = alt_stack;
sigstk.ss_size = BOOST_TEST_ALT_STACK_SIZE;
sigstk.ss_flags = 0;
BOOST_TEST_SYS_ASSERT( ::sigaltstack( &sigstk, 0 ) != -1 );
}
}
#endif
}
//____________________________________________________________________________//
signal_handler::~signal_handler()
{
assert( s_active_handler == this );
if( m_timeout > 0 )
::alarm( 0 );
#ifdef BOOST_TEST_USE_ALT_STACK
stack_t sigstk = {};
sigstk.ss_size = MINSIGSTKSZ;
sigstk.ss_flags = SS_DISABLE;
BOOST_TEST_SYS_ASSERT( ::sigaltstack( &sigstk, 0 ) != -1 );
#endif
s_active_handler = m_prev_handler;
}
//____________________________________________________________________________//
// ************************************************************************** //
// ************** execution_monitor_signal_handler ************** //
// ************************************************************************** //
extern "C" {
static bool ignore_sigchild( siginfo_t* info )
{
return info->si_signo == SIGCHLD
#ifndef BOOST_TEST_LIMITED_SIGNAL_DETAILS
&& info->si_code == CLD_EXITED
#endif
#ifdef BOOST_TEST_IGNORE_NON_ZERO_CHILD_CODE
;
#else
&& (int)info->si_status == 0;
#endif
}
//____________________________________________________________________________//
static void execution_monitor_jumping_signal_handler( int sig, siginfo_t* info, void* context )
{
if( ignore_sigchild( info ) )
return;
signal_handler::sys_sig()( info, context );
siglongjmp( signal_handler::jump_buffer(), sig );
}
//____________________________________________________________________________//
static void execution_monitor_attaching_signal_handler( int sig, siginfo_t* info, void* context )
{
if( ignore_sigchild( info ) )
return;
if( !debug::attach_debugger( false ) )
execution_monitor_jumping_signal_handler( sig, info, context );
// debugger attached; it will handle the signal
BOOST_TEST_SYS_ASSERT( ::signal( sig, SIG_DFL ) != SIG_ERR );
}
//____________________________________________________________________________//
}
} // namespace detail
// ************************************************************************** //
// ************** execution_monitor::catch_signals ************** //
// ************************************************************************** //
int
execution_monitor::catch_signals( unit_test::callback0<int> const& F )
{
using namespace detail;
#if defined(__CYGWIN__)
p_catch_system_errors.value = false;
#endif
#ifdef BOOST_TEST_USE_ALT_STACK
if( !!p_use_alt_stack && !m_alt_stack )
m_alt_stack.reset( new char[BOOST_TEST_ALT_STACK_SIZE] );
#else
p_use_alt_stack.value = false;
#endif
signal_handler local_signal_handler( p_catch_system_errors, p_timeout, p_auto_start_dbg,
!p_use_alt_stack ? 0 : m_alt_stack.get() );
if( !sigsetjmp( signal_handler::jump_buffer(), 1 ) )
return detail::do_invoke( m_custom_translators , F );
else
throw local_signal_handler.sys_sig();
}
//____________________________________________________________________________//
#elif defined(BOOST_SEH_BASED_SIGNAL_HANDLING)
// ************************************************************************** //
// ************** Microsoft structured exception handling ************** //
// ************************************************************************** //
#if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x0564))
namespace { void _set_se_translator( void* ) {} }
#endif
namespace detail {
// ************************************************************************** //
// ************** boost::detail::system_signal_exception ************** //
// ************************************************************************** //
class system_signal_exception {
public:
// Constructor
explicit system_signal_exception( execution_monitor* em )
: m_em( em )
, m_se_id( 0 )
, m_fault_address( 0 )
, m_dir( false )
{}
void report() const;
int operator()( unsigned int id, _EXCEPTION_POINTERS* exps );
private:
// Data members
execution_monitor* m_em;
unsigned int m_se_id;
void* m_fault_address;
bool m_dir;
};
static void
seh_catch_preventer( unsigned int /* id */, _EXCEPTION_POINTERS* /* exps */ )
{
throw;
}
//____________________________________________________________________________//
int
system_signal_exception::operator()( unsigned int id, _EXCEPTION_POINTERS* exps )
{
const unsigned int MSFT_CPP_EXCEPT = 0xE06d7363; // EMSC
if( !m_em->p_catch_system_errors || (id == MSFT_CPP_EXCEPT) )
return EXCEPTION_CONTINUE_SEARCH;
if( !!m_em->p_auto_start_dbg && debug::attach_debugger( false ) ) {
m_em->p_catch_system_errors.value = false;
_set_se_translator( &seh_catch_preventer );
return EXCEPTION_CONTINUE_EXECUTION;
}
m_se_id = id;
if( m_se_id == EXCEPTION_ACCESS_VIOLATION && exps->ExceptionRecord->NumberParameters == 2 ) {
m_fault_address = (void*)exps->ExceptionRecord->ExceptionInformation[1];
m_dir = exps->ExceptionRecord->ExceptionInformation[0] == 0;
}
return EXCEPTION_EXECUTE_HANDLER;
}
//____________________________________________________________________________//
void
system_signal_exception::report() const
{
switch( m_se_id ) {
// cases classified as system_fatal_error
case EXCEPTION_ACCESS_VIOLATION: {
if( !m_fault_address )
detail::report_error( execution_exception::system_fatal_error, "memory access violation" );
else
detail::report_error(
execution_exception::system_fatal_error,
"memory access violation occurred at address 0x%08lx, while attempting to %s",
m_fault_address,
m_dir ? " read inaccessible data"
: " write to an inaccessible (or protected) address"
);
break;
}
case EXCEPTION_ILLEGAL_INSTRUCTION:
detail::report_error( execution_exception::system_fatal_error, "illegal instruction" );
break;
case EXCEPTION_PRIV_INSTRUCTION:
detail::report_error( execution_exception::system_fatal_error, "tried to execute an instruction whose operation is not allowed in the current machine mode" );
break;
case EXCEPTION_IN_PAGE_ERROR:
detail::report_error( execution_exception::system_fatal_error, "access to a memory page that is not present" );
break;
case EXCEPTION_STACK_OVERFLOW:
detail::report_error( execution_exception::system_fatal_error, "stack overflow" );
break;
case EXCEPTION_NONCONTINUABLE_EXCEPTION:
detail::report_error( execution_exception::system_fatal_error, "tried to continue execution after a non continuable exception occurred" );
break;
// cases classified as (non-fatal) system_trap
case EXCEPTION_DATATYPE_MISALIGNMENT:
detail::report_error( execution_exception::system_error, "data misalignment" );
break;
case EXCEPTION_INT_DIVIDE_BY_ZERO:
detail::report_error( execution_exception::system_error, "integer divide by zero" );
break;
case EXCEPTION_INT_OVERFLOW:
detail::report_error( execution_exception::system_error, "integer overflow" );
break;
case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
detail::report_error( execution_exception::system_error, "array bounds exceeded" );
break;
case EXCEPTION_FLT_DIVIDE_BY_ZERO:
detail::report_error( execution_exception::system_error, "floating point divide by zero" );
break;
case EXCEPTION_FLT_STACK_CHECK:
detail::report_error( execution_exception::system_error,
"stack overflowed or underflowed as the result of a floating-point operation" );
break;
case EXCEPTION_FLT_DENORMAL_OPERAND:
detail::report_error( execution_exception::system_error,
"operand of floating point operation is denormal" );
break;
# if 0 // !! ??
case EXCEPTION_FLT_INEXACT_RESULT:
detail::report_error( execution_exception::system_error,
"result of a floating-point operation cannot be represented exactly" );
break;
#endif
case EXCEPTION_FLT_OVERFLOW:
detail::report_error( execution_exception::system_error,
"exponent of a floating-point operation is greater than the magnitude allowed by the corresponding type" );
break;
case EXCEPTION_FLT_UNDERFLOW:
detail::report_error( execution_exception::system_error,
"exponent of a floating-point operation is less than the magnitude allowed by the corresponding type" );
break;
case EXCEPTION_FLT_INVALID_OPERATION:
detail::report_error( execution_exception::system_error, "floating point error" );
break;
case EXCEPTION_BREAKPOINT:
detail::report_error( execution_exception::system_error, "breakpoint encountered" );
break;
default:
detail::report_error( execution_exception::system_error, "unrecognized exception. Id: 0x%08lx", m_se_id );
break;
}
}
//____________________________________________________________________________//
// ************************************************************************** //
// ************** assert_reporting_function ************** //
// ************************************************************************** //
int BOOST_TEST_CALL_DECL
assert_reporting_function( int reportType, char* userMessage, int* )
{
switch( reportType ) {
case BOOST_TEST_CRT_ASSERT:
detail::report_error( execution_exception::user_error, userMessage );
return 1; // return value and retVal are not important since we never reach this line
case BOOST_TEST_CRT_ERROR:
detail::report_error( execution_exception::system_error, userMessage );
return 1; // return value and retVal are not important since we never reach this line
default:
return 0; // use usual reporting method
}
} // assert_reporting_function
//____________________________________________________________________________//
void BOOST_TEST_CALL_DECL
invalid_param_handler( wchar_t const* /* expr */,
wchar_t const* /* func */,
wchar_t const* /* file */,
unsigned int /* line */,
uintptr_t /* reserved */)
{
detail::report_error( execution_exception::user_error,
"Invalid parameter detected by C runtime library" );
}
//____________________________________________________________________________//
void BOOST_TEST_CALL_DECL
switch_fp_exceptions( bool on_off )
{
if( !on_off )
_clearfp();
int cw = ::_controlfp( 0, 0 );
int exceptions_mask = EM_INVALID|EM_DENORMAL|EM_ZERODIVIDE|EM_OVERFLOW|EM_UNDERFLOW;
if( on_off )
cw &= ~exceptions_mask; // Set the exception masks on, turn exceptions off
else
cw |= exceptions_mask; // Set the exception masks off, turn exceptions on
if( on_off )
_clearfp();
// Set the control word
::_controlfp( cw, MCW_EM );
}
//____________________________________________________________________________//
} // namespace detail
// ************************************************************************** //
// ************** execution_monitor::catch_signals ************** //
// ************************************************************************** //
int
execution_monitor::catch_signals( unit_test::callback0<int> const& F )
{
_invalid_parameter_handler old_iph = _invalid_parameter_handler();
BOOST_TEST_CRT_HOOK_TYPE old_crt_hook;
if( !p_catch_system_errors )
_set_se_translator( &detail::seh_catch_preventer );
else {
if( !!p_detect_fp_exceptions )
detail::switch_fp_exceptions( true );
old_crt_hook = BOOST_TEST_CRT_SET_HOOK( &detail::assert_reporting_function );
old_iph = _set_invalid_parameter_handler(
reinterpret_cast<_invalid_parameter_handler>( &detail::invalid_param_handler ) );
}
detail::system_signal_exception SSE( this );
int ret_val = 0;
__try {
__try {
ret_val = detail::do_invoke( m_custom_translators, F );
}
__except( SSE( GetExceptionCode(), GetExceptionInformation() ) ) {
throw SSE;
}
}
__finally {
if( !!p_catch_system_errors ) {
if( !!p_detect_fp_exceptions )
detail::switch_fp_exceptions( false );
BOOST_TEST_CRT_SET_HOOK( old_crt_hook );
_set_invalid_parameter_handler( old_iph );
}
}
return ret_val;
}
//____________________________________________________________________________//
#else // default signal handler
namespace detail {
class system_signal_exception {
public:
void report() const {}
};
} // namespace detail
int
execution_monitor::catch_signals( unit_test::callback0<int> const& F )
{
return detail::do_invoke( m_custom_translators , F );
}
//____________________________________________________________________________//
#endif // choose signal handler
// ************************************************************************** //
// ************** execution_monitor::execute ************** //
// ************************************************************************** //
int
execution_monitor::execute( unit_test::callback0<int> const& F )
{
if( debug::under_debugger() )
p_catch_system_errors.value = false;
try {
return catch_signals( F );
}
// Catch-clause reference arguments are a bit different from function
// arguments (ISO 15.3 paragraphs 18 & 19). Apparently const isn't
// required. Programmers ask for const anyhow, so we supply it. That's
// easier than answering questions about non-const usage.
catch( char const* ex )
{ detail::report_error( execution_exception::cpp_exception_error, "C string: %s", ex ); }
catch( std::string const& ex )
{ detail::report_error( execution_exception::cpp_exception_error, "std::string: %s", ex.c_str() ); }
// std:: exceptions
catch( std::bad_alloc const& ex )
{ detail::report_error( execution_exception::cpp_exception_error, "std::bad_alloc: %s", ex.what() ); }
#if BOOST_WORKAROUND(__BORLANDC__, <= 0x0551)
catch( std::bad_cast const& ex )
{ detail::report_error( execution_exception::cpp_exception_error, "std::bad_cast" ); }
catch( std::bad_typeid const& ex )
{ detail::report_error( execution_exception::cpp_exception_error, "std::bad_typeid" ); }
#else
catch( std::bad_cast const& ex )
{ detail::report_error( execution_exception::cpp_exception_error, "std::bad_cast: %s", ex.what() ); }
catch( std::bad_typeid const& ex )
{ detail::report_error( execution_exception::cpp_exception_error, "std::bad_typeid: %s", ex.what() ); }
#endif
catch( std::bad_exception const& ex )
{ detail::report_error( execution_exception::cpp_exception_error, "std::bad_exception: %s", ex.what() ); }
catch( std::domain_error const& ex )
{ detail::report_error( execution_exception::cpp_exception_error, "std::domain_error: %s", ex.what() ); }
catch( std::invalid_argument const& ex )
{ detail::report_error( execution_exception::cpp_exception_error, "std::invalid_argument: %s", ex.what() ); }
catch( std::length_error const& ex )
{ detail::report_error( execution_exception::cpp_exception_error, "std::length_error: %s", ex.what() ); }
catch( std::out_of_range const& ex )
{ detail::report_error( execution_exception::cpp_exception_error, "std::out_of_range: %s", ex.what() ); }
catch( std::range_error const& ex )
{ detail::report_error( execution_exception::cpp_exception_error, "std::range_error: %s", ex.what() ); }
catch( std::overflow_error const& ex )
{ detail::report_error( execution_exception::cpp_exception_error, "std::overflow_error: %s", ex.what() ); }
catch( std::underflow_error const& ex )
{ detail::report_error( execution_exception::cpp_exception_error, "std::underflow_error: %s", ex.what() ); }
catch( std::logic_error const& ex )
{ detail::report_error( execution_exception::cpp_exception_error, "std::logic_error: %s", ex.what() ); }
catch( std::runtime_error const& ex )
{ detail::report_error( execution_exception::cpp_exception_error, "std::runtime_error: %s", ex.what() ); }
catch( std::exception const& ex )
{ detail::report_error( execution_exception::cpp_exception_error, "std::exception: %s", ex.what() ); }
catch( system_error const& ex )
{ detail::report_error( execution_exception::cpp_exception_error, "system_error produced by: %s: %s",
ex.p_failed_exp.get(),
std::strerror( ex.p_errno ) ); }
catch( detail::system_signal_exception const& ex )
{ ex.report(); }
catch( execution_aborted const& )
{ return 0; }
catch( execution_exception const& )
{ throw; }
catch( ... )
{ detail::report_error( execution_exception::cpp_exception_error, "unknown type" ); }
return 0; // never reached; supplied to quiet compiler warnings
} // execute
//____________________________________________________________________________//
// ************************************************************************** //
// ************** system_error ************** //
// ************************************************************************** //
system_error::system_error( char const* exp )
#ifdef UNDER_CE
: p_errno( GetLastError() )
#else
: p_errno( errno )
#endif
, p_failed_exp( exp )
{}
//____________________________________________________________________________//
} // namespace boost
//____________________________________________________________________________//
#include <boost/test/detail/enable_warnings.hpp>
#endif // BOOST_TEST_EXECUTION_MONITOR_IPP_012205GER
Ifconfig: 7 Examples To Configure Network Interface
This article is written by Lakshmanan G. The ifconfig command is used to configure network interfaces; ifconfig stands for interface configurator. It is widely used to initialize a network interface and to enable or disable interfaces. In this article, let us review 7 common usages of the ifconfig command. Ifconfig, when invoked [...]
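The teaser above is cut off; as a hedged illustration of the kind of usage the full article covers, here are a few well-known ifconfig invocations (eth0 and the addresses are placeholder values of my own, not taken from the article):
# show all interfaces, including ones that are currently down
ifconfig -a
# bring an interface up or down
ifconfig eth0 up
ifconfig eth0 down
# assign an IP address and netmask to an interface
ifconfig eth0 192.168.1.10 netmask 255.255.255.0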
I created a mindmap according to this example: http://www.texample.net/tikz/examples/computer-science-mindmap/
Sometimes the branches contain too much information, so the map looks ugly, as in this example:
[screenshot of the rendered mind map]
This is the code I used:
\documentclass[landscape]{article}
\usepackage{tikz}
\usetikzlibrary{mindmap,trees}
\begin{document}
\pagestyle{empty}
\begin{tikzpicture}
\path[mindmap,concept color=black,text=white]
node[concept] {Main Topic}
[clockwise from=0]
child[concept color=green!50!black] {
node[concept] {definition}
child { node[concept] {this is a very long sentence ... very long sentence ... and it is even longer ...} }
child { node[concept] {this is a very long sentence ... very long sentence ... and it is even longer ...} }
}
child[concept color=blue] { node[concept] {Subtopic 1} }
child[concept color=red] { node[concept] {Subtopic 2} }
child[concept color=orange] { node[concept] {Subtopic 3} }
child[concept color=purple] { node[concept] {Subtopic 4} }
child[concept color=brown] { node[concept] {Subtopic 5} };
\end{tikzpicture}
\end{document}
Is there a way to create branches that are not displayed as circles but just as plain text along a line, as in this example (simply without the pictures)?
[example mind map image]
yes there is a possibility. but alas I don't know how to do it. – Nicholas Hamilton Dec 25 '12 at 17:23
@ADP I don't know if that helps the OP in no way, or if it hinders him/her instead – puk Jun 11 '13 at 6:12
@puk You may be right, but, you may also be wrong. – Nicholas Hamilton Jun 11 '13 at 21:48
1 Answer
Manually
One rather manual way would be to name the node with “Definition”, e.g. (def), and use normal nodes that get positioned right of def.
I have created a style for these so called “non-concepts”:
• rectangle (default),
• text width=12em,
• execute at begin node=\footnotesize instead of font=\footnotesize (which does not combine perfectly with text width).
Furthermore, I created a style cncc east for the to path from def to those non-concepts.
It consists of
• out=0 and in=180, and
• a to path.
As the usage of to inside the to path declaration failed, I fell back to the lower version \tikz@to@curve@path which calculates the path from
• tikztostart and
• tikztotarget
which are previously set to the .east and the .south west anchors, respectively.
Additionally the .south east corner of the target node is used to finalize the line below the node.
Code
\documentclass[tikz, border=2pt]{standalone}
\usetikzlibrary{mindmap,trees,positioning}
\makeatletter
\tikzset{
non-concept/.style={
rectangle,
execute at begin node=\footnotesize,
text width=12em,
},
cncc east/.style={% concept-non-concept-connection
% where the non-concept is east of the concept
out=0,
in=180,
to path={
\pgfextra{
\edef\tikztostart{\tikztostart.east}
\edef\tikztotargetB{\tikztotarget.south east}
\edef\tikztotarget{\tikztotarget.south west}
}
\tikz@to@curve@path% needs \makeatletter and \makeatother
-- (\tikztotargetB)
}
}
}
\makeatother
\begin{document}
\pagestyle{empty}
\begin{tikzpicture}
\path[mindmap, concept color=black, text=white]
node[concept] {Main Topic}[clockwise from=0]% named \___/ node
child[concept color=green!50!black] { node[concept] (def) {definition} }
child[concept color=blue] { node[concept] {Subtopic 1} }
child[concept color=red] { node[concept] {Subtopic 2} }
child[concept color=orange] { node[concept] {Subtopic 3} }
child[concept color=purple] { node[concept] {Subtopic 4} }
child[concept color=brown] { node[concept] {Subtopic 5} };
\tikzset{
every node/.style=non-concept,
node distance=1ex,
}
\node[right=1cm of def, anchor=south west] (know)
{What does each person know and not know about my topic?};
\node[below=of know] (react)
{How will each person react?
What concerns will I need to overcome?};
\node[above=of know] (audi)
{Who exactly is my audience?
What is each listener's role and reason for attending?};
\draw[
line width=.8pt,
shorten <=.06em,
]
(def) edge[cncc east] (know)
edge[cncc east] (react)
edge[cncc east] (audi);
\end{tikzpicture}
\end{document}
Output
[rendered output image]
(Kind of) automatically
I prefer the manual way as described above.
Setting the edge of parent path is not a big problem and is even easier.
But assigning the right styles at the right level is very annoying and one has to write up a custom growth function.
Code
\documentclass[tikz, border=2pt]{standalone}
\usetikzlibrary{mindmap,trees,positioning}
\makeatletter
\tikzset{
non-concept/.style={
rectangle,
text width=12em,
text=black,
align=left,
},
cncc east/.style={
edge from parent path={
(\tikzparentnode.east) to[out=0, in=180] (\tikzchildnode.south west)
-- (\tikzchildnode.south east)
}
}
}
\makeatother
\begin{document}
\pagestyle{empty}
\begin{tikzpicture}
\path[mindmap, concept color=black, text=white]
node[concept] {Main Topic}[clockwise from=0]% named \___/ node
child[concept color=green!50!black] {
node[concept] (def) {definition}
[
grow=right,
sibling distance=14ex,
]
child[level distance=5cm] { node[non-concept] {What does each person know and not know about my topic?} edge from parent[cncc east] }
child[level distance=5cm] { node[non-concept] {How will each person react? What concerns will I need to overcome?} edge from parent[cncc east] }
child[level distance=5cm] { node[non-concept] {Who exactly is my audience? What is each listener's role and reason for attending?} edge from parent[cncc east] }
}
child[concept color=blue] { node[concept] {Subtopic 1} }
child[concept color=red] { node[concept] {Subtopic 2} }
child[concept color=orange] { node[concept] {Subtopic 3} }
child[concept color=purple] { node[concept] {Subtopic 4} }
child[concept color=brown] { node[concept] {Subtopic 5} };
\end{tikzpicture}
\end{document}
Output
[rendered output image]
Many thanks for your great answer! I took the first method and it works perfectly. – Marco Dec 26 '12 at 8:41
React render engine
React is the render engine that we strongly suggest you use for any new templates. The only reason it is not the default render engine is to stay backward compatible.
• It makes it possible to debug your template (this is not possible with Nunjucks).
• It provides better error stack traces.
• Provides better support for separating code into more manageable chunks/components.
• The readability of the template is much better compared to Nunjucks syntax.
• Better tool support for development.
• Introduces testability of components which is not possible with Nunjucks.
When writing React templates you decide whether to use CommonJS, ES5, or ES6 modules since everything is bundled together before the rendering process takes over. We use our own React renderer which can be found in the Generator React SDK. There you can find information about how the renderer works or how we transpile your template files.
Your React template always requires @asyncapi/generator-react-sdk as a dependency; it provides the File component, which must be the root component for a file to be rendered. Furthermore, it provides some common components to make your development easier, like Text or Indent.
Let's consider a basic React template like the one below, called MyTemplate.js:
import { File, Text } from "@asyncapi/generator-react-sdk";
export default function({ asyncapi, params, originalAsyncAPI }) {
return (
<File name="asyncapi.md">
<Text>Some text that should render as is</Text>
</File>
);
}
The exported default function returns a File component as the root component, which the generator uses to determine what file should be generated. In our case, we override the default behavior of saving the file as MyTemplate.js and instead use the filename asyncapi.md. Within that file we render Some text that should render as is\n. Notice the \n character at the end, which is automatically added after the Text component.
For further information about components, props, etc., see the Generator React SDK.
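As an illustrative sketch only (not taken from the SDK docs; the file name, the rendered strings, and the assumption that Indent accepts a size prop are mine), a template can combine File, Text, and Indent like this:
import { File, Text, Indent } from "@asyncapi/generator-react-sdk";

export default function({ asyncapi }) {
  return (
    <File name="README.md">
      {/* Text renders its children followed by a newline */}
      <Text># {asyncapi.info().title()}</Text>
      {/* Indent shifts its children to the right, here assumed by two spaces */}
      <Indent size={2}>
        <Text>This line is rendered with a two-space indent.</Text>
      </Indent>
    </File>
  );
}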
Common assumptions
1. Generator renders all files located in the template directory if they meet the following conditions:
• File is the root component
• The file is not in the list of nonRenderableFiles in the template configuration
2. New lines are automatically added after each Text component.
3. The props you have access to in the rendering function (see the sketch after this list) are:
• asyncapi which is a parsed spec file object. Read the API of the Parser to understand what structure you have access to in this parameter.
• originalAsyncAPI which is an original spec file before it is parsed.
• params that contain the parameters provided when generating.
4. All the file templates are supported where the variables are provided after the default props as listed above.
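To make assumption 3 concrete, here is a small hedged sketch of my own (the param name version and the file name are invented for the example; a param like this would be passed on the command line, e.g. -p version=1.0.0):
import { File, Text } from "@asyncapi/generator-react-sdk";

export default function({ asyncapi, params, originalAsyncAPI }) {
  // asyncapi is the parsed document, params holds the generator parameters,
  // and originalAsyncAPI is the raw spec string before parsing.
  return (
    <File name="info.md">
      <Text>Title: {asyncapi.info().title()}</Text>
      <Text>Template param "version": {params.version || "not provided"}</Text>
      <Text>Raw spec length: {originalAsyncAPI.length} characters</Text>
    </File>
  );
}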
Debugging React template in VSCode
React makes it possible to debug your templates. For Visual Studio Code, we have created a boilerplate launch configuration to enable debugging in your template. Add the following launch configuration:
{
"version": "0.2.0",
"configurations": [
{
"type": "node",
"request": "launch",
"name": "Debug template",
"timeout": 10000,
"sourceMaps": true,
"args": [
"./asyncapi.yml",
"./template",
"--output",
"./output",
"--install",
"--force-write"
],
"program": "ag"
}
]
}
Now replace ./asyncapi.yml with your document of choice. Replace ./template with the path to your React template. You can now debug your template by adding any breakpoints you want and inspecting your code.
Pivotal GemFire® v9.5
Slow distributed-ack Messages
In systems with distributed-ack regions, a sudden large number of distributed-no-ack operations can cause distributed-ack operations to take a long time to complete.
The distributed-no-ack operations can come from anywhere. They may be updates to distributed-no-ack regions or they may be other distributed-no-ack operations, like destroys, performed on any region in the cache, including the distributed-ack regions.
The main reasons why a large number of distributed-no-ack messages may delay distributed-ack operations are:
• For any single socket connection, all operations are executed serially. If there are any other operations buffered for transmission when a distributed-ack is sent, the distributed-ack operation must wait to get to the front of the line before being transmitted. Of course, the operation’s calling process is also left waiting.
• The distributed-no-ack messages are buffered by their threads before transmission. If many messages are buffered and then sent to the socket at once, the line for transmission might be very long.
You can take these steps to reduce the impact of this problem:
1. If you’re using TCP, check whether you have socket conservation enabled for your members. It is configured by setting the GemFire property conserve-sockets to true. If enabled, each application’s threads will share sockets unless you override the setting at the thread level. Work with your application programmers to see whether you might disable sharing entirely, or at least for the threads that perform distributed-ack operations. These include operations on distributed-ack regions and also netSearches performed on regions of any distributed scope. (Note: netSearch is only performed on regions with a data-policy of empty, normal, or preloaded.) If you give each thread that performs distributed-ack operations its own socket, you effectively let it scoot to the front of the line ahead of the distributed-no-ack operations that are being performed by other threads. The thread-level override is done by calling the DistributedSystem.setThreadsSocketPolicy(false) method (see the sketch after this list).
2. Reduce your buffer sizes to slow down the distributed-no-ack operations. These changes slow down the threads performing distributed-no-ack operations and allow the distributed-ack operations to be transmitted in a more timely manner.
• If you’re using UDP (you either have multicast enabled regions or have set disable-tcp to true in gemfire.properties), consider reducing the byteAllowance of mcast-flow-control to something smaller than the default of 3.5 megabytes.
• If you’re using TCP/IP, reduce the socket-buffer-size in gemfire.properties.
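A minimal sketch of the thread-level override mentioned in step 1. This is illustrative only: the class name is invented, the package name assumes GemFire 9.x where the public API lives under org.apache.geode, and releaseThreadsSockets() is shown on the assumption that you want to return the dedicated socket once the thread is done.
import org.apache.geode.distributed.DistributedSystem;

public class DistributedAckWorker implements Runnable {
    @Override
    public void run() {
        // Give this thread its own socket so its distributed-ack operations
        // are not queued behind other threads' distributed-no-ack traffic.
        DistributedSystem.setThreadsSocketPolicy(false);
        try {
            // ... perform operations on distributed-ack regions here ...
        } finally {
            // Hand any thread-owned sockets back when this thread is finished.
            DistributedSystem.releaseThreadsSockets();
        }
    }
}
Threads that never call the override keep the shared-socket behavior of conserve-sockets=true, so only the latency-sensitive threads pay the cost of extra sockets.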