file_name | prefix | suffix | middle | fim_type
---|---|---|---|---|
large_string, lengths 4 to 69 | large_string, lengths 0 to 26.7k | large_string, lengths 0 to 24.8k | large_string, lengths 0 to 2.12k | large_string, 4 classes
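
Each preview row below lists the five fields of one example in the column order above, separated by `|`: the file name, the code before the masked span (prefix), the code after it (suffix), the held-out span itself (middle), and the split strategy (fim_type). The four fim_type values visible in this preview are random_line_split, identifier_name, identifier_body, and conditional_block. The sketch below shows one way such a row could be consumed for fill-in-the-middle training; the example row dictionary, its truncated field values, and the `<fim_*>` sentinel tokens are illustrative assumptions, not part of the dataset.

```python
# Minimal sketch of consuming one row of this FIM dataset.
# The example row is a truncated, hypothetical mirror of the first preview
# row; the <fim_*> sentinel tokens are placeholders and would be
# model-specific in practice.

def reconstruct_source(row: dict) -> str:
    # The original file is simply prefix + middle + suffix.
    return row["prefix"] + row["middle"] + row["suffix"]

def to_fim_prompt(row: dict) -> str:
    # Prefix-suffix-middle ordering: the model sees prefix and suffix and is
    # trained to generate the middle after the last sentinel token.
    return (
        "<fim_prefix>" + row["prefix"]
        + "<fim_suffix>" + row["suffix"]
        + "<fim_middle>"
    )

row = {
    "file_name": "iso_8859_9.rs",
    "prefix": 'pub fn charmap() -> [...] { return ["\\x00", // 0x0\n',
    "middle": '"\\xc7", // 0xc7\n',
    "suffix": '"\\xcb", // 0xcb\n];}\n',
    "fim_type": "random_line_split",
}

print(reconstruct_source(row))
print(to_fim_prompt(row))
```

The same reassembly applies to every row regardless of fim_type; only the way the middle span was chosen differs between the four classes.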
iso_8859_9.rs | pub fn charmap() -> [&'static str,.. 256]{ return ["\x00", // 0x0
"\x01", // 0x1
"\x02", // 0x2
"\x03", // 0x3
"\x04", // 0x4
"\x05", // 0x5
"\x06", // 0x6
"\x07", // 0x7
"\x08", // 0x8
"\t", // 0x9
"\n", // 0xa
"\x0b", // 0xb
"\x0c", // 0xc
"\r", // 0xd
"\x0e", // 0xe
"\x0f", // 0xf
"\x10", // 0x10
"\x11", // 0x11
"\x12", // 0x12
"\x13", // 0x13
"\x14", // 0x14
"\x15", // 0x15
"\x16", // 0x16
"\x17", // 0x17
"\x18", // 0x18
"\x19", // 0x19
"\x1a", // 0x1a
"\x1b", // 0x1b
"\x1c", // 0x1c
"\x1d", // 0x1d
"\x1e", // 0x1e
"\x1f", // 0x1f
" ", // 0x20
"!", // 0x21
"\"", // 0x22
"#", // 0x23
"$", // 0x24
"%", // 0x25
"&", // 0x26
"'", // 0x27
"(", // 0x28
")", // 0x29
"*", // 0x2a
"+", // 0x2b
",", // 0x2c
"-", // 0x2d
".", // 0x2e
"/", // 0x2f
"0", // 0x30
"1", // 0x31
"2", // 0x32
"3", // 0x33
"4", // 0x34
"5", // 0x35
"6", // 0x36
"7", // 0x37
"8", // 0x38
"9", // 0x39
":", // 0x3a
";", // 0x3b
"<", // 0x3c
"=", // 0x3d
">", // 0x3e
"?", // 0x3f
"@", // 0x40
"A", // 0x41
"B", // 0x42
"C", // 0x43
"D", // 0x44
"E", // 0x45
"F", // 0x46
"G", // 0x47
"H", // 0x48
"I", // 0x49
"J", // 0x4a
"K", // 0x4b
"L", // 0x4c
"M", // 0x4d
"N", // 0x4e
"O", // 0x4f
"P", // 0x50
"Q", // 0x51
"R", // 0x52
"S", // 0x53
"T", // 0x54
"U", // 0x55
"V", // 0x56
"W", // 0x57
"X", // 0x58
"Y", // 0x59
"Z", // 0x5a
"[", // 0x5b
"\\", // 0x5c
"]", // 0x5d
"^", // 0x5e
"_", // 0x5f
"`", // 0x60
"a", // 0x61
"b", // 0x62
"c", // 0x63
"d", // 0x64
"e", // 0x65
"f", // 0x66
"g", // 0x67
"h", // 0x68
"i", // 0x69
"j", // 0x6a
"k", // 0x6b
"l", // 0x6c
"m", // 0x6d
"n", // 0x6e
"o", // 0x6f
"p", // 0x70
"q", // 0x71
"r", // 0x72
"s", // 0x73
"t", // 0x74
"u", // 0x75
"v", // 0x76
"w", // 0x77
"x", // 0x78
"y", // 0x79
"z", // 0x7a
"{", // 0x7b
"|", // 0x7c
"}", // 0x7d
"~", // 0x7e
"\x7f", // 0x7f
"\x80", // 0x80
"\x81", // 0x81
"\x82", // 0x82
"\x83", // 0x83
"\x84", // 0x84
"\x85", // 0x85
"\x86", // 0x86
"\x87", // 0x87
"\x88", // 0x88
"\x89", // 0x89
"\x8a", // 0x8a
"\x8b", // 0x8b
"\x8c", // 0x8c
"\x8d", // 0x8d
"\x8e", // 0x8e
"\x8f", // 0x8f
"\x90", // 0x90
"\x91", // 0x91
"\x92", // 0x92
"\x93", // 0x93
"\x94", // 0x94
"\x95", // 0x95
"\x96", // 0x96
"\x97", // 0x97
"\x98", // 0x98
"\x99", // 0x99
"\x9a", // 0x9a
"\x9b", // 0x9b
"\x9c", // 0x9c
"\x9d", // 0x9d
"\x9e", // 0x9e
"\x9f", // 0x9f
"\xa0", // 0xa0
"\xa1", // 0xa1
"\xa2", // 0xa2
"\xa3", // 0xa3
"\xa4", // 0xa4
"\xa5", // 0xa5
"\xa6", // 0xa6
"\xa7", // 0xa7
"\xa8", // 0xa8
"\xa9", // 0xa9
"\xaa", // 0xaa
"\xab", // 0xab
"\xac", // 0xac
"\xad", // 0xad
"\xae", // 0xae
"\xaf", // 0xaf
"\xb0", // 0xb0
"\xb1", // 0xb1
"\xb2", // 0xb2
"\xb3", // 0xb3
"\xb4", // 0xb4
"\xb5", // 0xb5
"\xb6", // 0xb6
"\xb7", // 0xb7
"\xb8", // 0xb8
"\xb9", // 0xb9
"\xba", // 0xba
"\xbb", // 0xbb
"\xbc", // 0xbc
"\xbd", // 0xbd
"\xbe", // 0xbe
"\xbf", // 0xbf
"\xc0", // 0xc0
"\xc1", // 0xc1
"\xc2", // 0xc2
"\xc3", // 0xc3
"\xc4", // 0xc4
"\xc5", // 0xc5
"\xc6", // 0xc6 | "\xcb", // 0xcb
"\xcc", // 0xcc
"\xcd", // 0xcd
"\xce", // 0xce
"\xcf", // 0xcf
"\u011e", // 0xd0
"\xd1", // 0xd1
"\xd2", // 0xd2
"\xd3", // 0xd3
"\xd4", // 0xd4
"\xd5", // 0xd5
"\xd6", // 0xd6
"\xd7", // 0xd7
"\xd8", // 0xd8
"\xd9", // 0xd9
"\xda", // 0xda
"\xdb", // 0xdb
"\xdc", // 0xdc
"\u0130", // 0xdd
"\u015e", // 0xde
"\xdf", // 0xdf
"\xe0", // 0xe0
"\xe1", // 0xe1
"\xe2", // 0xe2
"\xe3", // 0xe3
"\xe4", // 0xe4
"\xe5", // 0xe5
"\xe6", // 0xe6
"\xe7", // 0xe7
"\xe8", // 0xe8
"\xe9", // 0xe9
"\xea", // 0xea
"\xeb", // 0xeb
"\xec", // 0xec
"\xed", // 0xed
"\xee", // 0xee
"\xef", // 0xef
"\u011f", // 0xf0
"\xf1", // 0xf1
"\xf2", // 0xf2
"\xf3", // 0xf3
"\xf4", // 0xf4
"\xf5", // 0xf5
"\xf6", // 0xf6
"\xf7", // 0xf7
"\xf8", // 0xf8
"\xf9", // 0xf9
"\xfa", // 0xfa
"\xfb", // 0xfb
"\xfc", // 0xfc
"\u0131", // 0xfd
"\u015f", // 0xfe
"\xff", // 0xff
];} | "\xc7", // 0xc7
"\xc8", // 0xc8
"\xc9", // 0xc9
"\xca", // 0xca | random_line_split |
iso_8859_9.rs | pub fn | () -> [&'static str,.. 256]{ return ["\x00", // 0x0
"\x01", // 0x1
"\x02", // 0x2
"\x03", // 0x3
"\x04", // 0x4
"\x05", // 0x5
"\x06", // 0x6
"\x07", // 0x7
"\x08", // 0x8
"\t", // 0x9
"\n", // 0xa
"\x0b", // 0xb
"\x0c", // 0xc
"\r", // 0xd
"\x0e", // 0xe
"\x0f", // 0xf
"\x10", // 0x10
"\x11", // 0x11
"\x12", // 0x12
"\x13", // 0x13
"\x14", // 0x14
"\x15", // 0x15
"\x16", // 0x16
"\x17", // 0x17
"\x18", // 0x18
"\x19", // 0x19
"\x1a", // 0x1a
"\x1b", // 0x1b
"\x1c", // 0x1c
"\x1d", // 0x1d
"\x1e", // 0x1e
"\x1f", // 0x1f
" ", // 0x20
"!", // 0x21
"\"", // 0x22
"#", // 0x23
"$", // 0x24
"%", // 0x25
"&", // 0x26
"'", // 0x27
"(", // 0x28
")", // 0x29
"*", // 0x2a
"+", // 0x2b
",", // 0x2c
"-", // 0x2d
".", // 0x2e
"/", // 0x2f
"0", // 0x30
"1", // 0x31
"2", // 0x32
"3", // 0x33
"4", // 0x34
"5", // 0x35
"6", // 0x36
"7", // 0x37
"8", // 0x38
"9", // 0x39
":", // 0x3a
";", // 0x3b
"<", // 0x3c
"=", // 0x3d
">", // 0x3e
"?", // 0x3f
"@", // 0x40
"A", // 0x41
"B", // 0x42
"C", // 0x43
"D", // 0x44
"E", // 0x45
"F", // 0x46
"G", // 0x47
"H", // 0x48
"I", // 0x49
"J", // 0x4a
"K", // 0x4b
"L", // 0x4c
"M", // 0x4d
"N", // 0x4e
"O", // 0x4f
"P", // 0x50
"Q", // 0x51
"R", // 0x52
"S", // 0x53
"T", // 0x54
"U", // 0x55
"V", // 0x56
"W", // 0x57
"X", // 0x58
"Y", // 0x59
"Z", // 0x5a
"[", // 0x5b
"\\", // 0x5c
"]", // 0x5d
"^", // 0x5e
"_", // 0x5f
"`", // 0x60
"a", // 0x61
"b", // 0x62
"c", // 0x63
"d", // 0x64
"e", // 0x65
"f", // 0x66
"g", // 0x67
"h", // 0x68
"i", // 0x69
"j", // 0x6a
"k", // 0x6b
"l", // 0x6c
"m", // 0x6d
"n", // 0x6e
"o", // 0x6f
"p", // 0x70
"q", // 0x71
"r", // 0x72
"s", // 0x73
"t", // 0x74
"u", // 0x75
"v", // 0x76
"w", // 0x77
"x", // 0x78
"y", // 0x79
"z", // 0x7a
"{", // 0x7b
"|", // 0x7c
"}", // 0x7d
"~", // 0x7e
"\x7f", // 0x7f
"\x80", // 0x80
"\x81", // 0x81
"\x82", // 0x82
"\x83", // 0x83
"\x84", // 0x84
"\x85", // 0x85
"\x86", // 0x86
"\x87", // 0x87
"\x88", // 0x88
"\x89", // 0x89
"\x8a", // 0x8a
"\x8b", // 0x8b
"\x8c", // 0x8c
"\x8d", // 0x8d
"\x8e", // 0x8e
"\x8f", // 0x8f
"\x90", // 0x90
"\x91", // 0x91
"\x92", // 0x92
"\x93", // 0x93
"\x94", // 0x94
"\x95", // 0x95
"\x96", // 0x96
"\x97", // 0x97
"\x98", // 0x98
"\x99", // 0x99
"\x9a", // 0x9a
"\x9b", // 0x9b
"\x9c", // 0x9c
"\x9d", // 0x9d
"\x9e", // 0x9e
"\x9f", // 0x9f
"\xa0", // 0xa0
"\xa1", // 0xa1
"\xa2", // 0xa2
"\xa3", // 0xa3
"\xa4", // 0xa4
"\xa5", // 0xa5
"\xa6", // 0xa6
"\xa7", // 0xa7
"\xa8", // 0xa8
"\xa9", // 0xa9
"\xaa", // 0xaa
"\xab", // 0xab
"\xac", // 0xac
"\xad", // 0xad
"\xae", // 0xae
"\xaf", // 0xaf
"\xb0", // 0xb0
"\xb1", // 0xb1
"\xb2", // 0xb2
"\xb3", // 0xb3
"\xb4", // 0xb4
"\xb5", // 0xb5
"\xb6", // 0xb6
"\xb7", // 0xb7
"\xb8", // 0xb8
"\xb9", // 0xb9
"\xba", // 0xba
"\xbb", // 0xbb
"\xbc", // 0xbc
"\xbd", // 0xbd
"\xbe", // 0xbe
"\xbf", // 0xbf
"\xc0", // 0xc0
"\xc1", // 0xc1
"\xc2", // 0xc2
"\xc3", // 0xc3
"\xc4", // 0xc4
"\xc5", // 0xc5
"\xc6", // 0xc6
"\xc7", // 0xc7
"\xc8", // 0xc8
"\xc9", // 0xc9
"\xca", // 0xca
"\xcb", // 0xcb
"\xcc", // 0xcc
"\xcd", // 0xcd
"\xce", // 0xce
"\xcf", // 0xcf
"\u011e", // 0xd0
"\xd1", // 0xd1
"\xd2", // 0xd2
"\xd3", // 0xd3
"\xd4", // 0xd4
"\xd5", // 0xd5
"\xd6", // 0xd6
"\xd7", // 0xd7
"\xd8", // 0xd8
"\xd9", // 0xd9
"\xda", // 0xda
"\xdb", // 0xdb
"\xdc", // 0xdc
"\u0130", // 0xdd
"\u015e", // 0xde
"\xdf", // 0xdf
"\xe0", // 0xe0
"\xe1", // 0xe1
"\xe2", // 0xe2
"\xe3", // 0xe3
"\xe4", // 0xe4
"\xe5", // 0xe5
"\xe6", // 0xe6
"\xe7", // 0xe7
"\xe8", // 0xe8
"\xe9", // 0xe9
"\xea", // 0xea
"\xeb", // 0xeb
"\xec", // 0xec
"\xed", // 0xed
"\xee", // 0xee
"\xef", // 0xef
"\u011f", // 0xf0
"\xf1", // 0xf1
"\xf2", // 0xf2
"\xf3", // 0xf3
"\xf4", // 0xf4
"\xf5", // 0xf5
"\xf6", // 0xf6
"\xf7", // 0xf7
"\xf8", // 0xf8
"\xf9", // 0xf9
"\xfa", // 0xfa
"\xfb", // 0xfb
"\xfc", // 0xfc
"\u0131", // 0xfd
"\u015f", // 0xfe
"\xff", // 0xff
];} | charmap | identifier_name |
iso_8859_9.rs | pub fn charmap() -> [&'static str,.. 256] | "\x14", // 0x14
"\x15", // 0x15
"\x16", // 0x16
"\x17", // 0x17
"\x18", // 0x18
"\x19", // 0x19
"\x1a", // 0x1a
"\x1b", // 0x1b
"\x1c", // 0x1c
"\x1d", // 0x1d
"\x1e", // 0x1e
"\x1f", // 0x1f
" ", // 0x20
"!", // 0x21
"\"", // 0x22
"#", // 0x23
"$", // 0x24
"%", // 0x25
"&", // 0x26
"'", // 0x27
"(", // 0x28
")", // 0x29
"*", // 0x2a
"+", // 0x2b
",", // 0x2c
"-", // 0x2d
".", // 0x2e
"/", // 0x2f
"0", // 0x30
"1", // 0x31
"2", // 0x32
"3", // 0x33
"4", // 0x34
"5", // 0x35
"6", // 0x36
"7", // 0x37
"8", // 0x38
"9", // 0x39
":", // 0x3a
";", // 0x3b
"<", // 0x3c
"=", // 0x3d
">", // 0x3e
"?", // 0x3f
"@", // 0x40
"A", // 0x41
"B", // 0x42
"C", // 0x43
"D", // 0x44
"E", // 0x45
"F", // 0x46
"G", // 0x47
"H", // 0x48
"I", // 0x49
"J", // 0x4a
"K", // 0x4b
"L", // 0x4c
"M", // 0x4d
"N", // 0x4e
"O", // 0x4f
"P", // 0x50
"Q", // 0x51
"R", // 0x52
"S", // 0x53
"T", // 0x54
"U", // 0x55
"V", // 0x56
"W", // 0x57
"X", // 0x58
"Y", // 0x59
"Z", // 0x5a
"[", // 0x5b
"\\", // 0x5c
"]", // 0x5d
"^", // 0x5e
"_", // 0x5f
"`", // 0x60
"a", // 0x61
"b", // 0x62
"c", // 0x63
"d", // 0x64
"e", // 0x65
"f", // 0x66
"g", // 0x67
"h", // 0x68
"i", // 0x69
"j", // 0x6a
"k", // 0x6b
"l", // 0x6c
"m", // 0x6d
"n", // 0x6e
"o", // 0x6f
"p", // 0x70
"q", // 0x71
"r", // 0x72
"s", // 0x73
"t", // 0x74
"u", // 0x75
"v", // 0x76
"w", // 0x77
"x", // 0x78
"y", // 0x79
"z", // 0x7a
"{", // 0x7b
"|", // 0x7c
"}", // 0x7d
"~", // 0x7e
"\x7f", // 0x7f
"\x80", // 0x80
"\x81", // 0x81
"\x82", // 0x82
"\x83", // 0x83
"\x84", // 0x84
"\x85", // 0x85
"\x86", // 0x86
"\x87", // 0x87
"\x88", // 0x88
"\x89", // 0x89
"\x8a", // 0x8a
"\x8b", // 0x8b
"\x8c", // 0x8c
"\x8d", // 0x8d
"\x8e", // 0x8e
"\x8f", // 0x8f
"\x90", // 0x90
"\x91", // 0x91
"\x92", // 0x92
"\x93", // 0x93
"\x94", // 0x94
"\x95", // 0x95
"\x96", // 0x96
"\x97", // 0x97
"\x98", // 0x98
"\x99", // 0x99
"\x9a", // 0x9a
"\x9b", // 0x9b
"\x9c", // 0x9c
"\x9d", // 0x9d
"\x9e", // 0x9e
"\x9f", // 0x9f
"\xa0", // 0xa0
"\xa1", // 0xa1
"\xa2", // 0xa2
"\xa3", // 0xa3
"\xa4", // 0xa4
"\xa5", // 0xa5
"\xa6", // 0xa6
"\xa7", // 0xa7
"\xa8", // 0xa8
"\xa9", // 0xa9
"\xaa", // 0xaa
"\xab", // 0xab
"\xac", // 0xac
"\xad", // 0xad
"\xae", // 0xae
"\xaf", // 0xaf
"\xb0", // 0xb0
"\xb1", // 0xb1
"\xb2", // 0xb2
"\xb3", // 0xb3
"\xb4", // 0xb4
"\xb5", // 0xb5
"\xb6", // 0xb6
"\xb7", // 0xb7
"\xb8", // 0xb8
"\xb9", // 0xb9
"\xba", // 0xba
"\xbb", // 0xbb
"\xbc", // 0xbc
"\xbd", // 0xbd
"\xbe", // 0xbe
"\xbf", // 0xbf
"\xc0", // 0xc0
"\xc1", // 0xc1
"\xc2", // 0xc2
"\xc3", // 0xc3
"\xc4", // 0xc4
"\xc5", // 0xc5
"\xc6", // 0xc6
"\xc7", // 0xc7
"\xc8", // 0xc8
"\xc9", // 0xc9
"\xca", // 0xca
"\xcb", // 0xcb
"\xcc", // 0xcc
"\xcd", // 0xcd
"\xce", // 0xce
"\xcf", // 0xcf
"\u011e", // 0xd0
"\xd1", // 0xd1
"\xd2", // 0xd2
"\xd3", // 0xd3
"\xd4", // 0xd4
"\xd5", // 0xd5
"\xd6", // 0xd6
"\xd7", // 0xd7
"\xd8", // 0xd8
"\xd9", // 0xd9
"\xda", // 0xda
"\xdb", // 0xdb
"\xdc", // 0xdc
"\u0130", // 0xdd
"\u015e", // 0xde
"\xdf", // 0xdf
"\xe0", // 0xe0
"\xe1", // 0xe1
"\xe2", // 0xe2
"\xe3", // 0xe3
"\xe4", // 0xe4
"\xe5", // 0xe5
"\xe6", // 0xe6
"\xe7", // 0xe7
"\xe8", // 0xe8
"\xe9", // 0xe9
"\xea", // 0xea
"\xeb", // 0xeb
"\xec", // 0xec
"\xed", // 0xed
"\xee", // 0xee
"\xef", // 0xef
"\u011f", // 0xf0
"\xf1", // 0xf1
"\xf2", // 0xf2
"\xf3", // 0xf3
"\xf4", // 0xf4
"\xf5", // 0xf5
"\xf6", // 0xf6
"\xf7", // 0xf7
"\xf8", // 0xf8
"\xf9", // 0xf9
"\xfa", // 0xfa
"\xfb", // 0xfb
"\xfc", // 0xfc
"\u0131", // 0xfd
"\u015f", // 0xfe
"\xff", // 0xff
];} | { return ["\x00", // 0x0
"\x01", // 0x1
"\x02", // 0x2
"\x03", // 0x3
"\x04", // 0x4
"\x05", // 0x5
"\x06", // 0x6
"\x07", // 0x7
"\x08", // 0x8
"\t", // 0x9
"\n", // 0xa
"\x0b", // 0xb
"\x0c", // 0xc
"\r", // 0xd
"\x0e", // 0xe
"\x0f", // 0xf
"\x10", // 0x10
"\x11", // 0x11
"\x12", // 0x12
"\x13", // 0x13 | identifier_body |
macro_parser.rs | [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
//!
//! - - - Advance over an `a`. - - - (this looks exactly like the last step)
//!
//! Remaining input: `b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b]
//!
//! - - - Advance over a `b`. - - -
//!
//! Remaining input: ``
//! eof: [a $( a )* a b ·]
pub use self::NamedMatch::*;
pub use self::ParseResult::*;
use self::TokenTreeOrTokenTreeVec::*;
use ast;
use ast::{TokenTree, Ident};
use ast::{TtDelimited, TtSequence, TtToken};
use codemap::{BytePos, mk_sp, Span};
use codemap;
use parse::lexer::*; //resolve bug?
use parse::ParseSess;
use parse::attr::ParserAttr;
use parse::parser::{LifetimeAndTypesWithoutColons, Parser};
use parse::token::{Eof, DocComment, MatchNt, SubstNt};
use parse::token::{Token, Nonterminal};
use parse::token;
use print::pprust;
use ptr::P;
use std::mem;
use std::rc::Rc;
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Vacant, Occupied};
// To avoid costly uniqueness checks, we require that `MatchSeq` always has
// a nonempty body.
#[derive(Clone)]
enum TokenTreeOrTokenTreeVec {
Tt(ast::TokenTree),
TtSeq(Rc<Vec<ast::TokenTree>>),
}
impl TokenTreeOrTokenTreeVec {
fn len(&self) -> usize {
match self {
&TtSeq(ref v) => v.len(),
&Tt(ref tt) => tt.len(),
}
}
fn get_tt(&self, index: usize) -> TokenTree {
match self {
&TtSeq(ref v) => v[index].clone(),
&Tt(ref tt) => tt.get_tt(index),
}
}
}
/// an unzipping of `TokenTree`s
#[derive(Clone)]
struct MatcherTtFrame {
elts: TokenTreeOrTokenTreeVec,
idx: usize,
}
#[derive(Clone)]
pub struct MatcherPos {
stack: Vec<MatcherTtFrame>,
top_elts: TokenTreeOrTokenTreeVec,
sep: Option<Token>,
idx: usize,
up: Option<Box<MatcherPos>>,
matches: Vec<Vec<Rc<NamedMatch>>>,
match_lo: usize,
match_cur: usize,
match_hi: usize,
sp_lo: BytePos,
}
pub fn count_names(ms: &[TokenTree]) -> usize {
ms.iter().fold(0, |count, elt| {
count + match elt {
&TtSequence(_, ref seq) => {
seq.num_captures | count_names(&delim.tts)
}
&TtToken(_, MatchNt(..)) => {
1
}
&TtToken(_, _) => 0,
}
})
}
pub fn initial_matcher_pos(ms: Rc<Vec<TokenTree>>, sep: Option<Token>, lo: BytePos)
-> Box<MatcherPos> {
let match_idx_hi = count_names(&ms[..]);
let matches: Vec<_> = (0..match_idx_hi).map(|_| Vec::new()).collect();
box MatcherPos {
stack: vec![],
top_elts: TtSeq(ms),
sep: sep,
idx: 0,
up: None,
matches: matches,
match_lo: 0,
match_cur: 0,
match_hi: match_idx_hi,
sp_lo: lo
}
}
/// NamedMatch is a pattern-match result for a single token::MATCH_NONTERMINAL:
/// so it is associated with a single ident in a parse, and all
/// `MatchedNonterminal`s in the NamedMatch have the same nonterminal type
/// (expr, item, etc). Each leaf in a single NamedMatch corresponds to a
/// single token::MATCH_NONTERMINAL in the TokenTree that produced it.
///
/// The in-memory structure of a particular NamedMatch represents the match
/// that occurred when a particular subset of a matcher was applied to a
/// particular token tree.
///
/// The width of each MatchedSeq in the NamedMatch, and the identity of the
/// `MatchedNonterminal`s, will depend on the token tree it was applied to:
/// each MatchedSeq corresponds to a single TTSeq in the originating
/// token tree. The depth of the NamedMatch structure will therefore depend
/// only on the nesting depth of `ast::TTSeq`s in the originating
/// token tree it was derived from.
pub enum NamedMatch {
MatchedSeq(Vec<Rc<NamedMatch>>, codemap::Span),
MatchedNonterminal(Nonterminal)
}
pub fn nameize(p_s: &ParseSess, ms: &[TokenTree], res: &[Rc<NamedMatch>])
-> HashMap<Ident, Rc<NamedMatch>> {
fn n_rec(p_s: &ParseSess, m: &TokenTree, res: &[Rc<NamedMatch>],
ret_val: &mut HashMap<Ident, Rc<NamedMatch>>, idx: &mut usize) {
match m {
&TtSequence(_, ref seq) => {
for next_m in &seq.tts {
n_rec(p_s, next_m, res, ret_val, idx)
}
}
&TtDelimited(_, ref delim) => {
for next_m in &delim.tts {
n_rec(p_s, next_m, res, ret_val, idx)
}
}
&TtToken(sp, MatchNt(bind_name, _, _, _)) => {
match ret_val.entry(bind_name) {
Vacant(spot) => {
spot.insert(res[*idx].clone());
*idx += 1;
}
Occupied(..) => {
let string = token::get_ident(bind_name);
p_s.span_diagnostic
.span_fatal(sp,
&format!("duplicated bind name: {}",
&string))
}
}
}
&TtToken(_, SubstNt(..)) => panic!("Cannot fill in a NT"),
&TtToken(_, _) => (),
}
}
let mut ret_val = HashMap::new();
let mut idx = 0;
for m in ms { n_rec(p_s, m, res, &mut ret_val, &mut idx) }
ret_val
}
pub enum ParseResult<T> {
Success(T),
Failure(codemap::Span, String),
Error(codemap::Span, String)
}
pub type NamedParseResult = ParseResult<HashMap<Ident, Rc<NamedMatch>>>;
pub type PositionalParseResult = ParseResult<Vec<Rc<NamedMatch>>>;
pub fn parse_or_else(sess: &ParseSess,
cfg: ast::CrateConfig,
rdr: TtReader,
ms: Vec<TokenTree> )
-> HashMap<Ident, Rc<NamedMatch>> {
match parse(sess, cfg, rdr, &ms[..]) {
Success(m) => m,
Failure(sp, str) => {
sess.span_diagnostic.span_fatal(sp, &str[..])
}
Error(sp, str) => {
sess.span_diagnostic.span_fatal(sp, &str[..])
}
}
}
/// Perform a token equality check, ignoring syntax context (that is, an
/// unhygienic comparison)
pub fn token_name_eq(t1 : &Token, t2 : &Token) -> bool {
match (t1,t2) {
(&token::Ident(id1,_),&token::Ident(id2,_))
| (&token::Lifetime(id1),&token::Lifetime(id2)) =>
id1.name == id2.name,
_ => *t1 == *t2
}
}
pub fn parse(sess: &ParseSess,
cfg: ast::CrateConfig,
mut rdr: TtReader,
ms: &[TokenTree])
-> NamedParseResult {
let mut cur_eis = Vec::new();
cur_eis.push(initial_matcher_pos(Rc::new(ms.iter()
.cloned()
.collect()),
None,
rdr.peek().sp.lo));
loop {
let mut bb_eis = Vec::new(); // black-box parsed by parser.rs
let mut next_eis = Vec::new(); // or proceed normally
let mut eof_eis = Vec::new();
let TokenAndSpan { tok, sp } = rdr.peek();
/* we append new items to this while we go */
loop {
let mut ei = match cur_eis.pop() {
None => break, /* for each Earley Item */
Some(ei) => ei,
};
// When unzipped trees end, remove them
while ei.idx >= ei.top_elts.len() {
match ei.stack.pop() {
Some(MatcherTtFrame { elts, idx }) => {
ei.top_elts = elts;
ei.idx = idx + 1;
}
None => break
}
}
let idx = ei.idx;
let len = ei.top_elts.len();
/* at end of sequence */
if idx >= len {
// can't move out of `match`es, so:
if ei.up.is_some() {
// hack: a matcher sequence is repeating iff it has a
// parent (the top level is just a container)
// disregard separator, try to go up
// (remove this condition to make trailing seps ok)
if idx == len {
// pop from the matcher position
let mut new_pos = ei.up.clone().unwrap();
// update matches (the MBE "parse tree") by appending
// each tree as a subtree.
// I bet this is a perf problem: we're preemptively
// doing a lot of array work that will get thrown away
// most of the time.
// Only touch the binders we have actually bound
for idx in ei.match_lo..ei.match_hi {
let sub = (ei.matches[idx]).clone();
(&mut new_pos.matches[idx])
.push(Rc::new(MatchedSeq(sub, mk_sp(ei.sp_lo,
sp.hi))));
}
new_pos.match_cur = ei.match_hi;
new_pos.idx += 1;
cur_eis.push(new_pos);
}
// can we go around again?
// the *_t vars are workarounds for the lack of unary move
match ei.sep {
Some(ref t) if idx == len => { // we need a separator
// i'm conflicted about whether this should be hygienic....
// though in this case, if the separators are never legal
// idents, it shouldn't matter.
if token_name_eq(&tok, t) { //pass the separator
let mut ei_t = ei.clone();
// ei_t.match_cur = ei_t.match_lo;
ei_t.idx += 1;
next_eis.push(ei_t);
}
}
_ => { // we don't need a separator
let mut ei_t = ei;
ei_t.match_cur = ei_t.match_lo;
ei_t.idx = 0;
cur_eis.push(ei_t);
}
}
} else {
eof_eis.push(ei);
}
} else {
match ei.top_elts.get_tt(idx) {
/* need to descend into sequence */
TtSequence(sp, seq) => {
if seq.op == ast::ZeroOrMore {
let mut new_ei = ei.clone();
new_ei.match_cur += seq.num_captures;
new_ei.idx += 1;
//we specifically matched zero repeats.
for idx in ei.match_cur..ei.match_cur + seq.num_captures {
(&mut new_ei.matches[idx]).push(Rc::new(MatchedSeq(vec![], sp)));
}
cur_eis.push(new_ei);
}
let matches: Vec<_> = (0..ei.matches.len())
.map(|_| Vec::new()).collect();
let ei_t = ei;
cur_eis.push(box MatcherPos {
stack: vec![],
sep: seq.separator.clone(),
idx: 0,
matches: matches,
match_lo: ei_t.match_cur,
match_cur: ei_t.match_cur,
match_hi: ei_t.match_cur + seq.num_captures,
up: Some(ei_t),
sp_lo: sp.lo,
top_elts: Tt(TtSequence(sp, seq)),
});
}
TtToken(_, MatchNt(..)) => {
// Built-in nonterminals never start with these tokens,
// so we can eliminate them from consideration.
match tok {
token::CloseDelim(_) => {},
_ => bb_eis.push(ei),
}
}
TtToken(sp, SubstNt(..)) => {
return Error(sp, "Cannot transcribe in macro LHS".to_string())
}
seq @ TtDelimited(..) | seq @ TtToken(_, DocComment(..)) => {
let lower_elts = mem::replace(&mut ei.top_elts, Tt(seq));
let idx = ei.idx;
ei.stack.push(MatcherTtFrame {
elts: lower_elts,
idx: idx,
});
ei.idx = 0;
cur_eis.push(ei);
}
TtToken(_, ref t) => {
let mut ei_t = ei.clone();
if token_name_eq(t,&tok) {
ei_t.idx += 1;
next_eis.push(ei_t);
}
}
}
}
}
/* error messages here could be improved with links to orig. rules */
if token_name_eq(&tok, &token::Eof) {
if eof_eis.len() == 1 {
let mut v = Vec::new();
for dv in &mut (&mut eof_eis[0]).matches {
v.push(dv.pop().unwrap());
}
return Success(nameize(sess, ms, &v[..]));
} else if eof_eis.len() > 1 {
return Error(sp, "ambiguity: multiple successful parses".to_string());
} else {
return Failure(sp, "unexpected end of macro invocation".to_string());
}
} else {
if (bb_eis.len() > 0 && next_eis.len() > 0)
|| bb_eis.len() > 1 {
let nts = bb_eis.iter().map(|ei| {
match ei.top_elts.get_tt(ei.idx) {
TtToken(_, MatchNt(bind, name, _, _)) => {
(format!("{} ('{}')",
| }
&TtDelimited(_, ref delim) => { | random_line_split |
macro_parser.rs | a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
//!
//! - - - Advance over an `a`. - - - (this looks exactly like the last step)
//!
//! Remaining input: `b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b]
//!
//! - - - Advance over a `b`. - - -
//!
//! Remaining input: ``
//! eof: [a $( a )* a b ·]
pub use self::NamedMatch::*;
pub use self::ParseResult::*;
use self::TokenTreeOrTokenTreeVec::*;
use ast;
use ast::{TokenTree, Ident};
use ast::{TtDelimited, TtSequence, TtToken};
use codemap::{BytePos, mk_sp, Span};
use codemap;
use parse::lexer::*; //resolve bug?
use parse::ParseSess;
use parse::attr::ParserAttr;
use parse::parser::{LifetimeAndTypesWithoutColons, Parser};
use parse::token::{Eof, DocComment, MatchNt, SubstNt};
use parse::token::{Token, Nonterminal};
use parse::token;
use print::pprust;
use ptr::P;
use std::mem;
use std::rc::Rc;
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Vacant, Occupied};
// To avoid costly uniqueness checks, we require that `MatchSeq` always has
// a nonempty body.
#[derive(Clone)]
enum TokenTreeOrTokenTreeVec {
Tt(ast::TokenTree),
TtSeq(Rc<Vec<ast::TokenTree>>),
}
impl TokenTreeOrTokenTreeVec {
fn len(&self) -> usize {
match self {
&TtSeq(ref v) => v.len(),
&Tt(ref tt) => tt.len(),
}
}
fn get_tt(&self, index: us | > TokenTree {
match self {
&TtSeq(ref v) => v[index].clone(),
&Tt(ref tt) => tt.get_tt(index),
}
}
}
/// an unzipping of `TokenTree`s
#[derive(Clone)]
struct MatcherTtFrame {
elts: TokenTreeOrTokenTreeVec,
idx: usize,
}
#[derive(Clone)]
pub struct MatcherPos {
stack: Vec<MatcherTtFrame>,
top_elts: TokenTreeOrTokenTreeVec,
sep: Option<Token>,
idx: usize,
up: Option<Box<MatcherPos>>,
matches: Vec<Vec<Rc<NamedMatch>>>,
match_lo: usize,
match_cur: usize,
match_hi: usize,
sp_lo: BytePos,
}
pub fn count_names(ms: &[TokenTree]) -> usize {
ms.iter().fold(0, |count, elt| {
count + match elt {
&TtSequence(_, ref seq) => {
seq.num_captures
}
&TtDelimited(_, ref delim) => {
count_names(&delim.tts)
}
&TtToken(_, MatchNt(..)) => {
1
}
&TtToken(_, _) => 0,
}
})
}
pub fn initial_matcher_pos(ms: Rc<Vec<TokenTree>>, sep: Option<Token>, lo: BytePos)
-> Box<MatcherPos> {
let match_idx_hi = count_names(&ms[..]);
let matches: Vec<_> = (0..match_idx_hi).map(|_| Vec::new()).collect();
box MatcherPos {
stack: vec![],
top_elts: TtSeq(ms),
sep: sep,
idx: 0,
up: None,
matches: matches,
match_lo: 0,
match_cur: 0,
match_hi: match_idx_hi,
sp_lo: lo
}
}
/// NamedMatch is a pattern-match result for a single token::MATCH_NONTERMINAL:
/// so it is associated with a single ident in a parse, and all
/// `MatchedNonterminal`s in the NamedMatch have the same nonterminal type
/// (expr, item, etc). Each leaf in a single NamedMatch corresponds to a
/// single token::MATCH_NONTERMINAL in the TokenTree that produced it.
///
/// The in-memory structure of a particular NamedMatch represents the match
/// that occurred when a particular subset of a matcher was applied to a
/// particular token tree.
///
/// The width of each MatchedSeq in the NamedMatch, and the identity of the
/// `MatchedNonterminal`s, will depend on the token tree it was applied to:
/// each MatchedSeq corresponds to a single TTSeq in the originating
/// token tree. The depth of the NamedMatch structure will therefore depend
/// only on the nesting depth of `ast::TTSeq`s in the originating
/// token tree it was derived from.
pub enum NamedMatch {
MatchedSeq(Vec<Rc<NamedMatch>>, codemap::Span),
MatchedNonterminal(Nonterminal)
}
pub fn nameize(p_s: &ParseSess, ms: &[TokenTree], res: &[Rc<NamedMatch>])
-> HashMap<Ident, Rc<NamedMatch>> {
fn n_rec(p_s: &ParseSess, m: &TokenTree, res: &[Rc<NamedMatch>],
ret_val: &mut HashMap<Ident, Rc<NamedMatch>>, idx: &mut usize) {
match m {
&TtSequence(_, ref seq) => {
for next_m in &seq.tts {
n_rec(p_s, next_m, res, ret_val, idx)
}
}
&TtDelimited(_, ref delim) => {
for next_m in &delim.tts {
n_rec(p_s, next_m, res, ret_val, idx)
}
}
&TtToken(sp, MatchNt(bind_name, _, _, _)) => {
match ret_val.entry(bind_name) {
Vacant(spot) => {
spot.insert(res[*idx].clone());
*idx += 1;
}
Occupied(..) => {
let string = token::get_ident(bind_name);
p_s.span_diagnostic
.span_fatal(sp,
&format!("duplicated bind name: {}",
&string))
}
}
}
&TtToken(_, SubstNt(..)) => panic!("Cannot fill in a NT"),
&TtToken(_, _) => (),
}
}
let mut ret_val = HashMap::new();
let mut idx = 0;
for m in ms { n_rec(p_s, m, res, &mut ret_val, &mut idx) }
ret_val
}
pub enum ParseResult<T> {
Success(T),
Failure(codemap::Span, String),
Error(codemap::Span, String)
}
pub type NamedParseResult = ParseResult<HashMap<Ident, Rc<NamedMatch>>>;
pub type PositionalParseResult = ParseResult<Vec<Rc<NamedMatch>>>;
pub fn parse_or_else(sess: &ParseSess,
cfg: ast::CrateConfig,
rdr: TtReader,
ms: Vec<TokenTree> )
-> HashMap<Ident, Rc<NamedMatch>> {
match parse(sess, cfg, rdr, &ms[..]) {
Success(m) => m,
Failure(sp, str) => {
sess.span_diagnostic.span_fatal(sp, &str[..])
}
Error(sp, str) => {
sess.span_diagnostic.span_fatal(sp, &str[..])
}
}
}
/// Perform a token equality check, ignoring syntax context (that is, an
/// unhygienic comparison)
pub fn token_name_eq(t1 : &Token, t2 : &Token) -> bool {
match (t1,t2) {
(&token::Ident(id1,_),&token::Ident(id2,_))
| (&token::Lifetime(id1),&token::Lifetime(id2)) =>
id1.name == id2.name,
_ => *t1 == *t2
}
}
pub fn parse(sess: &ParseSess,
cfg: ast::CrateConfig,
mut rdr: TtReader,
ms: &[TokenTree])
-> NamedParseResult {
let mut cur_eis = Vec::new();
cur_eis.push(initial_matcher_pos(Rc::new(ms.iter()
.cloned()
.collect()),
None,
rdr.peek().sp.lo));
loop {
let mut bb_eis = Vec::new(); // black-box parsed by parser.rs
let mut next_eis = Vec::new(); // or proceed normally
let mut eof_eis = Vec::new();
let TokenAndSpan { tok, sp } = rdr.peek();
/* we append new items to this while we go */
loop {
let mut ei = match cur_eis.pop() {
None => break, /* for each Earley Item */
Some(ei) => ei,
};
// When unzipped trees end, remove them
while ei.idx >= ei.top_elts.len() {
match ei.stack.pop() {
Some(MatcherTtFrame { elts, idx }) => {
ei.top_elts = elts;
ei.idx = idx + 1;
}
None => break
}
}
let idx = ei.idx;
let len = ei.top_elts.len();
/* at end of sequence */
if idx >= len {
// can't move out of `match`es, so:
if ei.up.is_some() {
// hack: a matcher sequence is repeating iff it has a
// parent (the top level is just a container)
// disregard separator, try to go up
// (remove this condition to make trailing seps ok)
if idx == len {
// pop from the matcher position
let mut new_pos = ei.up.clone().unwrap();
// update matches (the MBE "parse tree") by appending
// each tree as a subtree.
// I bet this is a perf problem: we're preemptively
// doing a lot of array work that will get thrown away
// most of the time.
// Only touch the binders we have actually bound
for idx in ei.match_lo..ei.match_hi {
let sub = (ei.matches[idx]).clone();
(&mut new_pos.matches[idx])
.push(Rc::new(MatchedSeq(sub, mk_sp(ei.sp_lo,
sp.hi))));
}
new_pos.match_cur = ei.match_hi;
new_pos.idx += 1;
cur_eis.push(new_pos);
}
// can we go around again?
// the *_t vars are workarounds for the lack of unary move
match ei.sep {
Some(ref t) if idx == len => { // we need a separator
// i'm conflicted about whether this should be hygienic....
// though in this case, if the separators are never legal
// idents, it shouldn't matter.
if token_name_eq(&tok, t) { //pass the separator
let mut ei_t = ei.clone();
// ei_t.match_cur = ei_t.match_lo;
ei_t.idx += 1;
next_eis.push(ei_t);
}
}
_ => { // we don't need a separator
let mut ei_t = ei;
ei_t.match_cur = ei_t.match_lo;
ei_t.idx = 0;
cur_eis.push(ei_t);
}
}
} else {
eof_eis.push(ei);
}
} else {
match ei.top_elts.get_tt(idx) {
/* need to descend into sequence */
TtSequence(sp, seq) => {
if seq.op == ast::ZeroOrMore {
let mut new_ei = ei.clone();
new_ei.match_cur += seq.num_captures;
new_ei.idx += 1;
//we specifically matched zero repeats.
for idx in ei.match_cur..ei.match_cur + seq.num_captures {
(&mut new_ei.matches[idx]).push(Rc::new(MatchedSeq(vec![], sp)));
}
cur_eis.push(new_ei);
}
let matches: Vec<_> = (0..ei.matches.len())
.map(|_| Vec::new()).collect();
let ei_t = ei;
cur_eis.push(box MatcherPos {
stack: vec![],
sep: seq.separator.clone(),
idx: 0,
matches: matches,
match_lo: ei_t.match_cur,
match_cur: ei_t.match_cur,
match_hi: ei_t.match_cur + seq.num_captures,
up: Some(ei_t),
sp_lo: sp.lo,
top_elts: Tt(TtSequence(sp, seq)),
});
}
TtToken(_, MatchNt(..)) => {
// Built-in nonterminals never start with these tokens,
// so we can eliminate them from consideration.
match tok {
token::CloseDelim(_) => {},
_ => bb_eis.push(ei),
}
}
TtToken(sp, SubstNt(..)) => {
return Error(sp, "Cannot transcribe in macro LHS".to_string())
}
seq @ TtDelimited(..) | seq @ TtToken(_, DocComment(..)) => {
let lower_elts = mem::replace(&mut ei.top_elts, Tt(seq));
let idx = ei.idx;
ei.stack.push(MatcherTtFrame {
elts: lower_elts,
idx: idx,
});
ei.idx = 0;
cur_eis.push(ei);
}
TtToken(_, ref t) => {
let mut ei_t = ei.clone();
if token_name_eq(t,&tok) {
ei_t.idx += 1;
next_eis.push(ei_t);
}
}
}
}
}
/* error messages here could be improved with links to orig. rules */
if token_name_eq(&tok, &token::Eof) {
if eof_eis.len() == 1 {
let mut v = Vec::new();
for dv in &mut (&mut eof_eis[0]).matches {
v.push(dv.pop().unwrap());
}
return Success(nameize(sess, ms, &v[..]));
} else if eof_eis.len() > 1 {
return Error(sp, "ambiguity: multiple successful parses".to_string());
} else {
return Failure(sp, "unexpected end of macro invocation".to_string());
}
} else {
if (bb_eis.len() > 0 && next_eis.len() > 0)
|| bb_eis.len() > 1 {
let nts = bb_eis.iter().map(|ei| {
match ei.top_elts.get_tt(ei.idx) {
TtToken(_, MatchNt(bind, name, _, _)) => {
(format!("{} ('{}')",
| ize) - | identifier_name |
macro_parser.rs | a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
//!
//! - - - Advance over an `a`. - - - (this looks exactly like the last step)
//!
//! Remaining input: `b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b]
//!
//! - - - Advance over a `b`. - - -
//!
//! Remaining input: ``
//! eof: [a $( a )* a b ·]
pub use self::NamedMatch::*;
pub use self::ParseResult::*;
use self::TokenTreeOrTokenTreeVec::*;
use ast;
use ast::{TokenTree, Ident};
use ast::{TtDelimited, TtSequence, TtToken};
use codemap::{BytePos, mk_sp, Span};
use codemap;
use parse::lexer::*; //resolve bug?
use parse::ParseSess;
use parse::attr::ParserAttr;
use parse::parser::{LifetimeAndTypesWithoutColons, Parser};
use parse::token::{Eof, DocComment, MatchNt, SubstNt};
use parse::token::{Token, Nonterminal};
use parse::token;
use print::pprust;
use ptr::P;
use std::mem;
use std::rc::Rc;
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Vacant, Occupied};
// To avoid costly uniqueness checks, we require that `MatchSeq` always has
// a nonempty body.
#[derive(Clone)]
enum TokenTreeOrTokenTreeVec {
Tt(ast::TokenTree),
TtSeq(Rc<Vec<ast::TokenTree>>),
}
impl TokenTreeOrTokenTreeVec {
fn len(&self) -> usize {
match self {
&TtSeq(ref v) => v.len(),
&Tt(ref tt) => tt.len(),
}
}
fn get_tt(&self, index: usize) -> TokenTree {
match self {
&TtSeq(ref v) => v[index].clone(),
&Tt(ref tt) => tt.get_tt(index),
}
}
}
/// an unzipping of `TokenTree`s
#[derive(Clone)]
struct MatcherTtFrame {
elts: TokenTreeOrTokenTreeVec,
idx: usize,
}
#[derive(Clone)]
pub struct MatcherPos {
stack: Vec<MatcherTtFrame>,
top_elts: TokenTreeOrTokenTreeVec,
sep: Option<Token>,
idx: usize,
up: Option<Box<MatcherPos>>,
matches: Vec<Vec<Rc<NamedMatch>>>,
match_lo: usize,
match_cur: usize,
match_hi: usize,
sp_lo: BytePos,
}
pub fn count_names(ms: &[TokenTree]) -> usize {
ms.iter().fold(0, |count, elt| {
count + match elt {
&TtSequence(_, ref seq) => {
seq.num_captures
}
&TtDelimited(_, ref delim) => {
count_names(&delim.tts)
}
&TtToken(_, MatchNt(..)) => {
1
}
&TtToken(_, _) => 0,
}
})
}
pub fn initial_matcher_pos(ms: Rc<Vec<TokenTree>>, sep: Option<Token>, lo: BytePos)
-> Box<MatcherPos> {
let match_idx_hi = count_names(&ms[..]);
let matches: Vec<_> = (0..match_idx_hi).map(|_| Vec::new()).collect();
box MatcherPos {
stack: vec![],
top_elts: TtSeq(ms),
sep: sep,
idx: 0,
up: None,
matches: matches,
match_lo: 0,
match_cur: 0,
match_hi: match_idx_hi,
sp_lo: lo
}
}
/// NamedMatch is a pattern-match result for a single token::MATCH_NONTERMINAL:
/// so it is associated with a single ident in a parse, and all
/// `MatchedNonterminal`s in the NamedMatch have the same nonterminal type
/// (expr, item, etc). Each leaf in a single NamedMatch corresponds to a
/// single token::MATCH_NONTERMINAL in the TokenTree that produced it.
///
/// The in-memory structure of a particular NamedMatch represents the match
/// that occurred when a particular subset of a matcher was applied to a
/// particular token tree.
///
/// The width of each MatchedSeq in the NamedMatch, and the identity of the
/// `MatchedNonterminal`s, will depend on the token tree it was applied to:
/// each MatchedSeq corresponds to a single TTSeq in the originating
/// token tree. The depth of the NamedMatch structure will therefore depend
/// only on the nesting depth of `ast::TTSeq`s in the originating
/// token tree it was derived from.
pub enum NamedMatch {
MatchedSeq(Vec<Rc<NamedMatch>>, codemap::Span),
MatchedNonterminal(Nonterminal)
}
pub fn nameize(p_s: &ParseSess, ms: &[TokenTree], res: &[Rc<NamedMatch>])
-> HashMap<Ident, Rc<NamedMatch>> {
fn n_rec(p_s: &ParseSess, m: &TokenTree, res: &[Rc<NamedMatch>],
ret_val: &mut HashMap<Ident, Rc<NamedMatch>>, idx: &mut usize) {
match m {
&TtSequence(_, ref seq) => {
for next_m in &seq.tts {
n_rec(p_s, next_m, res, ret_val, idx)
}
}
&TtDelimited(_, ref delim) => {
for next_m in &delim.tts {
n_rec(p_s, next_m, res, ret_val, idx)
}
}
&TtToken(sp, MatchNt(bind_name, _, _, _)) => {
match ret_val.entry(bind_name) {
Vacant(spot) => {
spot.insert(res[*idx].clone());
*idx += 1;
}
Occupied(..) => {
let string = token::get_ident(bind_name);
p_s.span_diagnostic
.span_fatal(sp,
&format!("duplicated bind name: {}",
&string))
}
}
}
&TtToken(_, SubstNt(..)) => panic!("Cannot fill in a NT"),
&TtToken(_, _) => (),
}
}
let mut ret_val = HashMap::new();
let mut idx = 0;
for m in ms { n_rec(p_s, m, res, &mut ret_val, &mut idx) }
ret_val
}
pub enum ParseResult<T> {
Success(T),
Failure(codemap::Span, String),
Error(codemap::Span, String)
}
pub type NamedParseResult = ParseResult<HashMap<Ident, Rc<NamedMatch>>>;
pub type PositionalParseResult = ParseResult<Vec<Rc<NamedMatch>>>;
pub fn parse_or_else(sess: &ParseSess,
cfg: ast::CrateConfig,
rdr: TtReader,
ms: Vec<TokenTree> )
-> HashMap<Ident, Rc<NamedMatch>> {
match parse(sess, cfg, rdr, &ms[..]) {
Success(m) => m,
Failure(sp, str) => {
sess.span_diagnostic.span_fatal(sp, &str[..])
}
Error(sp, str) => {
sess.span_diagnostic.span_fatal(sp, &str[..])
}
}
}
/// Perform a token equality check, ignoring syntax context (that is, an
/// unhygienic comparison)
pub fn token_name_eq(t1 : &Token, t2 : &Token) -> bool {
match (t1,t2) {
(&token::Ident(id1,_),&token::Ident(id2,_))
| (&token::Lifetime(id1),&token::Lifetime(id2)) =>
id1.name == id2.name,
_ => *t1 == *t2
}
}
pub fn parse(sess: &ParseSess,
cfg: ast::CrateConfig,
mut rdr: TtReader,
ms: &[TokenTree])
-> NamedParseResult {
let mut cur_eis = Vec::new();
cur_eis.push(initial_matcher_pos(Rc::new(ms.iter()
.cloned()
.collect()),
None,
rdr.peek().sp.lo));
loop {
let mut bb_eis = Vec::new(); // black-box parsed by parser.rs
let mut next_eis = Vec::new(); // or proceed normally
let mut eof_eis = Vec::new();
let TokenAndSpan { tok, sp } = rdr.peek();
/* we append new items to this while we go */
loop {
let mut ei = match cur_eis.pop() {
None => break, /* for each Earley Item */
Some(ei) => ei,
};
// When unzipped trees end, remove them
while ei.idx >= ei.top_elts.len() {
match ei.stack.pop() {
Some(MatcherTtFrame { elts, idx }) => {
ei.top_elts = elts;
ei.idx = idx + 1;
}
None => break
}
}
let idx = ei.idx;
let len = ei.top_elts.len();
/* at end of sequence */
if idx >= len {
// can't move out of `match`es, so:
if ei.up.is_some() {
// hack: a matcher sequence is repeating iff it has a
// parent (the top level is just a container)
// disregard separator, try to go up
// (remove this condition to make trailing seps ok)
if idx == len {
// pop from the matcher position
let mut new_pos = ei.up.clone().unwrap();
// update matches (the MBE "parse tree") by appending
// each tree as a subtree.
// I bet this is a perf problem: we're preemptively
// doing a lot of array work that will get thrown away
// most of the time.
// Only touch the binders we have actually bound
for idx in ei.match_lo..ei.match_hi {
let sub = (ei.matches[idx]).clone();
(&mut new_pos.matches[idx])
.push(Rc::new(MatchedSeq(sub, mk_sp(ei.sp_lo,
sp.hi))));
}
new_pos.match_cur = ei.match_hi;
new_pos.idx += 1;
cur_eis.push(new_pos);
}
// can we go around again?
// the *_t vars are workarounds for the lack of unary move
match ei.sep {
Some(ref t) if idx == len => { // we need a separator
// i'm conflicted about whether this should be hygienic....
// though in this case, if the separators are never legal
// idents, it shouldn't matter.
if token_name_eq(&tok, t) { //pass the separator
let mut ei_t = ei.clone();
// ei_t.match_cur = ei_t.match_lo;
ei_t.idx += 1;
next_eis.push(ei_t);
}
}
_ => { // we don't need a separator
let mut ei_t = ei;
ei_t.match_cur = ei_t.match_lo;
ei_t.idx = 0;
cur_eis.push(ei_t);
}
}
} else {
eof_eis.push(ei);
}
} else {
match ei.top_elts.get_tt(idx) {
/* need to descend into sequence */
TtSequence(sp, seq) => {
if seq.op == ast::ZeroOrMore {
let mut new_ei = ei.clone();
new_ei.match_cur += seq.num_captures;
new_ei.idx += 1;
//we specifically matched zero repeats.
for idx in ei.match_cur..ei.match_cur + seq.num_captures {
(&mut new_ei.matches[idx]).push(Rc::new(MatchedSeq(vec![], sp)));
}
cur_eis.push(new_ei);
}
let matches: Vec<_> = (0..ei.matches.len())
.map(|_| Vec::new()).collect();
let ei_t = ei;
cur_eis.push(box MatcherPos {
stack: vec![],
sep: seq.separator.clone(),
idx: 0,
matches: matches,
match_lo: ei_t.match_cur,
match_cur: ei_t.match_cur,
match_hi: ei_t.match_cur + seq.num_captures,
up: Some(ei_t),
sp_lo: sp.lo,
top_elts: Tt(TtSequence(sp, seq)),
});
}
TtToken(_, MatchNt(..)) => {
| Token(sp, SubstNt(..)) => {
return Error(sp, "Cannot transcribe in macro LHS".to_string())
}
seq @ TtDelimited(..) | seq @ TtToken(_, DocComment(..)) => {
let lower_elts = mem::replace(&mut ei.top_elts, Tt(seq));
let idx = ei.idx;
ei.stack.push(MatcherTtFrame {
elts: lower_elts,
idx: idx,
});
ei.idx = 0;
cur_eis.push(ei);
}
TtToken(_, ref t) => {
let mut ei_t = ei.clone();
if token_name_eq(t,&tok) {
ei_t.idx += 1;
next_eis.push(ei_t);
}
}
}
}
}
/* error messages here could be improved with links to orig. rules */
if token_name_eq(&tok, &token::Eof) {
if eof_eis.len() == 1 {
let mut v = Vec::new();
for dv in &mut (&mut eof_eis[0]).matches {
v.push(dv.pop().unwrap());
}
return Success(nameize(sess, ms, &v[..]));
} else if eof_eis.len() > 1 {
return Error(sp, "ambiguity: multiple successful parses".to_string());
} else {
return Failure(sp, "unexpected end of macro invocation".to_string());
}
} else {
if (bb_eis.len() > 0 && next_eis.len() > 0)
|| bb_eis.len() > 1 {
let nts = bb_eis.iter().map(|ei| {
match ei.top_elts.get_tt(ei.idx) {
TtToken(_, MatchNt(bind, name, _, _)) => {
(format!("{} ('{}')",
| // Built-in nonterminals never start with these tokens,
// so we can eliminate them from consideration.
match tok {
token::CloseDelim(_) => {},
_ => bb_eis.push(ei),
}
}
Tt | conditional_block |
macro_crate_test.rs | // Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// force-host
#![feature(plugin_registrar, quote, rustc_private)]
extern crate syntax;
extern crate rustc;
use syntax::ast::{self, TokenTree, Item, MetaItem, ImplItem, TraitItem};
use syntax::codemap::Span;
use syntax::ext::base::*;
use syntax::parse::{self, token};
use syntax::ptr::P;
use rustc::plugin::Registry;
#[macro_export]
macro_rules! exported_macro { () => (2) }
macro_rules! unexported_macro { () => (3) }
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_macro("make_a_1", expand_make_a_1);
reg.register_macro("identity", expand_identity);
reg.register_syntax_extension(
token::intern("into_foo"),
// FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
Modifier(Box::new(expand_into_foo)));
reg.register_syntax_extension(
token::intern("into_multi_foo"),
// FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
MultiModifier(Box::new(expand_into_foo_multi)));
reg.register_syntax_extension(
token::intern("duplicate"),
// FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
MultiDecorator(Box::new(expand_duplicate)));
}
fn expand_make_a_1(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree])
-> Box<MacResult+'static> {
if!tts.is_empty() {
cx.span_fatal(sp, "make_a_1 takes no arguments");
}
MacEager::expr(quote_expr!(cx, 1))
}
// See Issue #15750
fn expand_identity(cx: &mut ExtCtxt, _span: Span, tts: &[TokenTree])
-> Box<MacResult+'static> {
// Parse an expression and emit it unchanged.
let mut parser = parse::new_parser_from_tts(cx.parse_sess(),
cx.cfg(), tts.to_vec());
let expr = parser.parse_expr();
MacEager::expr(quote_expr!(&mut *cx, $expr))
}
fn expand_into_foo(cx: &mut ExtCtxt, sp: Span, attr: &MetaItem, it: P<Item>)
-> P<Item> {
P(Item {
attrs: it.attrs.clone(),
..(*quote_item!(cx, enum Foo { Bar, Baz }).unwrap()).clone()
})
}
fn expand_into_foo_multi(cx: &mut ExtCtxt,
sp: Span,
attr: &MetaItem,
it: Annotatable) -> Annotatable {
match it {
Annotatable::Item(it) => {
Annotatable::Item(P(Item {
attrs: it.attrs.clone(),
..(*quote_item!(cx, enum Foo2 { Bar2, Baz2 }).unwrap()).clone()
}))
}
Annotatable::ImplItem(it) => {
quote_item!(cx, impl X { fn foo(&self) -> i32 { 42 } }).unwrap().and_then(|i| {
match i.node {
ast::ItemImpl(_, _, _, _, _, mut items) => {
Annotatable::ImplItem(items.pop().expect("impl method not found"))
}
_ => unreachable!("impl parsed to something other than impl")
}
})
}
Annotatable::TraitItem(it) => {
quote_item!(cx, trait X { fn foo(&self) -> i32 { 0 } }).unwrap().and_then(|i| {
match i.node {
ast::ItemTrait(_, _, _, mut items) => {
Annotatable::TraitItem(items.pop().expect("trait method not found"))
}
_ => unreachable!("trait parsed to something other than trait")
}
})
}
}
}
// Create a duplicate of the annotatable, based on the MetaItem
fn expand_duplicate(cx: &mut ExtCtxt,
sp: Span,
mi: &MetaItem,
it: &Annotatable,
push: &mut FnMut(Annotatable))
{
let copy_name = match mi.node {
ast::MetaItem_::MetaList(_, ref xs) => {
if let ast::MetaItem_::MetaWord(ref w) = xs[0].node {
token::str_to_ident(&w)
} else {
cx.span_err(mi.span, "Expected word");
return;
}
}
_ => {
cx.span_err(mi.span, "Expected list");
return;
}
};
// Duplicate the item but replace its ident by the MetaItem
match it.clone() {
Annotatable::Item(it) => {
let mut new_it = (*it).clone();
new_it.attrs.clear();
new_it.ident = copy_name;
push(Annotatable::Item(P(new_it)));
}
Annotatable::ImplItem(it) => {
let mut new_it = (*it).clone();
new_it.attrs.clear();
new_it.ident = copy_name;
push(Annotatable::ImplItem(P(new_it)));
}
Annotatable::TraitItem(tt) => |
}
}
pub fn foo() {}
| {
let mut new_it = (*tt).clone();
new_it.attrs.clear();
new_it.ident = copy_name;
push(Annotatable::TraitItem(P(new_it)));
} | conditional_block |
macro_crate_test.rs | // Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// force-host
#![feature(plugin_registrar, quote, rustc_private)]
extern crate syntax;
extern crate rustc;
use syntax::ast::{self, TokenTree, Item, MetaItem, ImplItem, TraitItem};
use syntax::codemap::Span;
use syntax::ext::base::*;
use syntax::parse::{self, token};
use syntax::ptr::P;
use rustc::plugin::Registry;
#[macro_export]
macro_rules! exported_macro { () => (2) }
macro_rules! unexported_macro { () => (3) }
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_macro("make_a_1", expand_make_a_1);
reg.register_macro("identity", expand_identity);
reg.register_syntax_extension(
token::intern("into_foo"),
// FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
Modifier(Box::new(expand_into_foo)));
reg.register_syntax_extension(
token::intern("into_multi_foo"),
// FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
MultiModifier(Box::new(expand_into_foo_multi)));
reg.register_syntax_extension(
token::intern("duplicate"),
// FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
MultiDecorator(Box::new(expand_duplicate)));
}
fn expand_make_a_1(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree])
-> Box<MacResult+'static> {
if!tts.is_empty() {
cx.span_fatal(sp, "make_a_1 takes no arguments");
}
MacEager::expr(quote_expr!(cx, 1))
}
// See Issue #15750
fn expand_identity(cx: &mut ExtCtxt, _span: Span, tts: &[TokenTree])
-> Box<MacResult+'static> {
// Parse an expression and emit it unchanged.
let mut parser = parse::new_parser_from_tts(cx.parse_sess(),
cx.cfg(), tts.to_vec());
let expr = parser.parse_expr();
MacEager::expr(quote_expr!(&mut *cx, $expr))
}
fn expand_into_foo(cx: &mut ExtCtxt, sp: Span, attr: &MetaItem, it: P<Item>)
-> P<Item> {
P(Item {
attrs: it.attrs.clone(),
..(*quote_item!(cx, enum Foo { Bar, Baz }).unwrap()).clone()
})
}
fn | (cx: &mut ExtCtxt,
sp: Span,
attr: &MetaItem,
it: Annotatable) -> Annotatable {
match it {
Annotatable::Item(it) => {
Annotatable::Item(P(Item {
attrs: it.attrs.clone(),
..(*quote_item!(cx, enum Foo2 { Bar2, Baz2 }).unwrap()).clone()
}))
}
Annotatable::ImplItem(it) => {
quote_item!(cx, impl X { fn foo(&self) -> i32 { 42 } }).unwrap().and_then(|i| {
match i.node {
ast::ItemImpl(_, _, _, _, _, mut items) => {
Annotatable::ImplItem(items.pop().expect("impl method not found"))
}
_ => unreachable!("impl parsed to something other than impl")
}
})
}
Annotatable::TraitItem(it) => {
quote_item!(cx, trait X { fn foo(&self) -> i32 { 0 } }).unwrap().and_then(|i| {
match i.node {
ast::ItemTrait(_, _, _, mut items) => {
Annotatable::TraitItem(items.pop().expect("trait method not found"))
}
_ => unreachable!("trait parsed to something other than trait")
}
})
}
}
}
// Create a duplicate of the annotatable, based on the MetaItem
fn expand_duplicate(cx: &mut ExtCtxt,
sp: Span,
mi: &MetaItem,
it: &Annotatable,
push: &mut FnMut(Annotatable))
{
let copy_name = match mi.node {
ast::MetaItem_::MetaList(_, ref xs) => {
if let ast::MetaItem_::MetaWord(ref w) = xs[0].node {
token::str_to_ident(&w)
} else {
cx.span_err(mi.span, "Expected word");
return;
}
}
_ => {
cx.span_err(mi.span, "Expected list");
return;
}
};
// Duplicate the item but replace its ident by the MetaItem
match it.clone() {
Annotatable::Item(it) => {
let mut new_it = (*it).clone();
new_it.attrs.clear();
new_it.ident = copy_name;
push(Annotatable::Item(P(new_it)));
}
Annotatable::ImplItem(it) => {
let mut new_it = (*it).clone();
new_it.attrs.clear();
new_it.ident = copy_name;
push(Annotatable::ImplItem(P(new_it)));
}
Annotatable::TraitItem(tt) => {
let mut new_it = (*tt).clone();
new_it.attrs.clear();
new_it.ident = copy_name;
push(Annotatable::TraitItem(P(new_it)));
}
}
}
pub fn foo() {}
| expand_into_foo_multi | identifier_name |
macro_crate_test.rs | // Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// force-host
#![feature(plugin_registrar, quote, rustc_private)]
extern crate syntax;
extern crate rustc;
use syntax::ast::{self, TokenTree, Item, MetaItem, ImplItem, TraitItem};
use syntax::codemap::Span;
use syntax::ext::base::*;
use syntax::parse::{self, token};
use syntax::ptr::P;
use rustc::plugin::Registry;
#[macro_export]
macro_rules! exported_macro { () => (2) }
macro_rules! unexported_macro { () => (3) }
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_macro("make_a_1", expand_make_a_1);
reg.register_macro("identity", expand_identity);
reg.register_syntax_extension(
token::intern("into_foo"),
// FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
Modifier(Box::new(expand_into_foo)));
reg.register_syntax_extension(
token::intern("into_multi_foo"),
// FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
MultiModifier(Box::new(expand_into_foo_multi)));
reg.register_syntax_extension(
token::intern("duplicate"),
// FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
MultiDecorator(Box::new(expand_duplicate)));
} | cx.span_fatal(sp, "make_a_1 takes no arguments");
}
MacEager::expr(quote_expr!(cx, 1))
}
// See Issue #15750
fn expand_identity(cx: &mut ExtCtxt, _span: Span, tts: &[TokenTree])
-> Box<MacResult+'static> {
// Parse an expression and emit it unchanged.
let mut parser = parse::new_parser_from_tts(cx.parse_sess(),
cx.cfg(), tts.to_vec());
let expr = parser.parse_expr();
MacEager::expr(quote_expr!(&mut *cx, $expr))
}
fn expand_into_foo(cx: &mut ExtCtxt, sp: Span, attr: &MetaItem, it: P<Item>)
-> P<Item> {
P(Item {
attrs: it.attrs.clone(),
..(*quote_item!(cx, enum Foo { Bar, Baz }).unwrap()).clone()
})
}
fn expand_into_foo_multi(cx: &mut ExtCtxt,
sp: Span,
attr: &MetaItem,
it: Annotatable) -> Annotatable {
match it {
Annotatable::Item(it) => {
Annotatable::Item(P(Item {
attrs: it.attrs.clone(),
..(*quote_item!(cx, enum Foo2 { Bar2, Baz2 }).unwrap()).clone()
}))
}
Annotatable::ImplItem(it) => {
quote_item!(cx, impl X { fn foo(&self) -> i32 { 42 } }).unwrap().and_then(|i| {
match i.node {
ast::ItemImpl(_, _, _, _, _, mut items) => {
Annotatable::ImplItem(items.pop().expect("impl method not found"))
}
_ => unreachable!("impl parsed to something other than impl")
}
})
}
Annotatable::TraitItem(it) => {
quote_item!(cx, trait X { fn foo(&self) -> i32 { 0 } }).unwrap().and_then(|i| {
match i.node {
ast::ItemTrait(_, _, _, mut items) => {
Annotatable::TraitItem(items.pop().expect("trait method not found"))
}
_ => unreachable!("trait parsed to something other than trait")
}
})
}
}
}
// Create a duplicate of the annotatable, based on the MetaItem
fn expand_duplicate(cx: &mut ExtCtxt,
sp: Span,
mi: &MetaItem,
it: &Annotatable,
push: &mut FnMut(Annotatable))
{
let copy_name = match mi.node {
ast::MetaItem_::MetaList(_, ref xs) => {
if let ast::MetaItem_::MetaWord(ref w) = xs[0].node {
token::str_to_ident(&w)
} else {
cx.span_err(mi.span, "Expected word");
return;
}
}
_ => {
cx.span_err(mi.span, "Expected list");
return;
}
};
// Duplicate the item but replace its ident by the MetaItem
match it.clone() {
Annotatable::Item(it) => {
let mut new_it = (*it).clone();
new_it.attrs.clear();
new_it.ident = copy_name;
push(Annotatable::Item(P(new_it)));
}
Annotatable::ImplItem(it) => {
let mut new_it = (*it).clone();
new_it.attrs.clear();
new_it.ident = copy_name;
push(Annotatable::ImplItem(P(new_it)));
}
Annotatable::TraitItem(tt) => {
let mut new_it = (*tt).clone();
new_it.attrs.clear();
new_it.ident = copy_name;
push(Annotatable::TraitItem(P(new_it)));
}
}
}
pub fn foo() {} |
fn expand_make_a_1(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree])
-> Box<MacResult+'static> {
if !tts.is_empty() { | random_line_split |
macro_crate_test.rs | // Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// force-host
#![feature(plugin_registrar, quote, rustc_private)]
extern crate syntax;
extern crate rustc;
use syntax::ast::{self, TokenTree, Item, MetaItem, ImplItem, TraitItem};
use syntax::codemap::Span;
use syntax::ext::base::*;
use syntax::parse::{self, token};
use syntax::ptr::P;
use rustc::plugin::Registry;
#[macro_export]
macro_rules! exported_macro { () => (2) }
macro_rules! unexported_macro { () => (3) }
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_macro("make_a_1", expand_make_a_1);
reg.register_macro("identity", expand_identity);
reg.register_syntax_extension(
token::intern("into_foo"),
// FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
Modifier(Box::new(expand_into_foo)));
reg.register_syntax_extension(
token::intern("into_multi_foo"),
// FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
MultiModifier(Box::new(expand_into_foo_multi)));
reg.register_syntax_extension(
token::intern("duplicate"),
// FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
MultiDecorator(Box::new(expand_duplicate)));
}
fn expand_make_a_1(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree])
-> Box<MacResult+'static> {
if!tts.is_empty() {
cx.span_fatal(sp, "make_a_1 takes no arguments");
}
MacEager::expr(quote_expr!(cx, 1))
}
// See Issue #15750
fn expand_identity(cx: &mut ExtCtxt, _span: Span, tts: &[TokenTree])
-> Box<MacResult+'static> {
// Parse an expression and emit it unchanged.
let mut parser = parse::new_parser_from_tts(cx.parse_sess(),
cx.cfg(), tts.to_vec());
let expr = parser.parse_expr();
MacEager::expr(quote_expr!(&mut *cx, $expr))
}
fn expand_into_foo(cx: &mut ExtCtxt, sp: Span, attr: &MetaItem, it: P<Item>)
-> P<Item> |
fn expand_into_foo_multi(cx: &mut ExtCtxt,
sp: Span,
attr: &MetaItem,
it: Annotatable) -> Annotatable {
match it {
Annotatable::Item(it) => {
Annotatable::Item(P(Item {
attrs: it.attrs.clone(),
..(*quote_item!(cx, enum Foo2 { Bar2, Baz2 }).unwrap()).clone()
}))
}
Annotatable::ImplItem(it) => {
quote_item!(cx, impl X { fn foo(&self) -> i32 { 42 } }).unwrap().and_then(|i| {
match i.node {
ast::ItemImpl(_, _, _, _, _, mut items) => {
Annotatable::ImplItem(items.pop().expect("impl method not found"))
}
_ => unreachable!("impl parsed to something other than impl")
}
})
}
Annotatable::TraitItem(it) => {
quote_item!(cx, trait X { fn foo(&self) -> i32 { 0 } }).unwrap().and_then(|i| {
match i.node {
ast::ItemTrait(_, _, _, mut items) => {
Annotatable::TraitItem(items.pop().expect("trait method not found"))
}
_ => unreachable!("trait parsed to something other than trait")
}
})
}
}
}
// Create a duplicate of the annotatable, based on the MetaItem
fn expand_duplicate(cx: &mut ExtCtxt,
sp: Span,
mi: &MetaItem,
it: &Annotatable,
push: &mut FnMut(Annotatable))
{
let copy_name = match mi.node {
ast::MetaItem_::MetaList(_, ref xs) => {
if let ast::MetaItem_::MetaWord(ref w) = xs[0].node {
token::str_to_ident(&w)
} else {
cx.span_err(mi.span, "Expected word");
return;
}
}
_ => {
cx.span_err(mi.span, "Expected list");
return;
}
};
// Duplicate the item but replace its ident by the MetaItem
match it.clone() {
Annotatable::Item(it) => {
let mut new_it = (*it).clone();
new_it.attrs.clear();
new_it.ident = copy_name;
push(Annotatable::Item(P(new_it)));
}
Annotatable::ImplItem(it) => {
let mut new_it = (*it).clone();
new_it.attrs.clear();
new_it.ident = copy_name;
push(Annotatable::ImplItem(P(new_it)));
}
Annotatable::TraitItem(tt) => {
let mut new_it = (*tt).clone();
new_it.attrs.clear();
new_it.ident = copy_name;
push(Annotatable::TraitItem(P(new_it)));
}
}
}
pub fn foo() {}
| {
P(Item {
attrs: it.attrs.clone(),
..(*quote_item!(cx, enum Foo { Bar, Baz }).unwrap()).clone()
})
} | identifier_body |
mod.rs | mod sender;
mod bus;
use std::error;
use std::fmt;
use std::collections::HashMap;
pub use self::sender::Reader;
#[derive(Debug)]
pub enum BusError {
NoSuchChannel(String),
}
impl fmt::Display for BusError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match *self {
BusError::NoSuchChannel(ref chan) => |
}
}
}
impl error::Error for BusError {
fn description(&self) -> &str {
match *self {
BusError::NoSuchChannel(_) => "No such channel",
}
}
}
pub struct BusSystem {
busses: HashMap<String, bus::Bus<f64>>,
}
impl BusSystem {
// ok, there's a big fat leak: when there are no listeners for a bus, it stays in the map.
pub fn new() -> BusSystem {
BusSystem { busses: HashMap::new() }
}
// ideally sub<T> -> Receiver<T>
pub fn sub(&mut self, chan: &str) -> Reader<f64> {
self.busses
.entry(chan.to_string())
.or_insert(bus::Bus::new(0.0))
.subscribe()
}
pub fn publish(&mut self, chan: &str, value: f64) -> Result<(), BusError> {
self.busses.get_mut(chan).map(|c| c.publish(value)).ok_or(
BusError::NoSuchChannel(chan.to_string()),
)
}
}
#[cfg(test)]
mod tests {
use super::*;
struct Stuff {
a: Reader<f64>,
b: Reader<f64>,
}
impl Stuff {
fn doit(&self) -> f64 {
self.a.value() + self.b.value()
}
}
#[test]
fn test_bus_system() {
let mut bus = BusSystem::new();
let stuff = Stuff {
a: bus.sub("a"),
b: bus.sub("b"),
};
bus.publish("a", 2.0).unwrap();
bus.publish("b", 4.0).unwrap();
assert_eq!(stuff.doit(), 6.0);
assert!(bus.publish("b", 5.0).is_ok());
assert!(bus.publish("d", 5.0).is_err());
}
}
| {
fmt.debug_struct("NoSuchChannel")
.field("channel", &chan)
.finish()
} | conditional_block |
mod.rs | mod sender;
mod bus;
use std::error;
use std::fmt;
use std::collections::HashMap;
pub use self::sender::Reader;
#[derive(Debug)]
pub enum BusError {
NoSuchChannel(String),
}
impl fmt::Display for BusError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match *self {
BusError::NoSuchChannel(ref chan) => {
fmt.debug_struct("NoSuchChannel")
.field("channel", &chan)
.finish()
}
}
}
}
impl error::Error for BusError {
fn description(&self) -> &str {
match *self {
BusError::NoSuchChannel(_) => "No such channel",
}
}
}
pub struct BusSystem {
busses: HashMap<String, bus::Bus<f64>>,
}
impl BusSystem {
// ok, there's a big fat leak: when there are no listeners for a bus, it stays in the map.
pub fn new() -> BusSystem {
BusSystem { busses: HashMap::new() }
}
// ideally sub<T> -> Receiver<T>
pub fn sub(&mut self, chan: &str) -> Reader<f64> {
self.busses
.entry(chan.to_string())
.or_insert(bus::Bus::new(0.0))
.subscribe()
}
pub fn publish(&mut self, chan: &str, value: f64) -> Result<(), BusError> {
self.busses.get_mut(chan).map(|c| c.publish(value)).ok_or(
BusError::NoSuchChannel(chan.to_string()),
)
}
}
#[cfg(test)]
mod tests {
use super::*;
struct | {
a: Reader<f64>,
b: Reader<f64>,
}
impl Stuff {
fn doit(&self) -> f64 {
self.a.value() + self.b.value()
}
}
#[test]
fn test_bus_system() {
let mut bus = BusSystem::new();
let stuff = Stuff {
a: bus.sub("a"),
b: bus.sub("b"),
};
bus.publish("a", 2.0).unwrap();
bus.publish("b", 4.0).unwrap();
assert_eq!(stuff.doit(), 6.0);
assert!(bus.publish("b", 5.0).is_ok());
assert!(bus.publish("d", 5.0).is_err());
}
}
| Stuff | identifier_name |
mod.rs | mod sender;
mod bus;
use std::error;
use std::fmt;
use std::collections::HashMap;
pub use self::sender::Reader;
#[derive(Debug)]
pub enum BusError {
NoSuchChannel(String),
}
impl fmt::Display for BusError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match *self {
BusError::NoSuchChannel(ref chan) => {
fmt.debug_struct("NoSuchChannel")
.field("channel", &chan)
.finish()
}
}
}
}
impl error::Error for BusError {
fn description(&self) -> &str {
match *self {
BusError::NoSuchChannel(_) => "No such channel",
}
}
}
pub struct BusSystem {
busses: HashMap<String, bus::Bus<f64>>,
}
impl BusSystem {
// ok, there's a big fat leak: when there are no listeners for a bus, it stays in the map.
pub fn new() -> BusSystem {
BusSystem { busses: HashMap::new() }
}
// ideally sub<T> -> Receiver<T>
pub fn sub(&mut self, chan: &str) -> Reader<f64> |
pub fn publish(&mut self, chan: &str, value: f64) -> Result<(), BusError> {
self.busses.get_mut(chan).map(|c| c.publish(value)).ok_or(
BusError::NoSuchChannel(chan.to_string()),
)
}
}
#[cfg(test)]
mod tests {
use super::*;
struct Stuff {
a: Reader<f64>,
b: Reader<f64>,
}
impl Stuff {
fn doit(&self) -> f64 {
self.a.value() + self.b.value()
}
}
#[test]
fn test_bus_system() {
let mut bus = BusSystem::new();
let stuff = Stuff {
a: bus.sub("a"),
b: bus.sub("b"),
};
bus.publish("a", 2.0).unwrap();
bus.publish("b", 4.0).unwrap();
assert_eq!(stuff.doit(), 6.0);
assert!(bus.publish("b", 5.0).is_ok());
assert!(bus.publish("d", 5.0).is_err());
}
}
| {
self.busses
.entry(chan.to_string())
.or_insert(bus::Bus::new(0.0))
.subscribe()
} | identifier_body |
mod.rs | mod sender;
mod bus;
use std::error;
use std::fmt;
use std::collections::HashMap;
pub use self::sender::Reader;
#[derive(Debug)]
pub enum BusError {
NoSuchChannel(String),
}
impl fmt::Display for BusError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match *self {
BusError::NoSuchChannel(ref chan) => {
fmt.debug_struct("NoSuchChannel")
.field("channel", &chan)
.finish()
}
}
}
}
impl error::Error for BusError {
fn description(&self) -> &str {
match *self {
BusError::NoSuchChannel(_) => "No such channel",
}
} | pub struct BusSystem {
busses: HashMap<String, bus::Bus<f64>>,
}
impl BusSystem {
// ok, there's a big fat leak: when there are no listeners for a bus, it stays in the map.
pub fn new() -> BusSystem {
BusSystem { busses: HashMap::new() }
}
// ideally sub<T> -> Receiver<T>
pub fn sub(&mut self, chan: &str) -> Reader<f64> {
self.busses
.entry(chan.to_string())
.or_insert(bus::Bus::new(0.0))
.subscribe()
}
pub fn publish(&mut self, chan: &str, value: f64) -> Result<(), BusError> {
self.busses.get_mut(chan).map(|c| c.publish(value)).ok_or(
BusError::NoSuchChannel(chan.to_string()),
)
}
}
#[cfg(test)]
mod tests {
use super::*;
struct Stuff {
a: Reader<f64>,
b: Reader<f64>,
}
impl Stuff {
fn doit(&self) -> f64 {
self.a.value() + self.b.value()
}
}
#[test]
fn test_bus_system() {
let mut bus = BusSystem::new();
let stuff = Stuff {
a: bus.sub("a"),
b: bus.sub("b"),
};
bus.publish("a", 2.0).unwrap();
bus.publish("b", 4.0).unwrap();
assert_eq!(stuff.doit(), 6.0);
assert!(bus.publish("b", 5.0).is_ok());
assert!(bus.publish("d", 5.0).is_err());
}
} | } | random_line_split |
lib.rs | // Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use std::{collections::BTreeSet, env};
pub mod api;
pub mod api_utils;
pub mod display_utils;
pub mod error;
pub mod filesystem;
pub mod nginx;
pub mod ostpool;
pub mod profile;
pub mod server;
pub mod snapshot;
pub mod stratagem;
pub mod update_repo_file;
pub fn parse_hosts(hosts: &[String]) -> Result<BTreeSet<String>, error::ImlManagerCliError> {
let parsed: Vec<BTreeSet<String>> = hosts
.iter()
.map(|x| hostlist_parser::parse(x))
.collect::<Result<_, _>>()?;
let union = parsed
.into_iter()
.fold(BTreeSet::new(), |acc, h| acc.union(&h).cloned().collect());
Ok(union)
}
fn exe_name() -> Option<String> |
pub fn selfname(suffix: Option<&str>) -> Option<String> {
match env::var("CLI_NAME") {
Ok(n) => suffix.map(|s| format!("{}-{}", n, s)).or_else(|| Some(n)),
Err(_) => exe_name(),
}
}
| {
Some(
std::env::current_exe()
.ok()?
.file_stem()?
.to_str()?
.to_string(),
)
} | identifier_body |
lib.rs | // Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use std::{collections::BTreeSet, env};
pub mod api;
pub mod api_utils;
pub mod display_utils;
pub mod error;
pub mod filesystem;
pub mod nginx;
pub mod ostpool;
pub mod profile;
pub mod server;
pub mod snapshot;
pub mod stratagem;
pub mod update_repo_file;
pub fn parse_hosts(hosts: &[String]) -> Result<BTreeSet<String>, error::ImlManagerCliError> {
let parsed: Vec<BTreeSet<String>> = hosts
.iter()
.map(|x| hostlist_parser::parse(x))
.collect::<Result<_, _>>()?;
let union = parsed
.into_iter()
.fold(BTreeSet::new(), |acc, h| acc.union(&h).cloned().collect());
Ok(union)
}
fn exe_name() -> Option<String> {
Some(
std::env::current_exe()
.ok()?
.file_stem()?
.to_str()?
.to_string(),
)
} | }
} |
pub fn selfname(suffix: Option<&str>) -> Option<String> {
match env::var("CLI_NAME") {
Ok(n) => suffix.map(|s| format!("{}-{}", n, s)).or_else(|| Some(n)),
Err(_) => exe_name(), | random_line_split |
lib.rs | // Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use std::{collections::BTreeSet, env};
pub mod api;
pub mod api_utils;
pub mod display_utils;
pub mod error;
pub mod filesystem;
pub mod nginx;
pub mod ostpool;
pub mod profile;
pub mod server;
pub mod snapshot;
pub mod stratagem;
pub mod update_repo_file;
pub fn | (hosts: &[String]) -> Result<BTreeSet<String>, error::ImlManagerCliError> {
let parsed: Vec<BTreeSet<String>> = hosts
.iter()
.map(|x| hostlist_parser::parse(x))
.collect::<Result<_, _>>()?;
let union = parsed
.into_iter()
.fold(BTreeSet::new(), |acc, h| acc.union(&h).cloned().collect());
Ok(union)
}
fn exe_name() -> Option<String> {
Some(
std::env::current_exe()
.ok()?
.file_stem()?
.to_str()?
.to_string(),
)
}
pub fn selfname(suffix: Option<&str>) -> Option<String> {
match env::var("CLI_NAME") {
Ok(n) => suffix.map(|s| format!("{}-{}", n, s)).or_else(|| Some(n)),
Err(_) => exe_name(),
}
}
| parse_hosts | identifier_name |
associated-types-no-suitable-supertrait.rs | // Check that we get an error when you use `<Self as Get>::Value` in
// the trait definition but `Self` does not, in fact, implement `Get`.
//
// See also associated-types-no-suitable-supertrait-2.rs, which checks
// that we see the same error if we get around to checking the default
// method body.
//
// See also run-pass/associated-types-projection-to-unrelated-trait.rs,
// which checks that the trait interface itself is not considered an
// error as long as all impls satisfy the constraint.
trait Get {
type Value;
}
trait Other {
fn | <U:Get>(&self, foo: U, bar: <Self as Get>::Value) {}
//~^ ERROR the trait bound `Self: Get` is not satisfied
}
impl<T:Get> Other for T {
fn uhoh<U:Get>(&self, foo: U, bar: <(T, U) as Get>::Value) {}
//~^ ERROR the trait bound `(T, U): Get` is not satisfied
}
fn main() { }
| uhoh | identifier_name |
associated-types-no-suitable-supertrait.rs | // Check that we get an error when you use `<Self as Get>::Value` in
// the trait definition but `Self` does not, in fact, implement `Get`.
//
// See also associated-types-no-suitable-supertrait-2.rs, which checks
// that we see the same error if we get around to checking the default
// method body.
//
// See also run-pass/associated-types-projection-to-unrelated-trait.rs,
// which checks that the trait interface itself is not considered an
// error as long as all impls satisfy the constraint.
trait Get {
type Value;
}
trait Other {
fn uhoh<U:Get>(&self, foo: U, bar: <Self as Get>::Value) {}
//~^ ERROR the trait bound `Self: Get` is not satisfied
}
impl<T:Get> Other for T {
fn uhoh<U:Get>(&self, foo: U, bar: <(T, U) as Get>::Value) |
//~^ ERROR the trait bound `(T, U): Get` is not satisfied
}
fn main() { }
| {} | identifier_body |
associated-types-no-suitable-supertrait.rs | // Check that we get an error when you use `<Self as Get>::Value` in
// the trait definition but `Self` does not, in fact, implement `Get`.
//
// See also associated-types-no-suitable-supertrait-2.rs, which checks
// that we see the same error if we get around to checking the default
// method body. | trait Get {
type Value;
}
trait Other {
fn uhoh<U:Get>(&self, foo: U, bar: <Self as Get>::Value) {}
//~^ ERROR the trait bound `Self: Get` is not satisfied
}
impl<T:Get> Other for T {
fn uhoh<U:Get>(&self, foo: U, bar: <(T, U) as Get>::Value) {}
//~^ ERROR the trait bound `(T, U): Get` is not satisfied
}
fn main() { } | //
// See also run-pass/associated-types-projection-to-unrelated-trait.rs,
// which checks that the trait interface itself is not considered an
// error as long as all impls satisfy the constraint.
| random_line_split |
main.rs | /*
License stuff
----------------------------------------------------------
Copyright 2013 Joris Rehm
Licensed under the Apache License, Version 2.0.
Licensed under The MIT License.
See joined files "LICENSE-APACHE" and "LICENSE-MIT".
----------------------------------------------------------
*/
extern mod sdl;
use ecs::FunctionalTable;
use ecs::Entity;
use vec::Vec;
use vec::MyFloat;
mod ecs;
mod vec;
/* Let's define a data structure for managing our ECS. */
struct Manager {
/* A counter to remember the number of created entities. */
priv entities_numbers : uint,
// tables
table_position : FunctionalTable<Vec>,
table_velocity : FunctionalTable<Vec>,
table_acceleration : FunctionalTable<Vec>,
table_mass : FunctionalTable<MyFloat>
}
impl Manager {
/* Initializing an empty ECS */
fn new() -> ~Manager {
~Manager {
entities_numbers : 0,
table_position : FunctionalTable::new(~"position"),
table_velocity : FunctionalTable::new(~"velocity"),
table_acceleration : FunctionalTable::new(~"acceleration"),
table_mass : FunctionalTable::new(~"mass")
}
}
/* Creating a new entity is just taking an unused id. */
fn new_entity(&mut self) -> Entity {
let res = self.entities_numbers;
self.entities_numbers = self.entities_numbers + 1;
info!("create_entity id={}", res);
res
}
/* TODO: how to manage the deletion of entities?
It depends on whether we iterate on entity ids or on component tables.
Maybe it's enough to delete the associated components.
We may use a pool of unused ids for recycling (probably not
really needed) or just search for unassociated ids.*/
} /* impl Manager */
#[test]
fn test_entity_management() {
let mut mng = Manager::new();
let e1 = mng.new_entity();
assert!( e1 == 0 );
let e2 = mng.new_entity();
assert!( e2 == 1 );
}
/* ----------------------------------------------------------- */
// TODO define an integration function and refactor
// compute_velocity and compute_position.
// TODO it's not really useful to record acceleration
/* ----------------------------------------------------------- */
fn init_system() -> ~Manager {
println("use RUST_LOG=3 to see log");
let mut mng = Manager::new();
let zero = Vec(0., 0., 0.);
//
for i in range(0,10) {
let entity2 = mng.new_entity();
mng.table_position.set(entity2, Vec::rand_around_origin(5., 25.) );
mng.table_velocity.set(entity2, Vec::rand_around_origin(0.2, 0.5) );
mng.table_acceleration.set(entity2, zero);
mng.table_mass.set(entity2, 1.);
}
//
return mng;
}
fn system_loop(mng : &mut Manager, cycle : uint, screen: &sdl::video::Surface){
let delta_time = 1./24. as MyFloat; // TODO compute real delay
info!("start_cycle {}",cycle);
compute_acceleration(mng);
compute_velocity(mng, delta_time);
compute_position(mng, delta_time);
draw(mng,screen)
}
fn draw(mng: &Manager, screen: &sdl::video::Surface){
screen.clear();
for x in mng.table_position.iter() {
let (&e,pos) : (&Entity,&Vec) = x;
let Vec(pos_x,pos_y,_) = *pos;
let mut x = 0;
let mut y = 0;
let scale = 5.;
x = (pos_x*scale + 400.) as i16;
y = (pos_y*scale + 300.) as i16 ;
draw_circle(screen, x, y);
}
}
// Having an acceleration implies having a velocity!
// Having an acceleration implies having a mass!
fn compute_acceleration(mng: &mut Manager) {
let comp = |e: uint, accel: &Vec| {
// note: accel not used
let pos = mng.table_position.get(e);
let force = gravity_force_at(*pos);
let mass = *mng.table_mass.get(e);
force.scale(1./mass)
};
mng.table_acceleration.apply(comp);
}
fn gravity_force_at(pos: Vec) -> Vec {
let earth_gravity = 9.80665; // in m/s²
let planet_center = Vec(0.,0.,0.);
// the vector from planet center to the pos
let pos_from_planet = pos - planet_center;
// TODO and yes it's useless :)
let dist = pos_from_planet.length();
let gravity_force =
pos_from_planet.normalize(
).scale(- earth_gravity
).scale(1./(dist*dist));
if gravity_force.is_nan() {
Vec(0.,0.,0.)
} else {
gravity_force
}
}
// integration of acceleration
// having an acceleration implies having a velocity
fn compute_velocity (mng: &mut Manager, dt: MyFloat) {
for x in mng.table_acceleration.iter() {
let (&e,accel) : (&Entity,&Vec) = x;
let vel : Vec = *mng.table_velocity.get(e);
let new_vel = vel + accel.scale(dt);
mng.table_velocity.set(e, new_vel);
}
}
// Move all the entities with the velocity applied during dt time.
// Having a velocity implies having a position!
fn compute_position(mng: &mut Manager, dt:MyFloat) {
for x in mng.table_velocity.iter() {
let (&e,vel) : (&Entity,&Vec) = x;
let pos = *mng.table_position.get(e);
let new_pos = pos + vel.scale(dt);
mng.table_position.set(e, new_pos);
}
}
fn draw_circle(screen : &sdl::video::Surface, px : i16, py : i16){
// this algorithm is dumb
// TODO use a proper algorithm for circle drawing
let r = 10;
let mut x = -r;
let mut y = -r;
while(y <= r){
if x*x + y*y <= r*r {
draw_plot(screen, px+x, py+y);
}
x += 1;
if r < x {
x = -r;
y += 1;
}
}
}
fn draw_plot(screen : &sdl::video::Surface, px : i16, py : i16){
let rect = Some(sdl::Rect {
x: px,
y: py,
w: 1,
h: 1
});
let color= sdl::video::RGB(255,255,255);
screen.fill_rect(rect,color);
}
fn main() {
sdl::init([sdl::InitVideo]);
sdl::wm::set_caption("Bouncing Balls On a Big Ball", "Bobobib");
let screen = match sdl::video::set_video_mode
(800, 600, 32, [sdl::video::HWSurface],[sdl::video::DoubleBuf]) {
Ok(screen) => screen,
Err(err) => fail!("Impossible to open screen: {}", err)
};
let mut mng = init_system();
let mut cycle = 1; | 'event : loop {
match sdl::event::poll_event() {
sdl::event::QuitEvent => break 'main,
sdl::event::NoEvent => break 'event,
sdl::event::KeyEvent(k, _, _, _)
if k == sdl::event::EscapeKey
=> break 'main,
_ => {}
}
}
system_loop(mng, cycle, screen);
cycle += 1;
screen.flip();
// if cycle == 100 {
// break 'main;
// }
}
sdl::quit();
} |
'main : loop { | random_line_split |
main.rs | /*
License stuff
----------------------------------------------------------
Copyright 2013 Joris Rehm
Licensed under the Apache License, Version 2.0.
Licensed under The MIT License.
See joined files "LICENSE-APACHE" and "LICENSE-MIT".
----------------------------------------------------------
*/
extern mod sdl;
use ecs::FunctionalTable;
use ecs::Entity;
use vec::Vec;
use vec::MyFloat;
mod ecs;
mod vec;
/* Let's define a data structure for managing our ECS. */
struct Manager {
/* A counter to remember the number of created entities. */
priv entities_numbers : uint,
// tables
table_position : FunctionalTable<Vec>,
table_velocity : FunctionalTable<Vec>,
table_acceleration : FunctionalTable<Vec>,
table_mass : FunctionalTable<MyFloat>
}
impl Manager {
/* Initializing an empty ECS */
fn new() -> ~Manager {
~Manager {
entities_numbers : 0,
table_position : FunctionalTable::new(~"position"),
table_velocity : FunctionalTable::new(~"velocity"),
table_acceleration : FunctionalTable::new(~"acceleration"),
table_mass : FunctionalTable::new(~"mass")
}
}
/* Creating a new entity is just taking an unused id. */
fn new_entity(&mut self) -> Entity {
let res = self.entities_numbers;
self.entities_numbers = self.entities_numbers + 1;
info!("create_entity id={}", res);
res
}
/* TODO: how to manage the deletion of entities?
It depends on whether we iterate on entity ids or on component tables.
Maybe it's enough to delete the associated components.
We may use a pool of unused ids for recycling (probably not
really needed) or just search for unassociated ids.*/
} /* impl Manager */
#[test]
fn test_entity_management() |
/* ----------------------------------------------------------- */
// TODO define an integration function and refactor
// compute_velocity and compute_position.
// TODO it's not really useful to record acceleration
/* ----------------------------------------------------------- */
fn init_system() -> ~Manager {
println("use RUST_LOG=3 to see log");
let mut mng = Manager::new();
let zero = Vec(0., 0., 0.);
//
for i in range(0,10) {
let entity2 = mng.new_entity();
mng.table_position.set(entity2, Vec::rand_around_origin(5., 25.) );
mng.table_velocity.set(entity2, Vec::rand_around_origin(0.2, 0.5) );
mng.table_acceleration.set(entity2, zero);
mng.table_mass.set(entity2, 1.);
}
//
return mng;
}
fn system_loop(mng : &mut Manager, cycle : uint, screen: &sdl::video::Surface){
let delta_time = 1./24. as MyFloat; // TODO compute real delay
info!("start_cycle {}",cycle);
compute_acceleration(mng);
compute_velocity(mng, delta_time);
compute_position(mng, delta_time);
draw(mng,screen)
}
fn draw(mng: &Manager, screen: &sdl::video::Surface){
screen.clear();
for x in mng.table_position.iter() {
let (&e,pos) : (&Entity,&Vec) = x;
let Vec(pos_x,pos_y,_) = *pos;
let mut x = 0;
let mut y = 0;
let scale = 5.;
x = (pos_x*scale + 400.) as i16;
y = (pos_y*scale + 300.) as i16 ;
draw_circle(screen, x, y);
}
}
// Having an acceleration implies having a velocity!
// Having an acceleration implies having a mass!
fn compute_acceleration(mng: &mut Manager) {
let comp = |e: uint, accel: &Vec| {
// note: accel not used
let pos = mng.table_position.get(e);
let force = gravity_force_at(*pos);
let mass = *mng.table_mass.get(e);
force.scale(1./mass)
};
mng.table_acceleration.apply(comp);
}
fn gravity_force_at(pos: Vec) -> Vec {
let earth_gravity = 9.80665; // in m/s²
let planet_center = Vec(0.,0.,0.);
// the vector from planet center to the pos
let pos_from_planet = pos - planet_center;
// TODO and yes it's useless :)
let dist = pos_from_planet.length();
let gravity_force =
pos_from_planet.normalize(
).scale(- earth_gravity
).scale(1./(dist*dist));
if gravity_force.is_nan() {
Vec(0.,0.,0.)
} else {
gravity_force
}
}
// integration of acceleration
// having an acceleration implies having a velocity
fn compute_velocity (mng: &mut Manager, dt: MyFloat) {
for x in mng.table_acceleration.iter() {
let (&e,accel) : (&Entity,&Vec) = x;
let vel : Vec = *mng.table_velocity.get(e);
let new_vel = vel + accel.scale(dt);
mng.table_velocity.set(e, new_vel);
}
}
// Move all the entities with the velocity applied during dt time.
// Having a velocity implies having a position!
fn compute_position(mng: &mut Manager, dt:MyFloat) {
for x in mng.table_velocity.iter() {
let (&e,vel) : (&Entity,&Vec) = x;
let pos = *mng.table_position.get(e);
let new_pos = pos + vel.scale(dt);
mng.table_position.set(e, new_pos);
}
}
fn draw_circle(screen : &sdl::video::Surface, px : i16, py : i16){
// this algorithm is dumb
// TODO use a proper algorithm for circle drawing
let r = 10;
let mut x = -r;
let mut y = -r;
while(y <= r){
if x*x + y*y <= r*r {
draw_plot(screen, px+x, py+y);
}
x += 1;
if r < x {
x = -r;
y += 1;
}
}
}
fn draw_plot(screen : &sdl::video::Surface, px : i16, py : i16){
let rect = Some(sdl::Rect {
x: px,
y: py,
w: 1,
h: 1
});
let color= sdl::video::RGB(255,255,255);
screen.fill_rect(rect,color);
}
fn main() {
sdl::init([sdl::InitVideo]);
sdl::wm::set_caption("Bouncing Balls On a Big Ball", "Bobobib");
let screen = match sdl::video::set_video_mode
(800, 600, 32, [sdl::video::HWSurface],[sdl::video::DoubleBuf]) {
Ok(screen) => screen,
Err(err) => fail!("Impossible to open screen: {}", err)
};
let mut mng = init_system();
let mut cycle = 1;
'main : loop {
'event : loop {
match sdl::event::poll_event() {
sdl::event::QuitEvent => break 'main,
sdl::event::NoEvent => break 'event,
sdl::event::KeyEvent(k, _, _, _)
if k == sdl::event::EscapeKey
=> break 'main,
_ => {}
}
}
system_loop(mng, cycle, screen);
cycle += 1;
screen.flip();
// if cycle == 100 {
// break 'main;
// }
}
sdl::quit();
}
| {
let mut mng = Manager::new();
let e1 = mng.new_entity();
assert!( e1 == 0 );
let e2 = mng.new_entity();
assert!( e2 == 1 );
} | identifier_body |
main.rs | /*
License stuff
----------------------------------------------------------
Copyright 2013 Joris Rehm
Licensed under the Apache License, Version 2.0.
Licensed under The MIT License.
See joined files "LICENSE-APACHE" and "LICENSE-MIT".
----------------------------------------------------------
*/
extern mod sdl;
use ecs::FunctionalTable;
use ecs::Entity;
use vec::Vec;
use vec::MyFloat;
mod ecs;
mod vec;
/* Let's define a data structure for managing our ECS. */
struct Manager {
/* A counter to remember the number of created entities. */
priv entities_numbers : uint,
// tables
table_position : FunctionalTable<Vec>,
table_velocity : FunctionalTable<Vec>,
table_acceleration : FunctionalTable<Vec>,
table_mass : FunctionalTable<MyFloat>
}
impl Manager {
/* Initializing an empty ECS */
fn new() -> ~Manager {
~Manager {
entities_numbers : 0,
table_position : FunctionalTable::new(~"position"),
table_velocity : FunctionalTable::new(~"velocity"),
table_acceleration : FunctionalTable::new(~"acceleration"),
table_mass : FunctionalTable::new(~"mass")
}
}
/* Creating a new entity is just taking an unused id. */
fn new_entity(&mut self) -> Entity {
let res = self.entities_numbers;
self.entities_numbers = self.entities_numbers + 1;
info!("create_entity id={}", res);
res
}
/* TODO: how to manage the deletion of entities?
It depends on whether we iterate on entity ids or on component tables.
Maybe it's enough to delete the associated components.
We may use a pool of unused ids for recycling (probably not
really needed) or just search for unassociated ids.*/
} /* impl Manager */
#[test]
fn test_entity_management() {
let mut mng = Manager::new();
let e1 = mng.new_entity();
assert!( e1 == 0 );
let e2 = mng.new_entity();
assert!( e2 == 1 );
}
/* ----------------------------------------------------------- */
// TODO define an integration function and refactor
// compute_velocity and compute_position.
// TODO it's not really useful to record acceleration
/* ----------------------------------------------------------- */
fn | () -> ~Manager {
println("use RUST_LOG=3 to see log");
let mut mng = Manager::new();
let zero = Vec(0., 0., 0.);
//
for i in range(0,10) {
let entity2 = mng.new_entity();
mng.table_position.set(entity2, Vec::rand_around_origin(5., 25.) );
mng.table_velocity.set(entity2, Vec::rand_around_origin(0.2, 0.5) );
mng.table_acceleration.set(entity2, zero);
mng.table_mass.set(entity2, 1.);
}
//
return mng;
}
fn system_loop(mng : &mut Manager, cycle : uint, screen: &sdl::video::Surface){
let delta_time = 1./24. as MyFloat; // TODO compute real delay
info!("start_cycle {}",cycle);
compute_acceleration(mng);
compute_velocity(mng, delta_time);
compute_position(mng, delta_time);
draw(mng,screen)
}
fn draw(mng: &Manager, screen: &sdl::video::Surface){
screen.clear();
for x in mng.table_position.iter() {
let (&e,pos) : (&Entity,&Vec) = x;
let Vec(pos_x,pos_y,_) = *pos;
let mut x = 0;
let mut y = 0;
let scale = 5.;
x = (pos_x*scale + 400.) as i16;
y = (pos_y*scale + 300.) as i16 ;
draw_circle(screen, x, y);
}
}
// Having an acceleration implies having a velocity!
// Having an acceleration implies having a mass!
fn compute_acceleration(mng: &mut Manager) {
let comp = |e: uint, accel: &Vec| {
// note: accel not used
let pos = mng.table_position.get(e);
let force = gravity_force_at(*pos);
let mass = *mng.table_mass.get(e);
force.scale(1./mass)
};
mng.table_acceleration.apply(comp);
}
fn gravity_force_at(pos: Vec) -> Vec {
let earth_gravity = 9.80665; // in m/s²
let planet_center = Vec(0.,0.,0.);
// the vector from planet center to the pos
let pos_from_planet = pos - planet_center;
// TODO and yes it's useless :)
let dist = pos_from_planet.length();
let gravity_force =
pos_from_planet.normalize(
).scale(- earth_gravity
).scale(1./(dist*dist));
if gravity_force.is_nan() {
Vec(0.,0.,0.)
} else {
gravity_force
}
}
// integration of acceleration
// having an acceleration implies having a velocity
fn compute_velocity (mng: &mut Manager, dt: MyFloat) {
for x in mng.table_acceleration.iter() {
let (&e,accel) : (&Entity,&Vec) = x;
let vel : Vec = *mng.table_velocity.get(e);
let new_vel = vel + accel.scale(dt);
mng.table_velocity.set(e, new_vel);
}
}
// Move all the entities with the velocity applied during dt time.
// Having a velocity implies having a position!
fn compute_position(mng: &mut Manager, dt:MyFloat) {
for x in mng.table_velocity.iter() {
let (&e,vel) : (&Entity,&Vec) = x;
let pos = *mng.table_position.get(e);
let new_pos = pos + vel.scale(dt);
mng.table_position.set(e, new_pos);
}
}
fn draw_circle(screen : &sdl::video::Surface, px : i16, py : i16){
// this algorithm is dumb
// TODO use a proper algorithm for circle drawing
let r = 10;
let mut x = -r;
let mut y = -r;
while(y <= r){
if x*x + y*y <= r*r {
draw_plot(screen, px+x, py+y);
}
x += 1;
if r < x {
x = -r;
y += 1;
}
}
}
fn draw_plot(screen : &sdl::video::Surface, px : i16, py : i16){
let rect = Some(sdl::Rect {
x: px,
y: py,
w: 1,
h: 1
});
let color= sdl::video::RGB(255,255,255);
screen.fill_rect(rect,color);
}
fn main() {
sdl::init([sdl::InitVideo]);
sdl::wm::set_caption("Bouncing Balls On a Big Ball", "Bobobib");
let screen = match sdl::video::set_video_mode
(800, 600, 32, [sdl::video::HWSurface],[sdl::video::DoubleBuf]) {
Ok(screen) => screen,
Err(err) => fail!("Impossible to open screen: {}", err)
};
let mut mng = init_system();
let mut cycle = 1;
'main : loop {
'event : loop {
match sdl::event::poll_event() {
sdl::event::QuitEvent => break 'main,
sdl::event::NoEvent => break 'event,
sdl::event::KeyEvent(k, _, _, _)
if k == sdl::event::EscapeKey
=> break 'main,
_ => {}
}
}
system_loop(mng, cycle, screen);
cycle += 1;
screen.flip();
// if cycle == 100 {
// break 'main;
// }
}
sdl::quit();
}
| init_system | identifier_name |
lib.rs | // =================================================================
//
// * WARNING *
//
// This file is generated!
//
// Changes made to this file will be overwritten. If changes are
// required to the generated code, the service_crategen project
// must be updated to generate the changes.
//
// =================================================================
#![doc(html_logo_url = "https://raw.githubusercontent.com/rusoto/rusoto/master/assets/logo-square.png")]
//! <p><fullname>Amazon DynamoDB</fullname> <p>Amazon DynamoDB Streams provides API actions for accessing streams and processing stream records. To learn more about application development with Streams, see <a href="http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Streams.html">Capturing Table Activity with DynamoDB Streams</a> in the Amazon DynamoDB Developer Guide.</p></p>
//!
//! If you're using the service, you're probably looking for [DynamoDbStreamsClient](struct.DynamoDbStreamsClient.html) and [DynamoDbStreams](trait.DynamoDbStreams.html).
extern crate futures;
extern crate rusoto_core;
extern crate serde;
#[macro_use] | extern crate serde_derive;
extern crate serde_json;
mod generated;
mod custom;
pub use generated::*;
pub use custom::*; | random_line_split |
|
disallow_id_as_alias.rs | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use common::WithLocation;
use errors::try2;
use graphql_ir::{
LinkedField, Program, ScalarField, ValidationError, ValidationMessage, Validator,
};
use interner::{Intern, StringKey};
use schema::{FieldID, Schema};
pub fn disallow_id_as_alias<'s>(program: &'s Program<'s>) -> Vec<ValidationError> {
let mut validator = DisallowIdAsAlias::new(program);
match validator.validate_program(program) {
Err(e) => e,
Ok(_) => Default::default(),
}
}
struct DisallowIdAsAlias<'s> {
program: &'s Program<'s>,
id_key: StringKey,
}
impl<'s> DisallowIdAsAlias<'s> {
fn new(program: &'s Program<'s>) -> Self { | }
}
}
impl<'s> Validator for DisallowIdAsAlias<'s> {
const NAME: &'static str = "DisallowIdAsAlias";
const VALIDATE_ARGUMENTS: bool = false;
const VALIDATE_DIRECTIVES: bool = false;
fn validate_linked_field(&mut self, field: &LinkedField) -> Result<(), Vec<ValidationError>> {
if let Some(alias) = field.alias {
try2(
validate_field_alias(
self.program.schema(),
self.id_key,
&alias,
field.definition.item,
),
self.validate_selections(&field.selections),
)?;
Ok(())
} else {
self.validate_selections(&field.selections)
}
}
fn validate_scalar_field(&mut self, field: &ScalarField) -> Result<(), Vec<ValidationError>> {
if let Some(alias) = field.alias {
validate_field_alias(
self.program.schema(),
self.id_key,
&alias,
field.definition.item,
)
} else {
Ok(())
}
}
}
fn validate_field_alias<'s>(
schema: &'s Schema,
id_key: StringKey,
alias: &WithLocation<StringKey>,
field: FieldID,
) -> Result<(), Vec<ValidationError>> {
if alias.item == id_key && schema.field(field).name != id_key {
Err(vec![ValidationError::new(
ValidationMessage::DisallowIdAsAliasError(),
vec![alias.location],
)])
} else {
Ok(())
}
} | Self {
program,
id_key: "id".intern(), | random_line_split |
disallow_id_as_alias.rs | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use common::WithLocation;
use errors::try2;
use graphql_ir::{
LinkedField, Program, ScalarField, ValidationError, ValidationMessage, Validator,
};
use interner::{Intern, StringKey};
use schema::{FieldID, Schema};
pub fn disallow_id_as_alias<'s>(program: &'s Program<'s>) -> Vec<ValidationError> {
let mut validator = DisallowIdAsAlias::new(program);
match validator.validate_program(program) {
Err(e) => e,
Ok(_) => Default::default(),
}
}
struct DisallowIdAsAlias<'s> {
program: &'s Program<'s>,
id_key: StringKey,
}
impl<'s> DisallowIdAsAlias<'s> {
fn new(program: &'s Program<'s>) -> Self {
Self {
program,
id_key: "id".intern(),
}
}
}
impl<'s> Validator for DisallowIdAsAlias<'s> {
const NAME: &'static str = "DisallowIdAsAlias";
const VALIDATE_ARGUMENTS: bool = false;
const VALIDATE_DIRECTIVES: bool = false;
fn validate_linked_field(&mut self, field: &LinkedField) -> Result<(), Vec<ValidationError>> {
if let Some(alias) = field.alias {
try2(
validate_field_alias(
self.program.schema(),
self.id_key,
&alias,
field.definition.item,
),
self.validate_selections(&field.selections),
)?;
Ok(())
} else {
self.validate_selections(&field.selections)
}
}
fn validate_scalar_field(&mut self, field: &ScalarField) -> Result<(), Vec<ValidationError>> {
if let Some(alias) = field.alias {
validate_field_alias(
self.program.schema(),
self.id_key,
&alias,
field.definition.item,
)
} else {
Ok(())
}
}
}
fn validate_field_alias<'s>(
schema: &'s Schema,
id_key: StringKey,
alias: &WithLocation<StringKey>,
field: FieldID,
) -> Result<(), Vec<ValidationError>> {
if alias.item == id_key && schema.field(field).name!= id_key | else {
Ok(())
}
}
| {
Err(vec![ValidationError::new(
ValidationMessage::DisallowIdAsAliasError(),
vec![alias.location],
)])
} | conditional_block |
disallow_id_as_alias.rs | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use common::WithLocation;
use errors::try2;
use graphql_ir::{
LinkedField, Program, ScalarField, ValidationError, ValidationMessage, Validator,
};
use interner::{Intern, StringKey};
use schema::{FieldID, Schema};
pub fn disallow_id_as_alias<'s>(program: &'s Program<'s>) -> Vec<ValidationError> {
let mut validator = DisallowIdAsAlias::new(program);
match validator.validate_program(program) {
Err(e) => e,
Ok(_) => Default::default(),
}
}
struct DisallowIdAsAlias<'s> {
program: &'s Program<'s>,
id_key: StringKey,
}
impl<'s> DisallowIdAsAlias<'s> {
fn new(program: &'s Program<'s>) -> Self {
Self {
program,
id_key: "id".intern(),
}
}
}
impl<'s> Validator for DisallowIdAsAlias<'s> {
const NAME: &'static str = "DisallowIdAsAlias";
const VALIDATE_ARGUMENTS: bool = false;
const VALIDATE_DIRECTIVES: bool = false;
fn validate_linked_field(&mut self, field: &LinkedField) -> Result<(), Vec<ValidationError>> {
if let Some(alias) = field.alias {
try2(
validate_field_alias(
self.program.schema(),
self.id_key,
&alias,
field.definition.item,
),
self.validate_selections(&field.selections),
)?;
Ok(())
} else {
self.validate_selections(&field.selections)
}
}
fn validate_scalar_field(&mut self, field: &ScalarField) -> Result<(), Vec<ValidationError>> {
if let Some(alias) = field.alias {
validate_field_alias(
self.program.schema(),
self.id_key,
&alias,
field.definition.item,
)
} else {
Ok(())
}
}
}
fn validate_field_alias<'s>(
schema: &'s Schema,
id_key: StringKey,
alias: &WithLocation<StringKey>,
field: FieldID,
) -> Result<(), Vec<ValidationError>> | {
if alias.item == id_key && schema.field(field).name != id_key {
Err(vec![ValidationError::new(
ValidationMessage::DisallowIdAsAliasError(),
vec![alias.location],
)])
} else {
Ok(())
}
} | identifier_body |
|
disallow_id_as_alias.rs | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use common::WithLocation;
use errors::try2;
use graphql_ir::{
LinkedField, Program, ScalarField, ValidationError, ValidationMessage, Validator,
};
use interner::{Intern, StringKey};
use schema::{FieldID, Schema};
pub fn | <'s>(program: &'s Program<'s>) -> Vec<ValidationError> {
let mut validator = DisallowIdAsAlias::new(program);
match validator.validate_program(program) {
Err(e) => e,
Ok(_) => Default::default(),
}
}
struct DisallowIdAsAlias<'s> {
program: &'s Program<'s>,
id_key: StringKey,
}
impl<'s> DisallowIdAsAlias<'s> {
fn new(program: &'s Program<'s>) -> Self {
Self {
program,
id_key: "id".intern(),
}
}
}
impl<'s> Validator for DisallowIdAsAlias<'s> {
const NAME: &'static str = "DisallowIdAsAlias";
const VALIDATE_ARGUMENTS: bool = false;
const VALIDATE_DIRECTIVES: bool = false;
fn validate_linked_field(&mut self, field: &LinkedField) -> Result<(), Vec<ValidationError>> {
if let Some(alias) = field.alias {
try2(
validate_field_alias(
self.program.schema(),
self.id_key,
&alias,
field.definition.item,
),
self.validate_selections(&field.selections),
)?;
Ok(())
} else {
self.validate_selections(&field.selections)
}
}
fn validate_scalar_field(&mut self, field: &ScalarField) -> Result<(), Vec<ValidationError>> {
if let Some(alias) = field.alias {
validate_field_alias(
self.program.schema(),
self.id_key,
&alias,
field.definition.item,
)
} else {
Ok(())
}
}
}
fn validate_field_alias<'s>(
schema: &'s Schema,
id_key: StringKey,
alias: &WithLocation<StringKey>,
field: FieldID,
) -> Result<(), Vec<ValidationError>> {
if alias.item == id_key && schema.field(field).name != id_key {
Err(vec![ValidationError::new(
ValidationMessage::DisallowIdAsAliasError(),
vec![alias.location],
)])
} else {
Ok(())
}
}
| disallow_id_as_alias | identifier_name |
borrowck-loan-in-overloaded-op.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct foo(~uint);
impl Add<foo, foo> for foo {
fn add(&self, f: &foo) -> foo {
let foo(~i) = *self;
let foo(~j) = *f;
foo(~(i + j))
}
}
fn | () {
let x = foo(~3);
let _y = x + {x}; // the `{x}` forces a move to occur
//~^ ERROR cannot move out of `x`
}
| main | identifier_name |
borrowck-loan-in-overloaded-op.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct foo(~uint);
impl Add<foo, foo> for foo {
fn add(&self, f: &foo) -> foo |
}
fn main() {
let x = foo(~3);
let _y = x + {x}; // the `{x}` forces a move to occur
//~^ ERROR cannot move out of `x`
}
| {
let foo(~i) = *self;
let foo(~j) = *f;
foo(~(i + j))
} | identifier_body |
borrowck-loan-in-overloaded-op.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms. |
impl Add<foo, foo> for foo {
fn add(&self, f: &foo) -> foo {
let foo(~i) = *self;
let foo(~j) = *f;
foo(~(i + j))
}
}
fn main() {
let x = foo(~3);
let _y = x + {x}; // the `{x}` forces a move to occur
//~^ ERROR cannot move out of `x`
} |
struct foo(~uint); | random_line_split |
main.rs | extern crate rand;
use std::fmt;
use std::{thread, time};
use std::process;
use rand::{thread_rng, Rng};
trait State : fmt::Display {
fn do_clock(&self, hour: u32) -> Box<State>;
fn do_use(&self, context: Box<&Context>);
fn do_alarm(&self, context: Box<&Context>);
fn do_phone(&self, context: Box<&Context>);
fn value(&self) -> String;
}
impl PartialEq<State> for State {
fn eq(&self, other: &State) -> bool {
self.value() == other.value()
}
}
trait Context {
fn set_clock(&mut self, hour: u32);
fn change_state(&mut self, state: Box<State>);
fn call_security_center(&self, msg: String);
fn record_log(&self, msg: String);
}
#[derive(PartialEq)]
struct DayState {}
impl DayState {
fn new() -> DayState {
DayState {}
}
}
impl State for DayState {
fn do_clock(&self, hour: u32) -> Box<State> {
if hour < 9 || 17 <= hour {
Box::new(NightState::new())
} else {
Box::new(DayState::new())
}
}
fn do_use(&self, context: Box<&Context>) {
context.record_log("金庫使用(昼間)".to_string());
}
fn do_alarm(&self, context: Box<&Context>) {
context.call_security_center("非常ベル(昼間)".to_string());
}
fn do_phone(&self, context: Box<&Context>) {
context.call_security_center("通常の通話(昼間)".to_string());
}
fn value(&self) -> String {
"昼間".to_string()
}
}
impl fmt::Display for DayState {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "[昼間]")
}
}
#[derive(PartialEq)]
struct NightState {}
impl NightState {
fn new() -> NightState {
NightState {}
}
}
impl State for NightState {
fn do_clock(&self, hour: u32) -> Box<State> {
if 9 <= hour && hour < 17 {
Box::new(DayState::new())
} else {
Box::new(NightState::new())
}
}
fn do_use(&self, context: Box<&Context>) {
context.call_security_center("非常:夜間の金庫使用!".to_string());
}
fn do_alarm(&self, context: Box<&Context>) {
context.call_security_center("非常ベル(夜間)".to_string());
}
fn do_phone(&self, context: Box<&Context>) {
context.record_log("夜間の通話録音".to_string());
}
fn value(&self) -> String {
"夜間".to_string()
}
}
impl fmt::Display for NightState {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "[夜間]")
}
}
struct SafeFrame {
title: String,
state: Box<State>,
}
impl SafeFrame {
fn new(title: String, state: Box<State>) -> SafeFrame {
SafeFrame {
title: title,
state: state,
}
}
fn click_use(&self) {
self.state.do_use(Box::new(self));
}
fn click_alarm(&self) {
self.state.do_alarm(Box::new(self));
}
fn click_phone(&self) {
self.state.do_phone(Box::new(self));
} | fn click_exit(&self) {
process::exit(0);
}
}
impl Context for SafeFrame {
fn set_clock(&mut self, hour: u32) {
println!("現在時刻は{0: >02}:00", hour);
let state = self.state.do_clock(hour);
if &self.state != &state {
self.change_state(state);
}
}
fn change_state(&mut self, state: Box<State>) {
println!("{}から{}へ状態が変化しました。", self.state, state);
self.state = state;
}
fn call_security_center(&self, msg: String) {
println!("call! {}", msg);
}
fn record_log(&self, msg: String) {
println!("record... {}", msg);
}
}
fn main() {
let mut frame = SafeFrame::new("State Sample".to_string(), Box::new(NightState::new()));
let mut rng = thread_rng();
println!("------------");
println!("{}", frame.title);
println!("------------\n");
loop {
for hour in 0..24 {
frame.set_clock(hour);
match rng.gen_range(0, 3) {
0 => frame.click_use(),
1 => frame.click_alarm(),
2 => frame.click_phone(),
_ => frame.click_exit(),
}
thread::sleep(time::Duration::from_millis(1000));
}
}
} | random_line_split |
|
main.rs | extern crate rand;
use std::fmt;
use std::{thread, time};
use std::process;
use rand::{thread_rng, Rng};
trait State : fmt::Display {
fn do_clock(&self, hour: u32) -> Box<State>;
fn do_use(&self, context: Box<&Context>);
fn do_alarm(&self, context: Box<&Context>);
fn do_phone(&self, context: Box<&Context>);
fn value(&self) -> String;
}
impl PartialEq<State> for State {
fn eq(&self, other: &State) -> bool {
self.value() == other.value()
}
}
trait Context {
fn set_clock(&mut self, hour: u32);
fn change_state(&mut self, state: Box<State>);
fn call_security_center(&self, msg: String);
fn record_log(&self, msg: String);
}
#[derive(PartialEq)]
struct DayState {}
impl DayState {
fn new() -> DayState {
DayState {}
}
}
impl State for DayState {
fn do_clock(&self, hour: u32) -> Box<State> {
if hour < 9 || 17 <= hour {
Box::new(NightState::new())
} else {
Box::new(DayState::new())
}
}
fn do_use(&self, context: Box<&Context>) {
context.record_log("金庫使用(昼間)".to_string());
}
fn do_alarm(&self, context: Box<&Context>) {
context.call_security_center("非常ベル(昼間)".to_string());
}
fn do_phone(&self, context: Box<&Context>) {
context.call_security_center("通常の通話(昼間)".to_string());
}
fn value(&self) -> String {
"昼間".to_string()
}
}
impl fmt::Display for DayState {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "[昼間]")
}
}
#[derive(PartialEq)]
struct NightState {}
impl NightState {
fn new() -> NightState {
NightState {}
}
}
impl State for NightState {
fn do_clock(&self, hour: u32) -> Box<State> {
if 9 <= hour && hour < 17 {
Box::new(DayState::new())
} else {
Box::new(NightState::new())
}
}
fn do_use(&self, context: Box<&Context>) {
context.call_security_center("非常:夜間の金庫使用!".to_string());
}
fn do_alarm(&self, context: Box<&Context>) {
context.call_security_center("非常ベル(夜間)".to_string());
}
fn do_phone(&self, context: Box<&Context>) {
context.record_log("夜間の通話録音".to_string());
}
fn value(&self) -> String {
"夜間".to_string()
}
}
impl fmt::Display for NightState {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "[夜間]")
}
}
struct SafeFrame {
title: String,
state: Box<State>,
}
impl SafeFrame {
fn new(title: String, state: Box<State>) -> SafeFrame {
SafeFrame {
title: title,
state: state,
}
}
fn click_use(&self) {
self.state.do_use(Box::new(self));
}
fn click_alarm(&self) {
self.state.do_alarm(Box::new(self));
}
fn click_phone(&self) {
self.state.do_phone(Box::new(self));
}
fn click_exit(&self) {
process::exit(0);
}
}
impl Context for SafeFrame {
fn set_clock(&mut self, hour: u32) {
println!("現在時刻は{0: >02}:00", hour);
let state = self.state.do_clock(hour);
if &self.state != &state {
self.change_state(state);
}
}
fn change_state(&mut self, state: Box<State>) {
println!("{}から{}へ状態が変化しました。", self.state, state);
self.state = state;
}
fn call_security_center(&self, msg: String) {
println!("call! {}", msg);
}
fn record_log(&self, msg: String) {
println!("record... {}", msg);
}
}
f | new("State Sample".to_string(), Box::new(NightState::new()));
let mut rng = thread_rng();
println!("------------");
println!("{}", frame.title);
println!("------------\n");
loop {
for hour in 0..24 {
frame.set_clock(hour);
match rng.gen_range(0, 3) {
0 => frame.click_use(),
1 => frame.click_alarm(),
2 => frame.click_phone(),
_ => frame.click_exit(),
}
thread::sleep(time::Duration::from_millis(1000));
}
}
}
| n main() {
let mut frame = SafeFrame:: | identifier_body |
main.rs | extern crate rand;
use std::fmt;
use std::{thread, time};
use std::process;
use rand::{thread_rng, Rng};
trait State : fmt::Display {
fn do_clock(&self, hour: u32) -> Box<State>;
fn do_use(&self, context: Box<&Context>);
fn do_alarm(&self, context: Box<&Context>);
fn do_phone(&self, context: Box<&Context>);
fn value(&self) -> String;
}
impl PartialEq<State> for State {
fn eq(&self, other: &State) -> bool {
self.value() == other.value()
}
}
trait Context {
fn set_clock(&mut self, hour: u32);
fn change_state(&mut self, state: Box<State>);
fn call_security_center(&self, msg: String);
fn record_log(&self, msg: String);
}
#[derive(PartialEq)]
struct DayState {}
impl DayState {
fn new() -> DayState {
DayState {}
}
}
impl State for DayState {
fn do_clock(&self, hour: u32) -> Box<State> {
if hour < 9 || 17 <= hour {
Box::new(NightState::new())
} else {
Box::new(DayState::new())
}
}
fn do_use(&self, context: Box<&Context>) {
context.record_log("金庫使用(昼間)".to_string());
}
fn do_alarm(&self, context: Box<&Context>) {
context.call_security_center("非常ベル(昼間)".to_string());
}
fn do_phone(&self, context: Box<&Context>) {
context.call_security_center("通常の通話(昼間)".to_string());
}
fn value(&self) -> String {
"昼間".to_string()
}
}
impl fmt::Display for DayState {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "[昼間]")
}
}
#[derive(PartialEq)]
struct NightState {}
impl NightState {
fn new() -> NightState {
NightState {}
}
}
impl State for NightState {
fn do_clock(&self, hour: u32) -> Box<State> {
| 9 <= hour && hour < 17 {
Box::new(DayState::new())
} else {
Box::new(NightState::new())
}
}
fn do_use(&self, context: Box<&Context>) {
context.call_security_center("非常:夜間の金庫使用!".to_string());
}
fn do_alarm(&self, context: Box<&Context>) {
context.call_security_center("非常ベル(夜間)".to_string());
}
fn do_phone(&self, context: Box<&Context>) {
context.record_log("夜間の通話録音".to_string());
}
fn value(&self) -> String {
"夜間".to_string()
}
}
impl fmt::Display for NightState {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "[夜間]")
}
}
struct SafeFrame {
title: String,
state: Box<State>,
}
impl SafeFrame {
fn new(title: String, state: Box<State>) -> SafeFrame {
SafeFrame {
title: title,
state: state,
}
}
fn click_use(&self) {
self.state.do_use(Box::new(self));
}
fn click_alarm(&self) {
self.state.do_alarm(Box::new(self));
}
fn click_phone(&self) {
self.state.do_phone(Box::new(self));
}
fn click_exit(&self) {
process::exit(0);
}
}
impl Context for SafeFrame {
fn set_clock(&mut self, hour: u32) {
println!("現在時刻は{0: >02}:00", hour);
let state = self.state.do_clock(hour);
if &self.state != &state {
self.change_state(state);
}
}
fn change_state(&mut self, state: Box<State>) {
println!("{}から{}へ状態が変化しました。", self.state, state);
self.state = state;
}
fn call_security_center(&self, msg: String) {
println!("call! {}", msg);
}
fn record_log(&self, msg: String) {
println!("record... {}", msg);
}
}
fn main() {
let mut frame = SafeFrame::new("State Sample".to_string(), Box::new(NightState::new()));
let mut rng = thread_rng();
println!("------------");
println!("{}", frame.title);
println!("------------\n");
loop {
for hour in 0..24 {
frame.set_clock(hour);
match rng.gen_range(0, 3) {
0 => frame.click_use(),
1 => frame.click_alarm(),
2 => frame.click_phone(),
_ => frame.click_exit(),
}
thread::sleep(time::Duration::from_millis(1000));
}
}
}
| if | identifier_name |
dynamic_tests.rs | use crate::cell::RefCell;
use crate::collections::HashMap;
use crate::thread_local;
#[test]
fn smoke() {
fn square(i: i32) -> i32 {
i * i
}
thread_local!(static FOO: i32 = square(3));
FOO.with(|f| {
assert_eq!(*f, 9);
});
}
#[test]
fn hashmap() |
#[test]
fn refcell_vec() {
thread_local!(static FOO: RefCell<Vec<u32>> = RefCell::new(vec![1, 2, 3]));
FOO.with(|vec| {
assert_eq!(vec.borrow().len(), 3);
vec.borrow_mut().push(4);
assert_eq!(vec.borrow()[3], 4);
});
}
| {
fn map() -> RefCell<HashMap<i32, i32>> {
let mut m = HashMap::new();
m.insert(1, 2);
RefCell::new(m)
}
thread_local!(static FOO: RefCell<HashMap<i32, i32>> = map());
FOO.with(|map| {
assert_eq!(map.borrow()[&1], 2);
});
} | identifier_body |
dynamic_tests.rs | use crate::cell::RefCell;
use crate::collections::HashMap;
use crate::thread_local;
#[test]
fn smoke() {
fn square(i: i32) -> i32 {
i * i
}
thread_local!(static FOO: i32 = square(3));
FOO.with(|f| {
assert_eq!(*f, 9);
});
}
#[test]
fn hashmap() {
fn map() -> RefCell<HashMap<i32, i32>> {
let mut m = HashMap::new();
m.insert(1, 2);
RefCell::new(m) |
FOO.with(|map| {
assert_eq!(map.borrow()[&1], 2);
});
}
#[test]
fn refcell_vec() {
thread_local!(static FOO: RefCell<Vec<u32>> = RefCell::new(vec![1, 2, 3]));
FOO.with(|vec| {
assert_eq!(vec.borrow().len(), 3);
vec.borrow_mut().push(4);
assert_eq!(vec.borrow()[3], 4);
});
} | }
thread_local!(static FOO: RefCell<HashMap<i32, i32>> = map()); | random_line_split |
dynamic_tests.rs | use crate::cell::RefCell;
use crate::collections::HashMap;
use crate::thread_local;
#[test]
fn smoke() {
fn | (i: i32) -> i32 {
i * i
}
thread_local!(static FOO: i32 = square(3));
FOO.with(|f| {
assert_eq!(*f, 9);
});
}
#[test]
fn hashmap() {
fn map() -> RefCell<HashMap<i32, i32>> {
let mut m = HashMap::new();
m.insert(1, 2);
RefCell::new(m)
}
thread_local!(static FOO: RefCell<HashMap<i32, i32>> = map());
FOO.with(|map| {
assert_eq!(map.borrow()[&1], 2);
});
}
#[test]
fn refcell_vec() {
thread_local!(static FOO: RefCell<Vec<u32>> = RefCell::new(vec![1, 2, 3]));
FOO.with(|vec| {
assert_eq!(vec.borrow().len(), 3);
vec.borrow_mut().push(4);
assert_eq!(vec.borrow()[3], 4);
});
}
| square | identifier_name |
prefs.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use basedir::default_config_dir;
use embedder_traits::resources::{self, Resource};
use num_cpus;
use opts;
use rustc_serialize::json::{Json, ToJson};
use std::borrow::ToOwned;
use std::cmp::max;
use std::collections::HashMap;
use std::fs::File;
use std::io::{Read, Write, stderr};
use std::path::PathBuf;
use std::sync::{Arc, RwLock};
lazy_static! {
pub static ref PREFS: Preferences = {
let defaults = default_prefs();
if let Ok(prefs) = read_prefs(&resources::read_string(Resource::Preferences)) {
defaults.extend(prefs);
}
defaults
};
}
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub enum PrefValue {
Boolean(bool),
String(String),
Number(f64),
Missing,
}
impl PrefValue {
pub fn from_json(data: Json) -> Result<PrefValue, ()> {
let value = match data {
Json::Boolean(x) => PrefValue::Boolean(x),
Json::String(x) => PrefValue::String(x),
Json::F64(x) => PrefValue::Number(x),
Json::I64(x) => PrefValue::Number(x as f64),
Json::U64(x) => PrefValue::Number(x as f64),
_ => return Err(()),
};
Ok(value)
}
pub fn as_boolean(&self) -> Option<bool> {
match *self {
PrefValue::Boolean(value) => Some(value),
_ => None,
}
}
pub fn as_string(&self) -> Option<&str> {
match *self {
PrefValue::String(ref value) => Some(&value),
_ => None,
}
}
pub fn as_i64(&self) -> Option<i64> {
match *self {
PrefValue::Number(x) => Some(x as i64),
_ => None,
}
}
pub fn as_u64(&self) -> Option<u64> {
match *self {
PrefValue::Number(x) => Some(x as u64),
_ => None,
}
}
}
impl ToJson for PrefValue {
fn to_json(&self) -> Json {
match *self {
PrefValue::Boolean(x) => Json::Boolean(x),
PrefValue::String(ref x) => Json::String(x.clone()),
PrefValue::Number(x) => Json::F64(x),
PrefValue::Missing => Json::Null,
}
}
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub enum Pref {
NoDefault(Arc<PrefValue>),
WithDefault(Arc<PrefValue>, Option<Arc<PrefValue>>),
}
impl Pref {
pub fn new(value: PrefValue) -> Pref {
Pref::NoDefault(Arc::new(value))
}
fn new_default(value: PrefValue) -> Pref {
Pref::WithDefault(Arc::new(value), None)
}
fn from_json(data: Json) -> Result<Pref, ()> {
let value = PrefValue::from_json(data)?;
Ok(Pref::new_default(value))
}
pub fn value(&self) -> &Arc<PrefValue> {
match *self {
Pref::NoDefault(ref x) => x,
Pref::WithDefault(ref default, ref override_value) => match *override_value {
Some(ref x) => x,
None => default,
},
}
}
fn set(&mut self, value: PrefValue) {
// TODO - this should error if we try to override a pref of one type
// with a value of a different type
match *self {
Pref::NoDefault(ref mut pref_value) => *pref_value = Arc::new(value),
Pref::WithDefault(_, ref mut override_value) => *override_value = Some(Arc::new(value)),
}
}
}
impl ToJson for Pref {
fn to_json(&self) -> Json |
}
pub fn default_prefs() -> Preferences {
let prefs = Preferences(Arc::new(RwLock::new(HashMap::new())));
prefs.set(
"layout.threads",
PrefValue::Number(max(num_cpus::get() * 3 / 4, 1) as f64),
);
prefs
}
pub fn read_prefs(txt: &str) -> Result<HashMap<String, Pref>, ()> {
let json = Json::from_str(txt).or_else(|e| {
println!("Ignoring invalid JSON in preferences: {:?}.", e);
Err(())
})?;
let mut prefs = HashMap::new();
if let Json::Object(obj) = json {
for (name, value) in obj.into_iter() {
match Pref::from_json(value) {
Ok(x) => {
prefs.insert(name, x);
},
Err(_) => println!(
"Ignoring non-boolean/string/i64 preference value for {:?}",
name
),
}
}
}
Ok(prefs)
}
pub fn add_user_prefs() {
match opts::get().config_dir {
Some(ref config_path) => {
let mut path = PathBuf::from(config_path);
init_user_prefs(&mut path);
},
None => {
if let Some(mut path) = default_config_dir() {
if path.join("prefs.json").exists() {
init_user_prefs(&mut path);
}
}
},
}
}
fn init_user_prefs(path: &mut PathBuf) {
path.push("prefs.json");
if let Ok(mut file) = File::open(path) {
let mut txt = String::new();
file.read_to_string(&mut txt).expect("Can't read user prefs");
if let Ok(prefs) = read_prefs(&txt) {
PREFS.extend(prefs);
}
} else {
writeln!(
&mut stderr(),
"Error opening prefs.json from config directory"
).expect("failed printing to stderr");
}
}
pub struct Preferences(Arc<RwLock<HashMap<String, Pref>>>);
impl Preferences {
pub fn get(&self, name: &str) -> Arc<PrefValue> {
self.0
.read()
.unwrap()
.get(name)
.map_or(Arc::new(PrefValue::Missing), |x| x.value().clone())
}
pub fn cloned(&self) -> HashMap<String, Pref> {
self.0.read().unwrap().clone()
}
pub fn set(&self, name: &str, value: PrefValue) {
let mut prefs = self.0.write().unwrap();
if let Some(pref) = prefs.get_mut(name) {
pref.set(value);
return;
}
prefs.insert(name.to_owned(), Pref::new(value));
}
pub fn reset(&self, name: &str) -> Arc<PrefValue> {
let mut prefs = self.0.write().unwrap();
let result = match prefs.get_mut(name) {
None => return Arc::new(PrefValue::Missing),
Some(&mut Pref::NoDefault(_)) => Arc::new(PrefValue::Missing),
Some(&mut Pref::WithDefault(ref default, ref mut set_value)) => {
*set_value = None;
default.clone()
},
};
if *result == PrefValue::Missing {
prefs.remove(name);
}
result
}
pub fn reset_all(&self) {
let names = {
self.0
.read()
.unwrap()
.keys()
.cloned()
.collect::<Vec<String>>()
};
for name in names.iter() {
self.reset(name);
}
}
pub fn extend(&self, extension: HashMap<String, Pref>) {
self.0.write().unwrap().extend(extension);
}
pub fn is_webvr_enabled(&self) -> bool {
self.get("dom.webvr.enabled").as_boolean().unwrap_or(false)
}
pub fn is_dom_to_texture_enabled(&self) -> bool {
self.get("dom.webgl.dom_to_texture.enabled")
.as_boolean()
.unwrap_or(false)
}
pub fn is_webgl2_enabled(&self) -> bool {
self.get("dom.webgl2.enabled").as_boolean().unwrap_or(false)
}
}
| {
self.value().to_json()
} | identifier_body |
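A minimal usage sketch for the Preferences API above, added for illustration only; it assumes PREFS, PrefValue, and Pref from this module are in scope, and the pref name "example.flag" is made up rather than one Servo actually defines.

fn example_prefs_usage() {
    // Setting an unknown name creates a Pref::NoDefault entry.
    PREFS.set("example.flag", PrefValue::Boolean(true));
    assert_eq!(PREFS.get("example.flag").as_boolean(), Some(true));
    // Resetting a pref with no default removes it and reports Missing.
    let restored = PREFS.reset("example.flag");
    assert_eq!(*restored, PrefValue::Missing);
    assert_eq!(*PREFS.get("example.flag"), PrefValue::Missing);
}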
prefs.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use basedir::default_config_dir;
use embedder_traits::resources::{self, Resource};
use num_cpus;
use opts;
use rustc_serialize::json::{Json, ToJson};
use std::borrow::ToOwned;
use std::cmp::max;
use std::collections::HashMap;
use std::fs::File;
use std::io::{Read, Write, stderr};
use std::path::PathBuf;
use std::sync::{Arc, RwLock};
lazy_static! {
pub static ref PREFS: Preferences = {
let defaults = default_prefs();
if let Ok(prefs) = read_prefs(&resources::read_string(Resource::Preferences)) {
defaults.extend(prefs);
}
defaults
};
}
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub enum PrefValue {
Boolean(bool),
String(String),
Number(f64),
Missing,
}
impl PrefValue {
pub fn from_json(data: Json) -> Result<PrefValue, ()> {
let value = match data {
Json::Boolean(x) => PrefValue::Boolean(x),
Json::String(x) => PrefValue::String(x),
Json::F64(x) => PrefValue::Number(x),
Json::I64(x) => PrefValue::Number(x as f64),
Json::U64(x) => PrefValue::Number(x as f64),
_ => return Err(()),
};
Ok(value)
}
pub fn as_boolean(&self) -> Option<bool> {
match *self {
PrefValue::Boolean(value) => Some(value),
_ => None,
}
}
pub fn as_string(&self) -> Option<&str> {
match *self {
PrefValue::String(ref value) => Some(&value),
_ => None,
}
}
pub fn as_i64(&self) -> Option<i64> {
match *self {
PrefValue::Number(x) => Some(x as i64),
_ => None,
}
}
pub fn as_u64(&self) -> Option<u64> {
match *self {
PrefValue::Number(x) => Some(x as u64),
_ => None,
}
}
}
impl ToJson for PrefValue {
fn to_json(&self) -> Json {
match *self {
PrefValue::Boolean(x) => Json::Boolean(x),
PrefValue::String(ref x) => Json::String(x.clone()),
PrefValue::Number(x) => Json::F64(x),
PrefValue::Missing => Json::Null,
}
}
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub enum Pref {
NoDefault(Arc<PrefValue>),
WithDefault(Arc<PrefValue>, Option<Arc<PrefValue>>),
}
impl Pref {
pub fn new(value: PrefValue) -> Pref {
Pref::NoDefault(Arc::new(value))
}
fn new_default(value: PrefValue) -> Pref {
Pref::WithDefault(Arc::new(value), None)
}
fn from_json(data: Json) -> Result<Pref, ()> {
let value = PrefValue::from_json(data)?;
Ok(Pref::new_default(value))
}
pub fn value(&self) -> &Arc<PrefValue> {
match *self {
Pref::NoDefault(ref x) => x,
Pref::WithDefault(ref default, ref override_value) => match *override_value {
Some(ref x) => x,
None => default,
},
}
}
fn set(&mut self, value: PrefValue) {
// TODO - this should error if we try to override a pref of one type
// with a value of a different type
match *self {
Pref::NoDefault(ref mut pref_value) => *pref_value = Arc::new(value),
Pref::WithDefault(_, ref mut override_value) => *override_value = Some(Arc::new(value)),
}
}
}
impl ToJson for Pref {
fn to_json(&self) -> Json {
self.value().to_json()
}
}
pub fn default_prefs() -> Preferences {
let prefs = Preferences(Arc::new(RwLock::new(HashMap::new())));
prefs.set(
"layout.threads",
PrefValue::Number(max(num_cpus::get() * 3 / 4, 1) as f64),
);
prefs
}
pub fn read_prefs(txt: &str) -> Result<HashMap<String, Pref>, ()> {
let json = Json::from_str(txt).or_else(|e| {
println!("Ignoring invalid JSON in preferences: {:?}.", e);
Err(())
})?;
let mut prefs = HashMap::new();
if let Json::Object(obj) = json {
for (name, value) in obj.into_iter() {
match Pref::from_json(value) {
Ok(x) => {
prefs.insert(name, x);
},
Err(_) => println!(
"Ignoring non-boolean/string/i64 preference value for {:?}",
name
),
}
}
}
Ok(prefs)
}
pub fn add_user_prefs() {
match opts::get().config_dir {
Some(ref config_path) => {
let mut path = PathBuf::from(config_path);
init_user_prefs(&mut path);
},
None => {
if let Some(mut path) = default_config_dir() {
if path.join("prefs.json").exists() {
init_user_prefs(&mut path);
}
}
},
}
}
fn init_user_prefs(path: &mut PathBuf) {
path.push("prefs.json");
if let Ok(mut file) = File::open(path) {
let mut txt = String::new();
file.read_to_string(&mut txt).expect("Can't read user prefs");
if let Ok(prefs) = read_prefs(&txt) {
PREFS.extend(prefs);
}
} else {
writeln!(
&mut stderr(),
"Error opening prefs.json from config directory"
).expect("failed printing to stderr");
}
}
pub struct Preferences(Arc<RwLock<HashMap<String, Pref>>>);
impl Preferences {
pub fn get(&self, name: &str) -> Arc<PrefValue> {
self.0
.read()
.unwrap()
.get(name)
.map_or(Arc::new(PrefValue::Missing), |x| x.value().clone())
}
pub fn cloned(&self) -> HashMap<String, Pref> {
self.0.read().unwrap().clone()
}
pub fn set(&self, name: &str, value: PrefValue) {
let mut prefs = self.0.write().unwrap();
if let Some(pref) = prefs.get_mut(name) {
pref.set(value);
return;
}
prefs.insert(name.to_owned(), Pref::new(value));
}
pub fn reset(&self, name: &str) -> Arc<PrefValue> {
let mut prefs = self.0.write().unwrap();
let result = match prefs.get_mut(name) {
None => return Arc::new(PrefValue::Missing),
Some(&mut Pref::NoDefault(_)) => Arc::new(PrefValue::Missing),
Some(&mut Pref::WithDefault(ref default, ref mut set_value)) => {
*set_value = None;
default.clone()
},
};
if *result == PrefValue::Missing {
prefs.remove(name);
}
result
}
pub fn reset_all(&self) {
let names = {
self.0
.read()
.unwrap()
.keys()
.cloned()
.collect::<Vec<String>>()
};
for name in names.iter() {
self.reset(name);
}
}
pub fn extend(&self, extension: HashMap<String, Pref>) {
self.0.write().unwrap().extend(extension);
}
pub fn is_webvr_enabled(&self) -> bool {
self.get("dom.webvr.enabled").as_boolean().unwrap_or(false)
}
pub fn is_dom_to_texture_enabled(&self) -> bool {
self.get("dom.webgl.dom_to_texture.enabled")
.as_boolean()
.unwrap_or(false)
}
pub fn | (&self) -> bool {
self.get("dom.webgl2.enabled").as_boolean().unwrap_or(false)
}
}
| is_webgl2_enabled | identifier_name |
prefs.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use basedir::default_config_dir;
use embedder_traits::resources::{self, Resource};
use num_cpus;
use opts;
use rustc_serialize::json::{Json, ToJson};
use std::borrow::ToOwned;
use std::cmp::max;
use std::collections::HashMap;
use std::fs::File;
use std::io::{Read, Write, stderr};
use std::path::PathBuf;
use std::sync::{Arc, RwLock};
lazy_static! {
pub static ref PREFS: Preferences = {
let defaults = default_prefs();
if let Ok(prefs) = read_prefs(&resources::read_string(Resource::Preferences)) {
defaults.extend(prefs);
}
defaults
};
}
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub enum PrefValue {
Boolean(bool),
String(String),
Number(f64),
Missing,
}
impl PrefValue {
pub fn from_json(data: Json) -> Result<PrefValue, ()> {
let value = match data {
Json::Boolean(x) => PrefValue::Boolean(x),
Json::String(x) => PrefValue::String(x),
Json::F64(x) => PrefValue::Number(x),
Json::I64(x) => PrefValue::Number(x as f64),
Json::U64(x) => PrefValue::Number(x as f64),
_ => return Err(()),
};
Ok(value)
}
pub fn as_boolean(&self) -> Option<bool> {
match *self {
PrefValue::Boolean(value) => Some(value),
_ => None,
}
}
pub fn as_string(&self) -> Option<&str> {
match *self {
PrefValue::String(ref value) => Some(&value),
_ => None,
}
}
pub fn as_i64(&self) -> Option<i64> {
match *self {
PrefValue::Number(x) => Some(x as i64),
_ => None,
}
}
pub fn as_u64(&self) -> Option<u64> {
match *self {
PrefValue::Number(x) => Some(x as u64),
_ => None,
}
}
}
impl ToJson for PrefValue {
fn to_json(&self) -> Json {
match *self {
PrefValue::Boolean(x) => Json::Boolean(x),
PrefValue::String(ref x) => Json::String(x.clone()),
PrefValue::Number(x) => Json::F64(x),
PrefValue::Missing => Json::Null,
}
} | WithDefault(Arc<PrefValue>, Option<Arc<PrefValue>>),
}
impl Pref {
pub fn new(value: PrefValue) -> Pref {
Pref::NoDefault(Arc::new(value))
}
fn new_default(value: PrefValue) -> Pref {
Pref::WithDefault(Arc::new(value), None)
}
fn from_json(data: Json) -> Result<Pref, ()> {
let value = PrefValue::from_json(data)?;
Ok(Pref::new_default(value))
}
pub fn value(&self) -> &Arc<PrefValue> {
match *self {
Pref::NoDefault(ref x) => x,
Pref::WithDefault(ref default, ref override_value) => match *override_value {
Some(ref x) => x,
None => default,
},
}
}
fn set(&mut self, value: PrefValue) {
// TODO - this should error if we try to override a pref of one type
// with a value of a different type
match *self {
Pref::NoDefault(ref mut pref_value) => *pref_value = Arc::new(value),
Pref::WithDefault(_, ref mut override_value) => *override_value = Some(Arc::new(value)),
}
}
}
impl ToJson for Pref {
fn to_json(&self) -> Json {
self.value().to_json()
}
}
pub fn default_prefs() -> Preferences {
let prefs = Preferences(Arc::new(RwLock::new(HashMap::new())));
prefs.set(
"layout.threads",
PrefValue::Number(max(num_cpus::get() * 3 / 4, 1) as f64),
);
prefs
}
pub fn read_prefs(txt: &str) -> Result<HashMap<String, Pref>, ()> {
let json = Json::from_str(txt).or_else(|e| {
println!("Ignoring invalid JSON in preferences: {:?}.", e);
Err(())
})?;
let mut prefs = HashMap::new();
if let Json::Object(obj) = json {
for (name, value) in obj.into_iter() {
match Pref::from_json(value) {
Ok(x) => {
prefs.insert(name, x);
},
Err(_) => println!(
"Ignoring non-boolean/string/i64 preference value for {:?}",
name
),
}
}
}
Ok(prefs)
}
pub fn add_user_prefs() {
match opts::get().config_dir {
Some(ref config_path) => {
let mut path = PathBuf::from(config_path);
init_user_prefs(&mut path);
},
None => {
if let Some(mut path) = default_config_dir() {
if path.join("prefs.json").exists() {
init_user_prefs(&mut path);
}
}
},
}
}
fn init_user_prefs(path: &mut PathBuf) {
path.push("prefs.json");
if let Ok(mut file) = File::open(path) {
let mut txt = String::new();
file.read_to_string(&mut txt).expect("Can't read user prefs");
if let Ok(prefs) = read_prefs(&txt) {
PREFS.extend(prefs);
}
} else {
writeln!(
&mut stderr(),
"Error opening prefs.json from config directory"
).expect("failed printing to stderr");
}
}
pub struct Preferences(Arc<RwLock<HashMap<String, Pref>>>);
impl Preferences {
pub fn get(&self, name: &str) -> Arc<PrefValue> {
self.0
.read()
.unwrap()
.get(name)
.map_or(Arc::new(PrefValue::Missing), |x| x.value().clone())
}
pub fn cloned(&self) -> HashMap<String, Pref> {
self.0.read().unwrap().clone()
}
pub fn set(&self, name: &str, value: PrefValue) {
let mut prefs = self.0.write().unwrap();
if let Some(pref) = prefs.get_mut(name) {
pref.set(value);
return;
}
prefs.insert(name.to_owned(), Pref::new(value));
}
pub fn reset(&self, name: &str) -> Arc<PrefValue> {
let mut prefs = self.0.write().unwrap();
let result = match prefs.get_mut(name) {
None => return Arc::new(PrefValue::Missing),
Some(&mut Pref::NoDefault(_)) => Arc::new(PrefValue::Missing),
Some(&mut Pref::WithDefault(ref default, ref mut set_value)) => {
*set_value = None;
default.clone()
},
};
if *result == PrefValue::Missing {
prefs.remove(name);
}
result
}
pub fn reset_all(&self) {
let names = {
self.0
.read()
.unwrap()
.keys()
.cloned()
.collect::<Vec<String>>()
};
for name in names.iter() {
self.reset(name);
}
}
pub fn extend(&self, extension: HashMap<String, Pref>) {
self.0.write().unwrap().extend(extension);
}
pub fn is_webvr_enabled(&self) -> bool {
self.get("dom.webvr.enabled").as_boolean().unwrap_or(false)
}
pub fn is_dom_to_texture_enabled(&self) -> bool {
self.get("dom.webgl.dom_to_texture.enabled")
.as_boolean()
.unwrap_or(false)
}
pub fn is_webgl2_enabled(&self) -> bool {
self.get("dom.webgl2.enabled").as_boolean().unwrap_or(false)
}
} | }
#[derive(Clone, Debug, Deserialize, Serialize)]
pub enum Pref {
NoDefault(Arc<PrefValue>), | random_line_split |
preprocessor_tests.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{
errors::*,
preprocessor::{build_transactions, extract_global_config, split_input},
};
fn parse_input(input: &str) -> Result<()> {
let config = extract_global_config("".lines(), false)?;
let (_, transactions) = split_input(input.lines(), &config)?;
build_transactions(&config, &transactions)?;
Ok(())
}
#[test]
fn parse_input_no_transactions() {
parse_input("").unwrap_err();
}
#[test]
fn parse_input_no_transactions_with_config() {
parse_input("//! no-run: verifier").unwrap_err();
}
#[rustfmt::skip]
#[test]
fn parse_input_nothing_before_first_empty_transaction() {
parse_input(r"
//! new-transaction
main() {}
").unwrap();
}
#[rustfmt::skip]
#[test]
fn parse_input_config_before_first_empty_transaction() {
parse_input(r"
//! no-run: runtime
//! new-transaction
main() {}
").unwrap_err();
}
#[rustfmt::skip]
#[test]
fn | () {
parse_input(r"
main() {}
//! new-transaction
//! new-transaction
main() {}
").unwrap_err();
}
#[rustfmt::skip]
#[test]
fn parse_input_empty_transaction_with_config() {
parse_input(r"
main() {}
//! new-transaction
//! sender: default
//! new-transaction
main() {}
").unwrap_err();
}
| parse_input_empty_transaction | identifier_name |
preprocessor_tests.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{
errors::*,
preprocessor::{build_transactions, extract_global_config, split_input},
};
fn parse_input(input: &str) -> Result<()> {
let config = extract_global_config("".lines(), false)?;
let (_, transactions) = split_input(input.lines(), &config)?;
build_transactions(&config, &transactions)?;
Ok(())
}
#[test]
fn parse_input_no_transactions() |
#[test]
fn parse_input_no_transactions_with_config() {
parse_input("//! no-run: verifier").unwrap_err();
}
#[rustfmt::skip]
#[test]
fn parse_input_nothing_before_first_empty_transaction() {
parse_input(r"
//! new-transaction
main() {}
").unwrap();
}
#[rustfmt::skip]
#[test]
fn parse_input_config_before_first_empty_transaction() {
parse_input(r"
//! no-run: runtime
//! new-transaction
main() {}
").unwrap_err();
}
#[rustfmt::skip]
#[test]
fn parse_input_empty_transaction() {
parse_input(r"
main() {}
//! new-transaction
//! new-transaction
main() {}
").unwrap_err();
}
#[rustfmt::skip]
#[test]
fn parse_input_empty_transaction_with_config() {
parse_input(r"
main() {}
//! new-transaction
//! sender: default
//! new-transaction
main() {}
").unwrap_err();
}
| {
parse_input("").unwrap_err();
} | identifier_body |
preprocessor_tests.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{
errors::*,
preprocessor::{build_transactions, extract_global_config, split_input},
};
| build_transactions(&config, &transactions)?;
Ok(())
}
#[test]
fn parse_input_no_transactions() {
parse_input("").unwrap_err();
}
#[test]
fn parse_input_no_transactions_with_config() {
parse_input("//! no-run: verifier").unwrap_err();
}
#[rustfmt::skip]
#[test]
fn parse_input_nothing_before_first_empty_transaction() {
parse_input(r"
//! new-transaction
main() {}
").unwrap();
}
#[rustfmt::skip]
#[test]
fn parse_input_config_before_first_empty_transaction() {
parse_input(r"
//! no-run: runtime
//! new-transaction
main() {}
").unwrap_err();
}
#[rustfmt::skip]
#[test]
fn parse_input_empty_transaction() {
parse_input(r"
main() {}
//! new-transaction
//! new-transaction
main() {}
").unwrap_err();
}
#[rustfmt::skip]
#[test]
fn parse_input_empty_transaction_with_config() {
parse_input(r"
main() {}
//! new-transaction
//! sender: default
//! new-transaction
main() {}
").unwrap_err();
} | fn parse_input(input: &str) -> Result<()> {
let config = extract_global_config("".lines(), false)?;
let (_, transactions) = split_input(input.lines(), &config)?; | random_line_split |
boxes.rs | // This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::fmt;
macro_rules! box_database {
($($boxenum:ident $boxtype:expr),*,) => {
#[derive(Clone, Copy, PartialEq)]
pub enum BoxType {
$($boxenum),*,
UnknownBox(u32),
}
| impl From<u32> for BoxType {
fn from(t: u32) -> BoxType {
use self::BoxType::*;
match t {
$($boxtype => $boxenum),*,
_ => UnknownBox(t),
}
}
}
impl Into<u32> for BoxType {
fn into(self) -> u32 {
use self::BoxType::*;
match self {
$($boxenum => $boxtype),*,
UnknownBox(t) => t,
}
}
}
impl fmt::Debug for BoxType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let fourcc: FourCC = From::from(self.clone());
write!(f, "{}", fourcc)
}
}
}
}
#[derive(Default, PartialEq, Clone)]
pub struct FourCC {
pub value: String
}
impl From<u32> for FourCC {
fn from(number: u32) -> FourCC {
let mut box_chars = Vec::new();
for x in 0..4 {
let c = (number >> (x * 8) & 0x0000_00FF) as u8;
box_chars.push(c);
}
box_chars.reverse();
let box_string = match String::from_utf8(box_chars) {
Ok(t) => t,
_ => String::from("null"), // failed to decode fourcc
};
FourCC {
value: box_string
}
}
}
impl From<BoxType> for FourCC {
fn from(t: BoxType) -> FourCC {
let box_num: u32 = Into::into(t);
From::from(box_num)
}
}
impl<'a> From<&'a str> for FourCC {
fn from(v: &'a str) -> FourCC {
FourCC {
value: v.to_owned()
}
}
}
impl fmt::Debug for FourCC {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.value)
}
}
impl fmt::Display for FourCC {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.value)
}
}
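// Illustrative round-trip added for clarity (not part of the original file):
// BoxType -> u32 -> FourCC -> BoxType, using FileTypeBox ("ftyp", 0x6674_7970).
#[test]
fn fourcc_round_trip_sketch() {
    let code: u32 = BoxType::FileTypeBox.into();
    assert_eq!(code, 0x6674_7970);
    assert_eq!(FourCC::from(code).value, "ftyp");
    assert_eq!(BoxType::from(code), BoxType::FileTypeBox);
}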
box_database!(
FileTypeBox 0x6674_7970, // "ftyp"
MovieBox 0x6d6f_6f76, // "moov"
MovieHeaderBox 0x6d76_6864, // "mvhd"
TrackBox 0x7472_616b, // "trak"
TrackHeaderBox 0x746b_6864, // "tkhd"
EditBox 0x6564_7473, // "edts"
MediaBox 0x6d64_6961, // "mdia"
EditListBox 0x656c_7374, // "elst"
MediaHeaderBox 0x6d64_6864, // "mdhd"
HandlerBox 0x6864_6c72, // "hdlr"
MediaInformationBox 0x6d69_6e66, // "minf"
SampleTableBox 0x7374_626c, // "stbl"
SampleDescriptionBox 0x7374_7364, // "stsd"
TimeToSampleBox 0x7374_7473, // "stts"
SampleToChunkBox 0x7374_7363, // "stsc"
SampleSizeBox 0x7374_737a, // "stsz"
ChunkOffsetBox 0x7374_636f, // "stco"
ChunkLargeOffsetBox 0x636f_3634, // "co64"
SyncSampleBox 0x7374_7373, // "stss"
AVCSampleEntry 0x6176_6331, // "avc1"
AVC3SampleEntry 0x6176_6333, // "avc3" - Need to check official name in spec.
AVCConfigurationBox 0x6176_6343, // "avcC"
MP4AudioSampleEntry 0x6d70_3461, // "mp4a"
MP4VideoSampleEntry 0x6d70_3476, // "mp4v"
ESDBox 0x6573_6473, // "esds"
VP8SampleEntry 0x7670_3038, // "vp08"
VP9SampleEntry 0x7670_3039, // "vp09"
VPCodecConfigurationBox 0x7670_6343, // "vpcC"
AV1SampleEntry 0x6176_3031, // "av01"
AV1CodecConfigurationBox 0x6176_3143, // "av1C"
FLACSampleEntry 0x664c_6143, // "fLaC"
FLACSpecificBox 0x6466_4c61, // "dfLa"
OpusSampleEntry 0x4f70_7573, // "Opus"
OpusSpecificBox 0x644f_7073, // "dOps"
ProtectedVisualSampleEntry 0x656e_6376, // "encv" - Need to check official name in spec.
ProtectedAudioSampleEntry 0x656e_6361, // "enca" - Need to check official name in spec.
MovieExtendsBox 0x6d76_6578, // "mvex"
MovieExtendsHeaderBox 0x6d65_6864, // "mehd"
QTWaveAtom 0x7761_7665, // "wave" - quicktime atom
ProtectionSystemSpecificHeaderBox 0x7073_7368, // "pssh"
SchemeInformationBox 0x7363_6869, // "schi"
TrackEncryptionBox 0x7465_6e63, // "tenc"
ProtectionSchemeInformationBox 0x7369_6e66, // "sinf"
OriginalFormatBox 0x6672_6d61, // "frma"
SchemeTypeBox 0x7363_686d, // "schm"
MP3AudioSampleEntry 0x2e6d_7033, // ".mp3" - from F4V.
CompositionOffsetBox 0x6374_7473, // "ctts"
LPCMAudioSampleEntry 0x6C70_636D, // "lpcm" - quicktime atom
ALACSpecificBox 0x616C_6163, // "alac" - Also used by ALACSampleEntry
UuidBox 0x7575_6964, // "uuid"
MetadataBox 0x6d65_7461, // "meta"
MetadataHeaderBox 0x6d68_6472, // "mhdr"
MetadataItemKeysBox 0x6b65_7973, // "keys"
MetadataItemListEntry 0x696c_7374, // "ilst"
MetadataItemDataEntry 0x6461_7461, // "data"
MetadataItemNameBox 0x6e61_6d65, // "name"
MetadataItemInformationBox 0x6974_6966, // "itif"
UserdataBox 0x7564_7461, // "udta"
AlbumEntry 0xa961_6c62, // "©alb"
ArtistEntry 0xa941_5254, // "©ART"
ArtistLowercaseEntry 0xa961_7274, // "©art"
AlbumArtistEntry 0x6141_5254, // "aART"
CommentEntry 0xa963_6d74, // "©cmt"
DateEntry 0xa964_6179, // "©day"
TitleEntry 0xa96e_616d, // "©nam"
CustomGenreEntry 0xa967_656e, // "©gen"
StandardGenreEntry 0x676e_7265, // "gnre"
TrackNumberEntry 0x7472_6b6e, // "trkn"
DiskNumberEntry 0x6469_736b, // "disk"
ComposerEntry 0xa977_7274, // "©wrt"
EncoderEntry 0xa974_6f6f, // "©too"
EncodedByEntry 0xa965_6e63, // "©enc"
TempoEntry 0x746d_706f, // "tmpo"
CopyrightEntry 0x6370_7274, // "cprt"
CompilationEntry 0x6370_696c, // "cpil"
CoverArtEntry 0x636f_7672, // "covr"
AdvisoryEntry 0x7274_6e67, // "rtng"
RatingEntry 0x7261_7465, // "rate"
GroupingEntry 0xa967_7270, // "©grp"
MediaTypeEntry 0x7374_696b, // "stik"
PodcastEntry 0x7063_7374, // "pcst"
CategoryEntry 0x6361_7467, // "catg"
KeywordEntry 0x6b65_7977, // "keyw"
PodcastUrlEntry 0x7075_726c, // "purl"
PodcastGuidEntry 0x6567_6964, // "egid"
DescriptionEntry 0x6465_7363, // "desc"
LongDescriptionEntry 0x6c64_6573, // "ldes"
LyricsEntry 0xa96c_7972, // "©lyr"
TVNetworkNameEntry 0x7476_6e6e, // "tvnn"
TVShowNameEntry 0x7476_7368, // "tvsh"
TVEpisodeNameEntry 0x7476_656e, // "tven"
TVSeasonNumberEntry 0x7476_736e, // "tvsn"
TVEpisodeNumberEntry 0x7476_6573, // "tves"
PurchaseDateEntry 0x7075_7264, // "purd"
GaplessPlaybackEntry 0x7067_6170, // "pgap"
OwnerEntry 0x6f77_6e72, // "ownr"
HDVideoEntry 0x6864_7664, // "hdvd"
SortNameEntry 0x736f_6e6d, // "sonm"
SortAlbumEntry 0x736f_616c, // "soal"
SortArtistEntry 0x736f_6172, // "soar"
SortAlbumArtistEntry 0x736f_6161, // "soaa"
SortComposerEntry 0x736f_636f, // "soco"
); | random_line_split |
|
boxes.rs | // This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::fmt;
macro_rules! box_database {
($($boxenum:ident $boxtype:expr),*,) => {
#[derive(Clone, Copy, PartialEq)]
pub enum BoxType {
$($boxenum),*,
UnknownBox(u32),
}
impl From<u32> for BoxType {
fn from(t: u32) -> BoxType {
use self::BoxType::*;
match t {
$($boxtype => $boxenum),*,
_ => UnknownBox(t),
}
}
}
impl Into<u32> for BoxType {
fn into(self) -> u32 {
use self::BoxType::*;
match self {
$($boxenum => $boxtype),*,
UnknownBox(t) => t,
}
}
}
impl fmt::Debug for BoxType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let fourcc: FourCC = From::from(self.clone());
write!(f, "{}", fourcc)
}
}
}
}
#[derive(Default, PartialEq, Clone)]
pub struct FourCC {
pub value: String
}
impl From<u32> for FourCC {
fn from(number: u32) -> FourCC {
let mut box_chars = Vec::new();
for x in 0..4 {
let c = (number >> (x * 8) & 0x0000_00FF) as u8;
box_chars.push(c);
}
box_chars.reverse();
let box_string = match String::from_utf8(box_chars) {
Ok(t) => t,
_ => String::from("null"), // failed to decode fourcc
};
FourCC {
value: box_string
}
}
}
impl From<BoxType> for FourCC {
fn | (t: BoxType) -> FourCC {
let box_num: u32 = Into::into(t);
From::from(box_num)
}
}
impl<'a> From<&'a str> for FourCC {
fn from(v: &'a str) -> FourCC {
FourCC {
value: v.to_owned()
}
}
}
impl fmt::Debug for FourCC {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.value)
}
}
impl fmt::Display for FourCC {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.value)
}
}
box_database!(
FileTypeBox 0x6674_7970, // "ftyp"
MovieBox 0x6d6f_6f76, // "moov"
MovieHeaderBox 0x6d76_6864, // "mvhd"
TrackBox 0x7472_616b, // "trak"
TrackHeaderBox 0x746b_6864, // "tkhd"
EditBox 0x6564_7473, // "edts"
MediaBox 0x6d64_6961, // "mdia"
EditListBox 0x656c_7374, // "elst"
MediaHeaderBox 0x6d64_6864, // "mdhd"
HandlerBox 0x6864_6c72, // "hdlr"
MediaInformationBox 0x6d69_6e66, // "minf"
SampleTableBox 0x7374_626c, // "stbl"
SampleDescriptionBox 0x7374_7364, // "stsd"
TimeToSampleBox 0x7374_7473, // "stts"
SampleToChunkBox 0x7374_7363, // "stsc"
SampleSizeBox 0x7374_737a, // "stsz"
ChunkOffsetBox 0x7374_636f, // "stco"
ChunkLargeOffsetBox 0x636f_3634, // "co64"
SyncSampleBox 0x7374_7373, // "stss"
AVCSampleEntry 0x6176_6331, // "avc1"
AVC3SampleEntry 0x6176_6333, // "avc3" - Need to check official name in spec.
AVCConfigurationBox 0x6176_6343, // "avcC"
MP4AudioSampleEntry 0x6d70_3461, // "mp4a"
MP4VideoSampleEntry 0x6d70_3476, // "mp4v"
ESDBox 0x6573_6473, // "esds"
VP8SampleEntry 0x7670_3038, // "vp08"
VP9SampleEntry 0x7670_3039, // "vp09"
VPCodecConfigurationBox 0x7670_6343, // "vpcC"
AV1SampleEntry 0x6176_3031, // "av01"
AV1CodecConfigurationBox 0x6176_3143, // "av1C"
FLACSampleEntry 0x664c_6143, // "fLaC"
FLACSpecificBox 0x6466_4c61, // "dfLa"
OpusSampleEntry 0x4f70_7573, // "Opus"
OpusSpecificBox 0x644f_7073, // "dOps"
ProtectedVisualSampleEntry 0x656e_6376, // "encv" - Need to check official name in spec.
ProtectedAudioSampleEntry 0x656e_6361, // "enca" - Need to check official name in spec.
MovieExtendsBox 0x6d76_6578, // "mvex"
MovieExtendsHeaderBox 0x6d65_6864, // "mehd"
QTWaveAtom 0x7761_7665, // "wave" - quicktime atom
ProtectionSystemSpecificHeaderBox 0x7073_7368, // "pssh"
SchemeInformationBox 0x7363_6869, // "schi"
TrackEncryptionBox 0x7465_6e63, // "tenc"
ProtectionSchemeInformationBox 0x7369_6e66, // "sinf"
OriginalFormatBox 0x6672_6d61, // "frma"
SchemeTypeBox 0x7363_686d, // "schm"
MP3AudioSampleEntry 0x2e6d_7033, // ".mp3" - from F4V.
CompositionOffsetBox 0x6374_7473, // "ctts"
LPCMAudioSampleEntry 0x6C70_636D, // "lpcm" - quicktime atom
ALACSpecificBox 0x616C_6163, // "alac" - Also used by ALACSampleEntry
UuidBox 0x7575_6964, // "uuid"
MetadataBox 0x6d65_7461, // "meta"
MetadataHeaderBox 0x6d68_6472, // "mhdr"
MetadataItemKeysBox 0x6b65_7973, // "keys"
MetadataItemListEntry 0x696c_7374, // "ilst"
MetadataItemDataEntry 0x6461_7461, // "data"
MetadataItemNameBox 0x6e61_6d65, // "name"
MetadataItemInformationBox 0x6974_6966, // "itif"
UserdataBox 0x7564_7461, // "udta"
AlbumEntry 0xa961_6c62, // "©alb"
ArtistEntry 0xa941_5254, // "©ART"
ArtistLowercaseEntry 0xa961_7274, // "©art"
AlbumArtistEntry 0x6141_5254, // "aART"
CommentEntry 0xa963_6d74, // "©cmt"
DateEntry 0xa964_6179, // "©day"
TitleEntry 0xa96e_616d, // "©nam"
CustomGenreEntry 0xa967_656e, // "©gen"
StandardGenreEntry 0x676e_7265, // "gnre"
TrackNumberEntry 0x7472_6b6e, // "trkn"
DiskNumberEntry 0x6469_736b, // "disk"
ComposerEntry 0xa977_7274, // "©wrt"
EncoderEntry 0xa974_6f6f, // "©too"
EncodedByEntry 0xa965_6e63, // "©enc"
TempoEntry 0x746d_706f, // "tmpo"
CopyrightEntry 0x6370_7274, // "cprt"
CompilationEntry 0x6370_696c, // "cpil"
CoverArtEntry 0x636f_7672, // "covr"
AdvisoryEntry 0x7274_6e67, // "rtng"
RatingEntry 0x7261_7465, // "rate"
GroupingEntry 0xa967_7270, // "©grp"
MediaTypeEntry 0x7374_696b, // "stik"
PodcastEntry 0x7063_7374, // "pcst"
CategoryEntry 0x6361_7467, // "catg"
KeywordEntry 0x6b65_7977, // "keyw"
PodcastUrlEntry 0x7075_726c, // "purl"
PodcastGuidEntry 0x6567_6964, // "egid"
DescriptionEntry 0x6465_7363, // "desc"
LongDescriptionEntry 0x6c64_6573, // "ldes"
LyricsEntry 0xa96c_7972, // "©lyr"
TVNetworkNameEntry 0x7476_6e6e, // "tvnn"
TVShowNameEntry 0x7476_7368, // "tvsh"
TVEpisodeNameEntry 0x7476_656e, // "tven"
TVSeasonNumberEntry 0x7476_736e, // "tvsn"
TVEpisodeNumberEntry 0x7476_6573, // "tves"
PurchaseDateEntry 0x7075_7264, // "purd"
GaplessPlaybackEntry 0x7067_6170, // "pgap"
OwnerEntry 0x6f77_6e72, // "ownr"
HDVideoEntry 0x6864_7664, // "hdvd"
SortNameEntry 0x736f_6e6d, // "sonm"
SortAlbumEntry 0x736f_616c, // "soal"
SortArtistEntry 0x736f_6172, // "soar"
SortAlbumArtistEntry 0x736f_6161, // "soaa"
SortComposerEntry 0x736f_636f, // "soco"
);
| from | identifier_name |
boxes.rs | // This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::fmt;
macro_rules! box_database {
($($boxenum:ident $boxtype:expr),*,) => {
#[derive(Clone, Copy, PartialEq)]
pub enum BoxType {
$($boxenum),*,
UnknownBox(u32),
}
impl From<u32> for BoxType {
fn from(t: u32) -> BoxType {
use self::BoxType::*;
match t {
$($boxtype => $boxenum),*,
_ => UnknownBox(t),
}
}
}
impl Into<u32> for BoxType {
fn into(self) -> u32 {
use self::BoxType::*;
match self {
$($boxenum => $boxtype),*,
UnknownBox(t) => t,
}
}
}
impl fmt::Debug for BoxType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let fourcc: FourCC = From::from(self.clone());
write!(f, "{}", fourcc)
}
}
}
}
#[derive(Default, PartialEq, Clone)]
pub struct FourCC {
pub value: String
}
impl From<u32> for FourCC {
fn from(number: u32) -> FourCC |
}
impl From<BoxType> for FourCC {
fn from(t: BoxType) -> FourCC {
let box_num: u32 = Into::into(t);
From::from(box_num)
}
}
impl<'a> From<&'a str> for FourCC {
fn from(v: &'a str) -> FourCC {
FourCC {
value: v.to_owned()
}
}
}
impl fmt::Debug for FourCC {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.value)
}
}
impl fmt::Display for FourCC {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.value)
}
}
box_database!(
FileTypeBox 0x6674_7970, // "ftyp"
MovieBox 0x6d6f_6f76, // "moov"
MovieHeaderBox 0x6d76_6864, // "mvhd"
TrackBox 0x7472_616b, // "trak"
TrackHeaderBox 0x746b_6864, // "tkhd"
EditBox 0x6564_7473, // "edts"
MediaBox 0x6d64_6961, // "mdia"
EditListBox 0x656c_7374, // "elst"
MediaHeaderBox 0x6d64_6864, // "mdhd"
HandlerBox 0x6864_6c72, // "hdlr"
MediaInformationBox 0x6d69_6e66, // "minf"
SampleTableBox 0x7374_626c, // "stbl"
SampleDescriptionBox 0x7374_7364, // "stsd"
TimeToSampleBox 0x7374_7473, // "stts"
SampleToChunkBox 0x7374_7363, // "stsc"
SampleSizeBox 0x7374_737a, // "stsz"
ChunkOffsetBox 0x7374_636f, // "stco"
ChunkLargeOffsetBox 0x636f_3634, // "co64"
SyncSampleBox 0x7374_7373, // "stss"
AVCSampleEntry 0x6176_6331, // "avc1"
AVC3SampleEntry 0x6176_6333, // "avc3" - Need to check official name in spec.
AVCConfigurationBox 0x6176_6343, // "avcC"
MP4AudioSampleEntry 0x6d70_3461, // "mp4a"
MP4VideoSampleEntry 0x6d70_3476, // "mp4v"
ESDBox 0x6573_6473, // "esds"
VP8SampleEntry 0x7670_3038, // "vp08"
VP9SampleEntry 0x7670_3039, // "vp09"
VPCodecConfigurationBox 0x7670_6343, // "vpcC"
AV1SampleEntry 0x6176_3031, // "av01"
AV1CodecConfigurationBox 0x6176_3143, // "av1C"
FLACSampleEntry 0x664c_6143, // "fLaC"
FLACSpecificBox 0x6466_4c61, // "dfLa"
OpusSampleEntry 0x4f70_7573, // "Opus"
OpusSpecificBox 0x644f_7073, // "dOps"
ProtectedVisualSampleEntry 0x656e_6376, // "encv" - Need to check official name in spec.
ProtectedAudioSampleEntry 0x656e_6361, // "enca" - Need to check official name in spec.
MovieExtendsBox 0x6d76_6578, // "mvex"
MovieExtendsHeaderBox 0x6d65_6864, // "mehd"
QTWaveAtom 0x7761_7665, // "wave" - quicktime atom
ProtectionSystemSpecificHeaderBox 0x7073_7368, // "pssh"
SchemeInformationBox 0x7363_6869, // "schi"
TrackEncryptionBox 0x7465_6e63, // "tenc"
ProtectionSchemeInformationBox 0x7369_6e66, // "sinf"
OriginalFormatBox 0x6672_6d61, // "frma"
SchemeTypeBox 0x7363_686d, // "schm"
MP3AudioSampleEntry 0x2e6d_7033, // ".mp3" - from F4V.
CompositionOffsetBox 0x6374_7473, // "ctts"
LPCMAudioSampleEntry 0x6C70_636D, // "lpcm" - quicktime atom
ALACSpecificBox 0x616C_6163, // "alac" - Also used by ALACSampleEntry
UuidBox 0x7575_6964, // "uuid"
MetadataBox 0x6d65_7461, // "meta"
MetadataHeaderBox 0x6d68_6472, // "mhdr"
MetadataItemKeysBox 0x6b65_7973, // "keys"
MetadataItemListEntry 0x696c_7374, // "ilst"
MetadataItemDataEntry 0x6461_7461, // "data"
MetadataItemNameBox 0x6e61_6d65, // "name"
MetadataItemInformationBox 0x6974_6966, // "itif"
UserdataBox 0x7564_7461, // "udta"
AlbumEntry 0xa961_6c62, // "©alb"
ArtistEntry 0xa941_5254, // "©ART"
ArtistLowercaseEntry 0xa961_7274, // "©art"
AlbumArtistEntry 0x6141_5254, // "aART"
CommentEntry 0xa963_6d74, // "©cmt"
DateEntry 0xa964_6179, // "©day"
TitleEntry 0xa96e_616d, // "©nam"
CustomGenreEntry 0xa967_656e, // "©gen"
StandardGenreEntry 0x676e_7265, // "gnre"
TrackNumberEntry 0x7472_6b6e, // "trkn"
DiskNumberEntry 0x6469_736b, // "disk"
ComposerEntry 0xa977_7274, // "©wrt"
EncoderEntry 0xa974_6f6f, // "©too"
EncodedByEntry 0xa965_6e63, // "©enc"
TempoEntry 0x746d_706f, // "tmpo"
CopyrightEntry 0x6370_7274, // "cprt"
CompilationEntry 0x6370_696c, // "cpil"
CoverArtEntry 0x636f_7672, // "covr"
AdvisoryEntry 0x7274_6e67, // "rtng"
RatingEntry 0x7261_7465, // "rate"
GroupingEntry 0xa967_7270, // "©grp"
MediaTypeEntry 0x7374_696b, // "stik"
PodcastEntry 0x7063_7374, // "pcst"
CategoryEntry 0x6361_7467, // "catg"
KeywordEntry 0x6b65_7977, // "keyw"
PodcastUrlEntry 0x7075_726c, // "purl"
PodcastGuidEntry 0x6567_6964, // "egid"
DescriptionEntry 0x6465_7363, // "desc"
LongDescriptionEntry 0x6c64_6573, // "ldes"
LyricsEntry 0xa96c_7972, // "©lyr"
TVNetworkNameEntry 0x7476_6e6e, // "tvnn"
TVShowNameEntry 0x7476_7368, // "tvsh"
TVEpisodeNameEntry 0x7476_656e, // "tven"
TVSeasonNumberEntry 0x7476_736e, // "tvsn"
TVEpisodeNumberEntry 0x7476_6573, // "tves"
PurchaseDateEntry 0x7075_7264, // "purd"
GaplessPlaybackEntry 0x7067_6170, // "pgap"
OwnerEntry 0x6f77_6e72, // "ownr"
HDVideoEntry 0x6864_7664, // "hdvd"
SortNameEntry 0x736f_6e6d, // "sonm"
SortAlbumEntry 0x736f_616c, // "soal"
SortArtistEntry 0x736f_6172, // "soar"
SortAlbumArtistEntry 0x736f_6161, // "soaa"
SortComposerEntry 0x736f_636f, // "soco"
);
| {
let mut box_chars = Vec::new();
for x in 0..4 {
let c = (number >> (x * 8) & 0x0000_00FF) as u8;
box_chars.push(c);
}
box_chars.reverse();
let box_string = match String::from_utf8(box_chars) {
Ok(t) => t,
_ => String::from("null"), // failed to decode fourcc
};
FourCC {
value: box_string
}
} | identifier_body |
main.ng.rs | //! Multivariate linear regression using gradient descent
//!
//! Model:
//!
//! ```
//! y = x * theta + e
//!
//! y Dependent variable (scalar)
//! x Independent variables (1-by-n matrix)
//! theta Parameters to estimate (n-by-1 matrix)
//! e Error (scalar)
//! ```
//!
//! Cost function: Half of the mean squared error (MSE)
//!
//! ```
//! E = X * theta - Y
//! J = E' * E / 2 / m
//!
//! E Error per observation (m-by-1 matrix)
//! X Observed independent variables (m-by-n matrix)
//! Y Observed dependent variables (m-by-1 matrix)
//! m Number of observations (integer)
//! theta Parameters to estimate (n-by-1 matrix)
//! ```
//!
//! Estimator: Gradient descent
//!
//! ```
//! loop
//! E = X * theta - Y
//! theta = theta - alpha / m * X' * E
//! until stop_condition
//!
//! E (m-by-1 matrix)
//! X (m-by-n matrix)
//! Y (m-by-1 matrix)
//! alpha Step size (scalar)
//! theta (n-by-1 matrix)
//! ```
#![allow(non_snake_case)]
#![deny(warnings)]
#![feature(plugin)]
#![plugin(linalg_macros)]
extern crate cast;
extern crate env_logger;
extern crate linalg;
extern crate lines;
extern crate stats;
extern crate time;
#[macro_use]
extern crate log;
use std::fs::File;
use std::io::{BufReader, self};
use std::path::Path;
use cast::From as _0;
use linalg::prelude::*;
use linalg::{Col, ColMut, SubMat, SubMatMut, Transposed};
use lines::Lines;
use stats::univariate::Sample;
macro_rules! timeit {
($msg:expr, $e:expr) => {{
let now = time::precise_time_ns();
let out = $e;
let elapsed = time::precise_time_ns() - now;
println!(concat!($msg, " took {} ms"), f64::from_(elapsed) / 1_000_000.);
out
}}
}
fn main() {
env_logger::init().unwrap();
// Some dummy operation to force the initialization of OpenBLAS' runtime (~90 ms) here rather
// than during the measurements below
(&mat![1., 2.; 3., 4.].inv() * &mat![1., 2.; 3., 4.]).eval();
let data = timeit!("Loading data", {
load("mpg.tsv").unwrap()
});
// Number of observations
let m = data.nrows();
println!("{} observations", m);
// Number of independent variables
let n = data.ncols() - 1;
println!("{} independent variables\n", n);
let mut X = Mat::ones((m, n + 1));
X[.., 1..] = data[.., 1..];
let y = data.col(0);
let (mu, sigma) = timeit!("Normalization", {
normalize(&mut X[.., 1..])
});
println!("mean: {:?}", mu);
println!("std deviation: {:?}\n", sigma);
let ref mut theta = ColVec::zeros(n + 1);
let alpha = 0.01;
let max_niters = 100_000;
let niters = timeit!("Gradient descent", {
descent(&X, y, theta, alpha, max_niters)
});
println!("Estimated parameters: {:?}", theta);
println!("Iterations required: {}", niters);
}
/// Evaluates the cost function for `theta`
///
/// X (m, n)
/// y (m, 1)
/// theta (n, 1)
/// z (m, 1) Auxiliary buffer to avoid allocating
fn cost(X: &SubMat<f64>, y: &Col<f64>, theta: &Col<f64>, mut z: &mut Col<f64>) -> f64 {
let m = f64::from_(X.nrows());
z[..] = y - X * theta;
let e = &*z;
e.t() * e / 2. / m
}
/// Normalizes the independent variables
///
/// X (m, n)
///
/// -> Returns a vector of means and a vector of standard deviations
fn normalize(X: &mut SubMat<f64>) -> (Vec<f64>, Vec<f64>) {
let n = usize::from_(X.ncols());
let mut mu = Vec::with_capacity(n);
let mut sigma = Vec::with_capacity(n);
for col in X.cols_mut() {
let (mean, sd) = {
let sample = Sample::new(col.as_slice().unwrap());
let mean = sample.mean();
(mean, sample.std_dev(Some(mean)))
};
mu.push(mean);
sigma.push(sd);
*col -= mean;
*col /= sd;
}
(mu, sigma)
}
/// Performs the gradient descent algorithm to find the value of `theta` that minimizes the cost
/// function.
///
/// X (m, n)
/// y (m, 1)
/// theta (n, 1)
/// alpha scalar Step size
/// max_niters integer Maximum number of iterations
///
/// -> Returns the number of iterations required to converge to a solution
fn descent(
X: &SubMat<f64>,
y: &Col<f64>,
theta: &mut Col<f64>,
alpha: f64,
max_niters: u32,
) -> u32 {
const TOL: f64 = 1e-5;
let m = f64::from_(X.nrows());
// Pre-allocate a column vector to avoid allocations in the loop
let ref mut z = ColVec::zeros(X.nrows());
let mut last_J = cost(X, y, theta, z);
for i in 0..max_niters {
// z = e = y - X * theta
z[..] = y - X * theta;
// theta = theta + alpha / m * x' * e
*theta += alpha * X.t() * &*z / m;
let J = cost(X, y, theta, z);
debug!("i: {}, J: {}, theta: {:?}", i, J, theta);
// Stop condition: `cost` changed by less than the relative tolerance `TOL` in the last iteration
if (J - last_J).abs() / J.max(last_J) < TOL {
return i
}
last_J = J;
}
max_niters
}
/// Loads data from a TSV file
fn load<P>(path: P) -> io::Result<Transposed<Mat<f64>>> where P: AsRef<Path> {
fn | (path: &Path) -> io::Result<Transposed<Mat<f64>>> {
let mut lines = Lines::from(BufReader::new(try!(File::open(path))));
let mut v = vec![];
let ncols = {
let mut ncols = 0;
for number in try!(lines.next().unwrap()).split_whitespace() {
ncols += 1;
v.push(number.parse().unwrap());
}
ncols
};
let mut nrows = 1;
while let Some(line) = lines.next() {
let line = try!(line);
for number in line.split_whitespace() {
v.push(number.parse().unwrap());
}
nrows += 1;
}
unsafe {
Ok(Mat::from_raw_parts(v.into_boxed_slice(), (ncols, nrows)).t())
}
}
load(path.as_ref())
}
| load | identifier_name |
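As a rough, dependency-free restatement of the update rule implemented in descent above (plain slices instead of the linalg crate's matrices; illustrative only, not part of the original file):

fn descent_step(x: &[Vec<f64>], y: &[f64], theta: &mut [f64], alpha: f64) {
    let m = x.len() as f64;
    // e = y - X * theta
    let e: Vec<f64> = x.iter()
        .zip(y)
        .map(|(row, &yi)| {
            let pred: f64 = row.iter().zip(theta.iter()).map(|(a, b)| a * b).sum();
            yi - pred
        })
        .collect();
    // theta = theta + alpha / m * X' * e
    for (j, t) in theta.iter_mut().enumerate() {
        let grad: f64 = x.iter().zip(&e).map(|(row, &ei)| row[j] * ei).sum();
        *t += alpha * grad / m;
    }
}

A full run would simply call descent_step in a loop until the relative change in the cost drops below a tolerance, mirroring the stop condition in descent.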
main.ng.rs | //! Multivariate linear regression using gradient descent
//!
//! Model:
//!
//! ```
//! y = x * theta + e
//!
//! y Dependent variable (scalar)
//! x Independent variables (1-by-n matrix)
//! theta Parameters to estimate (n-by-1 matrix)
//! e Error (scalar)
//! ```
//!
//! Cost function: Half of the mean squared error (MSE)
//!
//! ```
//! E = X * theta - Y
//! J = E' * E / 2 / m
//!
//! E Error per observation (m-by-1 matrix)
//! X Observed independent variables (m-by-n matrix)
//! Y Observed dependent variables (m-by-1 matrix)
//! m Number of observations (integer)
//! theta Parameters to estimate (n-by-1 matrix)
//! ```
//!
//! Estimator: Gradient descent
//!
//! ```
//! loop
//! E = X * theta - Y
//! theta = theta - alpha / m * X' * E
//! until stop_condition
//!
//! E (m-by-1 matrix)
//! X (m-by-n matrix)
//! Y (m-by-1 matrix)
//! alpha Step size (scalar)
//! theta (n-by-1 matrix)
//! ```
#![allow(non_snake_case)]
#![deny(warnings)]
#![feature(plugin)]
#![plugin(linalg_macros)]
extern crate cast;
extern crate env_logger;
extern crate linalg;
extern crate lines;
extern crate stats;
extern crate time;
#[macro_use]
extern crate log;
use std::fs::File;
use std::io::{BufReader, self};
use std::path::Path;
use cast::From as _0;
use linalg::prelude::*;
use linalg::{Col, ColMut, SubMat, SubMatMut, Transposed};
use lines::Lines;
use stats::univariate::Sample;
macro_rules! timeit {
($msg:expr, $e:expr) => {{
let now = time::precise_time_ns();
let out = $e;
let elapsed = time::precise_time_ns() - now;
println!(concat!($msg, " took {} ms"), f64::from_(elapsed) / 1_000_000.);
out
}}
}
fn main() {
env_logger::init().unwrap();
// Some dummy operation to force the initialization of OpenBLAS' runtime (~90 ms) here rather
// than during the measurements below
(&mat![1., 2.; 3., 4.].inv() * &mat![1., 2.; 3., 4.]).eval();
let data = timeit!("Loading data", {
load("mpg.tsv").unwrap()
});
// Number of observations
let m = data.nrows();
println!("{} observations", m);
// Number of independent variables
let n = data.ncols() - 1;
println!("{} independent variables\n", n);
let mut X = Mat::ones((m, n + 1));
X[.., 1..] = data[.., 1..];
let y = data.col(0);
let (mu, sigma) = timeit!("Normalization", {
normalize(&mut X[.., 1..])
});
println!("mean: {:?}", mu);
println!("std deviation: {:?}\n", sigma);
let ref mut theta = ColVec::zeros(n + 1);
let alpha = 0.01;
let max_niters = 100_000;
let niters = timeit!("Gradient descent", {
descent(&X, y, theta, alpha, max_niters)
});
println!("Estimated parameters: {:?}", theta);
println!("Iterations required: {}", niters);
}
/// Evaluates the cost function for `theta`
///
/// X (m, n)
/// y (m, 1)
/// theta (n, 1)
/// z (m, 1) Auxiliary buffer to avoid allocating
fn cost(X: &SubMat<f64>, y: &Col<f64>, theta: &Col<f64>, mut z: &mut Col<f64>) -> f64 {
let m = f64::from_(X.nrows());
z[..] = y - X * theta;
let e = &*z;
e.t() * e / 2. / m
}
/// Normalizes the independent variables
///
/// X (m, n)
///
/// -> Returns a vector of means and a vector of standard deviations
fn normalize(X: &mut SubMat<f64>) -> (Vec<f64>, Vec<f64>) {
let n = usize::from_(X.ncols());
let mut mu = Vec::with_capacity(n);
let mut sigma = Vec::with_capacity(n);
for col in X.cols_mut() {
let (mean, sd) = {
let sample = Sample::new(col.as_slice().unwrap());
let mean = sample.mean();
(mean, sample.std_dev(Some(mean)))
};
mu.push(mean);
sigma.push(sd);
*col -= mean;
*col /= sd;
}
(mu, sigma)
}
/// Performs the gradient descent algorithm to find the value of `theta` that minimizes the cost
/// function.
///
/// X (m, n)
/// y (m, 1)
/// theta (n, 1)
/// alpha scalar Step size
/// max_niters integer Maximum number of iterations
///
/// -> Returns the number of iterations required to converge to a solution
fn descent(
X: &SubMat<f64>,
y: &Col<f64>,
theta: &mut Col<f64>,
alpha: f64,
max_niters: u32,
) -> u32 {
const TOL: f64 = 1e-5;
let m = f64::from_(X.nrows());
// Pre-allocate a column vector to avoid allocations in the loop
let ref mut z = ColVec::zeros(X.nrows());
let mut last_J = cost(X, y, theta, z);
for i in 0..max_niters {
// z = e = y - X * theta
z[..] = y - X * theta;
// theta = theta + alpha / m * x' * e
*theta += alpha * X.t() * &*z / m;
let J = cost(X, y, theta, z);
debug!("i: {}, J: {}, theta: {:?}", i, J, theta);
// Stop condition: `cost` changed by less than the relative tolerance `TOL` in the last iteration
if (J - last_J).abs() / J.max(last_J) < TOL |
last_J = J;
}
max_niters
}
/// Loads data from a TSV file
fn load<P>(path: P) -> io::Result<Transposed<Mat<f64>>> where P: AsRef<Path> {
fn load(path: &Path) -> io::Result<Transposed<Mat<f64>>> {
let mut lines = Lines::from(BufReader::new(try!(File::open(path))));
let mut v = vec![];
let ncols = {
let mut ncols = 0;
for number in try!(lines.next().unwrap()).split_whitespace() {
ncols += 1;
v.push(number.parse().unwrap());
}
ncols
};
let mut nrows = 1;
while let Some(line) = lines.next() {
let line = try!(line);
for number in line.split_whitespace() {
v.push(number.parse().unwrap());
}
nrows += 1;
}
unsafe {
Ok(Mat::from_raw_parts(v.into_boxed_slice(), (ncols, nrows)).t())
}
}
load(path.as_ref())
}
| {
return i
} | conditional_block |
main.ng.rs | //! Multivariate linear regression using gradient descent
//!
//! Model:
//!
//! ```
//! y = x * theta + e
//!
//! y Dependent variable (scalar)
//! x Independent variables (1-by-n matrix)
//! theta Parameters to estimate (n-by-1 matrix)
//! e Error (scalar)
//! ```
//!
//! Cost function: Half of the mean squared error (MSE)
//!
//! ```
//! E = X * theta - Y
//! J = E' * E / 2 / m
//!
//! E Error per observation (m-by-1 matrix)
//! X Observed independent variables (m-by-n matrix)
//! Y Observed dependent variables (m-by-1 matrix)
//! m Number of observations (integer)
//! theta Parameters to estimate (n-by-1 matrix)
//! ```
//!
//! Estimator: Gradient descent
//!
//! ```
//! loop
//! E = X * theta - Y
//! theta = theta - alpha / m * X' * E
//! until stop_condition
//!
//! E (m-by-1 matrix)
//! X (m-by-n matrix)
//! Y (m-by-1 matrix)
//! alpha Step size (scalar)
//! theta (n-by-1 matrix)
//! ```
#![allow(non_snake_case)]
#![deny(warnings)]
#![feature(plugin)]
#![plugin(linalg_macros)]
extern crate cast;
extern crate env_logger;
extern crate linalg;
extern crate lines;
extern crate stats;
extern crate time;
#[macro_use]
extern crate log;
use std::fs::File;
use std::io::{BufReader, self};
use std::path::Path;
use cast::From as _0;
use linalg::prelude::*;
use linalg::{Col, ColMut, SubMat, SubMatMut, Transposed};
use lines::Lines;
use stats::univariate::Sample;
macro_rules! timeit {
($msg:expr, $e:expr) => {{
let now = time::precise_time_ns();
let out = $e;
let elapsed = time::precise_time_ns() - now;
println!(concat!($msg, " took {} ms"), f64::from_(elapsed) / 1_000_000.);
out
}}
}
fn main() |
let mut X = Mat::ones((m, n + 1));
X[.., 1..] = data[.., 1..];
let y = data.col(0);
let (mu, sigma) = timeit!("Normalization", {
normalize(&mut X[.., 1..])
});
println!("mean: {:?}", mu);
println!("std deviation: {:?}\n", sigma);
let ref mut theta = ColVec::zeros(n + 1);
let alpha = 0.01;
let max_niters = 100_000;
let niters = timeit!("Gradient descent", {
descent(&X, y, theta, alpha, max_niters)
});
println!("Estimated parameters: {:?}", theta);
println!("Iterations required: {}", niters);
}
/// Evaluates the cost function for `theta`
///
/// X (m, n)
/// y (m, 1)
/// theta (n, 1)
/// z (m, 1) Auxiliary buffer to avoid allocating
fn cost(X: &SubMat<f64>, y: &Col<f64>, theta: &Col<f64>, mut z: &mut Col<f64>) -> f64 {
let m = f64::from_(X.nrows());
z[..] = y - X * theta;
let e = &*z;
e.t() * e / 2. / m
}
/// Normalizes the independent variables
///
/// X (m, n)
///
/// -> Returns a vector of means and a vector of standard deviations
fn normalize(X: &mut SubMat<f64>) -> (Vec<f64>, Vec<f64>) {
let n = usize::from_(X.ncols());
let mut mu = Vec::with_capacity(n);
let mut sigma = Vec::with_capacity(n);
for col in X.cols_mut() {
let (mean, sd) = {
let sample = Sample::new(col.as_slice().unwrap());
let mean = sample.mean();
(mean, sample.std_dev(Some(mean)))
};
mu.push(mean);
sigma.push(sd);
*col -= mean;
*col /= sd;
}
(mu, sigma)
}
/// Performs the gradient descent algorithm to find the value of `theta` that minimizes the cost
/// function.
///
/// X (m, n)
/// y (m, 1)
/// theta (n, 1)
/// alpha scalar Step size
/// max_niters integer Maximum number of iterations
///
/// -> Returns the number of iterations required to converge to a solution
fn descent(
X: &SubMat<f64>,
y: &Col<f64>,
theta: &mut Col<f64>,
alpha: f64,
max_niters: u32,
) -> u32 {
const TOL: f64 = 1e-5;
let m = f64::from_(X.nrows());
// Pre-allocate a column vector to avoid allocations in the loop
let ref mut z = ColVec::zeros(X.nrows());
let mut last_J = cost(X, y, theta, z);
for i in 0..max_niters {
// z = e = y - X * theta
z[..] = y - X * theta;
// theta = theta + alpha / m * x' * e
*theta += alpha * X.t() * &*z / m;
let J = cost(X, y, theta, z);
debug!("i: {}, J: {}, theta: {:?}", i, J, theta);
// Stop condition: `cost` changed by less than the relative tolerance `TOL` in the last iteration
if (J - last_J).abs() / J.max(last_J) < TOL {
return i
}
last_J = J;
}
max_niters
}
/// Loads data from a TSV file
fn load<P>(path: P) -> io::Result<Transposed<Mat<f64>>> where P: AsRef<Path> {
fn load(path: &Path) -> io::Result<Transposed<Mat<f64>>> {
let mut lines = Lines::from(BufReader::new(try!(File::open(path))));
let mut v = vec![];
let ncols = {
let mut ncols = 0;
for number in try!(lines.next().unwrap()).split_whitespace() {
ncols += 1;
v.push(number.parse().unwrap());
}
ncols
};
let mut nrows = 1;
while let Some(line) = lines.next() {
let line = try!(line);
for number in line.split_whitespace() {
v.push(number.parse().unwrap());
}
nrows += 1;
}
unsafe {
Ok(Mat::from_raw_parts(v.into_boxed_slice(), (ncols, nrows)).t())
}
}
load(path.as_ref())
}
| {
env_logger::init().unwrap();
// Some dummy operation to force the initialization of OpenBLAS' runtime (~90 ms) here rather
// than during the measurements below
(&mat![1., 2.; 3., 4.].inv() * &mat![1., 2.; 3., 4.]).eval();
let data = timeit!("Loading data", {
load("mpg.tsv").unwrap()
});
// Number of observations
let m = data.nrows();
println!("{} observations", m);
// Number of independent variables
let n = data.ncols() - 1;
println!("{} independent variables\n", n); | identifier_body |
main.ng.rs | //! Multivariate linear regression using gradient descent
//!
//! Model:
//!
//! ```
//! y = x * theta + e
//!
//! y Dependent variable (scalar)
//! x Independent variables (1-by-n matrix)
//! theta Parameters to estimate (n-by-1 matrix)
//! e Error (scalar)
//! ```
//!
//! Cost function: Half of the mean squared error (MSE)
//!
//! ```
//! E = X * theta - Y
//! J = E' * E / 2 / m
//!
//! E Error per observation (m-by-1 matrix)
//! X Observed independent variables (m-by-n matrix)
//! Y Observed dependent variables (m-by-1 matrix)
//! m Number of observations (integer)
//! theta Parameters to estimate (n-by-1 matrix)
//! ```
//!
//! Estimator: Gradient descent
//!
//! ```
//! loop
//! E = X * theta - Y
//! theta = theta - alpha / m * X' * E
//! until stop_condition
//!
//! E (m-by-1 matrix)
//! X (m-by-n matrix)
//! Y (m-by-1 matrix)
//! alpha Step size (scalar)
//! theta (n-by-1 matrix)
//! ```
#![allow(non_snake_case)]
#![deny(warnings)]
#![feature(plugin)]
#![plugin(linalg_macros)]
extern crate cast;
extern crate env_logger;
extern crate linalg;
extern crate lines;
extern crate stats;
extern crate time;
#[macro_use]
extern crate log;
use std::fs::File;
use std::io::{BufReader, self};
use std::path::Path;
use cast::From as _0;
use linalg::prelude::*;
use linalg::{Col, ColMut, SubMat, SubMatMut, Transposed};
use lines::Lines;
use stats::univariate::Sample;
macro_rules! timeit {
($msg:expr, $e:expr) => {{
let now = time::precise_time_ns();
let out = $e;
let elapsed = time::precise_time_ns() - now;
println!(concat!($msg, " took {} ms"), f64::from_(elapsed) / 1_000_000.);
out
}}
}
fn main() {
env_logger::init().unwrap();
// Some dummy operation to force the initialization of OpenBLAS' runtime (~90 ms) here rather
// than during the measurements below
(&mat![1., 2.; 3., 4.].inv() * &mat![1., 2.; 3., 4.]).eval();
let data = timeit!("Loading data", {
load("mpg.tsv").unwrap()
});
// Number of observations
let m = data.nrows();
println!("{} observations", m);
// Number of independent variables
let n = data.ncols() - 1;
println!("{} independent variables\n", n);
let mut X = Mat::ones((m, n + 1));
X[.., 1..] = data[.., 1..];
let y = data.col(0);
let (mu, sigma) = timeit!("Normalization", {
normalize(&mut X[.., 1..])
});
println!("mean: {:?}", mu);
println!("std deviation: {:?}\n", sigma);
let ref mut theta = ColVec::zeros(n + 1);
let alpha = 0.01;
let max_niters = 100_000;
let niters = timeit!("Gradient descent", {
descent(&X, y, theta, alpha, max_niters)
});
println!("Estimated parameters: {:?}", theta);
println!("Iterations required: {}", niters);
}
/// Evaluates the cost function for `theta`
///
/// X (m, n)
/// y (m, 1)
/// theta (n, 1)
/// z (m, 1) Auxiliary buffer to avoid allocating
fn cost(X: &SubMat<f64>, y: &Col<f64>, theta: &Col<f64>, mut z: &mut Col<f64>) -> f64 {
let m = f64::from_(X.nrows());
z[..] = y - X * theta;
let e = &*z;
e.t() * e / 2. / m
}
/// Normalizes the independent variables
///
/// X (m, n)
///
/// -> Returns a vector of means and a vector of standard deviations
fn normalize(X: &mut SubMat<f64>) -> (Vec<f64>, Vec<f64>) {
let n = usize::from_(X.ncols());
let mut mu = Vec::with_capacity(n);
let mut sigma = Vec::with_capacity(n);
for col in X.cols_mut() {
let (mean, sd) = {
let sample = Sample::new(col.as_slice().unwrap());
let mean = sample.mean();
(mean, sample.std_dev(Some(mean)))
}; |
*col -= mean;
*col /= sd;
}
(mu, sigma)
}
/// Performs the gradient descent algorithm to find the value of `theta` that minimizes the cost
/// function.
///
/// X (m, n)
/// y (m, 1)
/// theta (n, 1)
/// alpha scalar Step size
/// max_niters integer Maximum number of iterations
///
/// -> Returns the number of iterations required to converge to a solution
fn descent(
X: &SubMat<f64>,
y: &Col<f64>,
theta: &mut Col<f64>,
alpha: f64,
max_niters: u32,
) -> u32 {
const TOL: f64 = 1e-5;
let m = f64::from_(X.nrows());
// Pre-allocate a column vector to avoid allocations in the loop
let ref mut z = ColVec::zeros(X.nrows());
let mut last_J = cost(X, y, theta, z);
for i in 0..max_niters {
// z = e = y - X * theta
z[..] = y - X * theta;
// theta = theta + alpha / m * X' * e
*theta += alpha * X.t() * &*z / m;
let J = cost(X, y, theta, z);
debug!("i: {}, J: {}, theta: {:?}", i, J, theta);
// Stop condition: `cost` reduced by less than `TOL`% in last iteration
if (J - last_J).abs() / J.max(last_J) < TOL {
return i
}
last_J = J;
}
max_niters
}
/// Loads data from a TSV file
fn load<P>(path: P) -> io::Result<Transposed<Mat<f64>>> where P: AsRef<Path> {
fn load(path: &Path) -> io::Result<Transposed<Mat<f64>>> {
let mut lines = Lines::from(BufReader::new(try!(File::open(path))));
let mut v = vec![];
let ncols = {
let mut ncols = 0;
for number in try!(lines.next().unwrap()).split_whitespace() {
ncols += 1;
v.push(number.parse().unwrap());
}
ncols
};
let mut nrows = 1;
while let Some(line) = lines.next() {
let line = try!(line);
for number in line.split_whitespace() {
v.push(number.parse().unwrap());
}
nrows += 1;
}
unsafe {
Ok(Mat::from_raw_parts(v.into_boxed_slice(), (ncols, nrows)).t())
}
}
load(path.as_ref())
} |
mu.push(mean);
sigma.push(sd); | random_line_split |
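The doc comment at the top of this row states the update rule `theta = theta - alpha / m * X' * E`, but the implementation relies on the old `linalg` crate and a compiler plugin. As a dependency-free illustration of the same rule, a plain `Vec<f64>` version might look like the sketch below; every name in it is invented for the sketch and not taken from the file.
```
/// One batch gradient-descent step: theta <- theta - alpha/m * X^T * (X*theta - y).
/// `x` is row-major with `m` rows and `n` columns; `theta` has length `n`.
fn descent_step(x: &[f64], y: &[f64], theta: &mut [f64], m: usize, n: usize, alpha: f64) {
    // e = X * theta - y
    let mut e = vec![0.0; m];
    for i in 0..m {
        let row = &x[i * n..(i + 1) * n];
        e[i] = row.iter().zip(theta.iter()).map(|(a, b)| a * b).sum::<f64>() - y[i];
    }
    // theta_j -= alpha / m * sum_i X[i][j] * e[i]
    for j in 0..n {
        let grad: f64 = (0..m).map(|i| x[i * n + j] * e[i]).sum();
        theta[j] -= alpha / m as f64 * grad;
    }
}

fn main() {
    // Fit y = 1 + 2*x on four points; column 0 is the intercept term.
    let x = vec![1.0, 0.0, 1.0, 1.0, 1.0, 2.0, 1.0, 3.0];
    let y = vec![1.0, 3.0, 5.0, 7.0];
    let mut theta = vec![0.0, 0.0];
    for _ in 0..10_000 {
        descent_step(&x, &y, &mut theta, 4, 2, 0.1);
    }
    println!("theta ~= {:?}", theta); // converges close to [1.0, 2.0]
}
```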
layout_image.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Infrastructure to initiate network requests for images needed by the layout
//! thread. The script thread needs to be responsible for them because there's
//! no guarantee that the responsible nodes will still exist in the future if the
//! layout thread holds on to them during asynchronous operations.
use dom::bindings::reflector::DomObject;
use dom::node::{Node, document_from_node};
use ipc_channel::ipc;
use ipc_channel::router::ROUTER;
use net_traits::{FetchResponseMsg, FetchResponseListener, FetchMetadata, NetworkError};
use net_traits::image_cache::{ImageCache, PendingImageId};
use net_traits::request::{Destination, RequestInit as FetchRequestInit};
use network_listener::{NetworkListener, PreInvoke};
use servo_url::ServoUrl;
use std::sync::{Arc, Mutex};
struct | {
id: PendingImageId,
cache: Arc<ImageCache>,
}
impl FetchResponseListener for LayoutImageContext {
fn process_request_body(&mut self) {}
fn process_request_eof(&mut self) {}
fn process_response(&mut self, metadata: Result<FetchMetadata, NetworkError>) {
self.cache.notify_pending_response(
self.id,
FetchResponseMsg::ProcessResponse(metadata));
}
fn process_response_chunk(&mut self, payload: Vec<u8>) {
self.cache.notify_pending_response(
self.id,
FetchResponseMsg::ProcessResponseChunk(payload));
}
fn process_response_eof(&mut self, response: Result<(), NetworkError>) {
self.cache.notify_pending_response(self.id,
FetchResponseMsg::ProcessResponseEOF(response));
}
}
impl PreInvoke for LayoutImageContext {}
pub fn fetch_image_for_layout(url: ServoUrl,
node: &Node,
id: PendingImageId,
cache: Arc<ImageCache>) {
let context = Arc::new(Mutex::new(LayoutImageContext {
id: id,
cache: cache,
}));
let document = document_from_node(node);
let window = document.window();
let (action_sender, action_receiver) = ipc::channel().unwrap();
let listener = NetworkListener {
context: context,
task_source: window.networking_task_source(),
canceller: Some(window.task_canceller()),
};
ROUTER.add_route(action_receiver.to_opaque(), Box::new(move |message| {
listener.notify_fetch(message.to().unwrap());
}));
let request = FetchRequestInit {
url: url,
origin: document.origin().immutable().clone(),
destination: Destination::Image,
pipeline_id: Some(document.global().pipeline_id()),
.. FetchRequestInit::default()
};
// Layout image loads do not delay the document load event.
document.loader().fetch_async_background(request, action_sender);
}
| LayoutImageContext | identifier_name |
layout_image.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Infrastructure to initiate network requests for images needed by the layout
//! thread. The script thread needs to be responsible for them because there's
//! no guarantee that the responsible nodes will still exist in the future if the
//! layout thread holds on to them during asynchronous operations.
use dom::bindings::reflector::DomObject;
use dom::node::{Node, document_from_node};
use ipc_channel::ipc;
use ipc_channel::router::ROUTER;
use net_traits::{FetchResponseMsg, FetchResponseListener, FetchMetadata, NetworkError};
use net_traits::image_cache::{ImageCache, PendingImageId};
use net_traits::request::{Destination, RequestInit as FetchRequestInit};
use network_listener::{NetworkListener, PreInvoke};
use servo_url::ServoUrl;
use std::sync::{Arc, Mutex};
struct LayoutImageContext {
id: PendingImageId,
cache: Arc<ImageCache>,
}
impl FetchResponseListener for LayoutImageContext {
fn process_request_body(&mut self) {}
fn process_request_eof(&mut self) {}
fn process_response(&mut self, metadata: Result<FetchMetadata, NetworkError>) {
self.cache.notify_pending_response(
self.id,
FetchResponseMsg::ProcessResponse(metadata));
}
fn process_response_chunk(&mut self, payload: Vec<u8>) {
self.cache.notify_pending_response(
self.id,
FetchResponseMsg::ProcessResponseChunk(payload));
}
fn process_response_eof(&mut self, response: Result<(), NetworkError>) {
self.cache.notify_pending_response(self.id,
FetchResponseMsg::ProcessResponseEOF(response));
}
}
impl PreInvoke for LayoutImageContext {}
pub fn fetch_image_for_layout(url: ServoUrl,
node: &Node,
id: PendingImageId,
cache: Arc<ImageCache>) | url: url,
origin: document.origin().immutable().clone(),
destination: Destination::Image,
pipeline_id: Some(document.global().pipeline_id()),
.. FetchRequestInit::default()
};
// Layout image loads do not delay the document load event.
document.loader().fetch_async_background(request, action_sender);
}
| {
let context = Arc::new(Mutex::new(LayoutImageContext {
id: id,
cache: cache,
}));
let document = document_from_node(node);
let window = document.window();
let (action_sender, action_receiver) = ipc::channel().unwrap();
let listener = NetworkListener {
context: context,
task_source: window.networking_task_source(),
canceller: Some(window.task_canceller()),
};
ROUTER.add_route(action_receiver.to_opaque(), Box::new(move |message| {
listener.notify_fetch(message.to().unwrap());
}));
let request = FetchRequestInit { | identifier_body |
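The module comment in these rows explains that the script thread merely relays fetch responses from the network to the image cache. Stripped of Servo's `ipc_channel` and `net_traits` types, the relay shape is a channel whose messages get forwarded to a cache method; the sketch below uses standard-library stand-ins, and every type and name in it is illustrative rather than Servo's actual API.
```
use std::sync::mpsc;
use std::thread;

// Stand-ins for FetchResponseMsg and the pending-image id.
#[derive(Debug)]
enum FetchMsg {
    Metadata(String),
    Chunk(Vec<u8>),
    Eof,
}

struct ImageCache;

impl ImageCache {
    fn notify_pending_response(&self, id: u64, msg: FetchMsg) {
        println!("image {}: {:?}", id, msg);
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();
    // The "network" side produces response events...
    let producer = thread::spawn(move || {
        tx.send(FetchMsg::Metadata("image/png".into())).unwrap();
        tx.send(FetchMsg::Chunk(vec![0x89, 0x50, 0x4e, 0x47])).unwrap();
        tx.send(FetchMsg::Eof).unwrap();
    });
    // ...and a listener on the script side forwards them to the cache,
    // which is essentially all the LayoutImageContext above does.
    let cache = ImageCache;
    for msg in rx {
        cache.notify_pending_response(42, msg);
    }
    producer.join().unwrap();
}
```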
layout_image.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Infrastructure to initiate network requests for images needed by the layout
//! thread. The script thread needs to be responsible for them because there's
//! no guarantee that the responsible nodes will still exist in the future if the
//! layout thread holds on to them during asynchronous operations.
use dom::bindings::reflector::DomObject;
use dom::node::{Node, document_from_node};
use ipc_channel::ipc;
use ipc_channel::router::ROUTER;
use net_traits::{FetchResponseMsg, FetchResponseListener, FetchMetadata, NetworkError};
use net_traits::image_cache::{ImageCache, PendingImageId};
use net_traits::request::{Destination, RequestInit as FetchRequestInit};
use network_listener::{NetworkListener, PreInvoke};
use servo_url::ServoUrl;
use std::sync::{Arc, Mutex};
struct LayoutImageContext {
id: PendingImageId,
cache: Arc<ImageCache>,
}
impl FetchResponseListener for LayoutImageContext {
fn process_request_body(&mut self) {}
fn process_request_eof(&mut self) {}
fn process_response(&mut self, metadata: Result<FetchMetadata, NetworkError>) {
self.cache.notify_pending_response(
self.id,
FetchResponseMsg::ProcessResponse(metadata));
}
fn process_response_chunk(&mut self, payload: Vec<u8>) {
self.cache.notify_pending_response(
self.id, | }
fn process_response_eof(&mut self, response: Result<(), NetworkError>) {
self.cache.notify_pending_response(self.id,
FetchResponseMsg::ProcessResponseEOF(response));
}
}
impl PreInvoke for LayoutImageContext {}
pub fn fetch_image_for_layout(url: ServoUrl,
node: &Node,
id: PendingImageId,
cache: Arc<ImageCache>) {
let context = Arc::new(Mutex::new(LayoutImageContext {
id: id,
cache: cache,
}));
let document = document_from_node(node);
let window = document.window();
let (action_sender, action_receiver) = ipc::channel().unwrap();
let listener = NetworkListener {
context: context,
task_source: window.networking_task_source(),
canceller: Some(window.task_canceller()),
};
ROUTER.add_route(action_receiver.to_opaque(), Box::new(move |message| {
listener.notify_fetch(message.to().unwrap());
}));
let request = FetchRequestInit {
url: url,
origin: document.origin().immutable().clone(),
destination: Destination::Image,
pipeline_id: Some(document.global().pipeline_id()),
.. FetchRequestInit::default()
};
// Layout image loads do not delay the document load event.
document.loader().fetch_async_background(request, action_sender);
} | FetchResponseMsg::ProcessResponseChunk(payload)); | random_line_split |
hkt_notes.rs | higher kinded types in rust
struct List<A> {... }
/* note we could fully apply this or use
trait Functor for List {... }
trait Functor for List<_> {... }
fn (&self<A>
Functor<A> for List
when we implement a normal trait we have a hidden type parameter
if we look at Haskell for example, a simple show class looks like this
class Show a
show :: a -> String
instance Show String
show x = x
trait Show {
fn show(&self) -> String
}
// Here &self is &String
impl Show for String {
fn | (&self) -> String {
self.clone()
}
}
If we move to an example like:
// Could be named anything, just keeping with Haskell
// convention, this represents a container that can be mapped ove
//
class Functor f
map :: (a -> b) -> f a -> f b
instance Functor [] where
map f [] = []
map f (x:xs) = f x : map f xs
trait Functor where Self<_> {... }
impl Functor for List
| show | identifier_name |
hkt_notes.rs | higher kinded types in rust
struct List<A> {... }
/* note we could fully apply this or use
trait Functor for List {... }
trait Functor for List<_> {... }
fn (&self<A>
Functor<A> for List
when we implement a normal trait we have a hidden type parameter
if we look at Haskell for example, a simple show class looks like this
class Show a
show :: a -> String
instance Show String
show x = x
trait Show {
fn show(&self) -> String
}
// Here &self is &String
impl Show for String {
fn show(&self) -> String {
self.clone()
}
}
If we move to an example like:
// Could be named anything, just keeping with Haskell
// convention, this represents a container that can be mapped over
// | map f (x:xs) = f x : map f xs
trait Functor where Self<_> {... }
impl Functor for List | class Functor f
map :: (a -> b) -> f a -> f b
instance Functor [] where
map f [] = [] | random_line_split |
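These notes stop at the point where `Functor` would need to abstract over the type constructor itself (`List`, not `List<A>`), which Rust traits could not express at the time. On current stable Rust (1.65+), generic associated types give a workable approximation; the following is a sketch of that encoding, not something the note's author had available.
```
// Requires stable generic associated types (Rust 1.65+).
trait Functor {
    type Item;
    type Mapped<B>: Functor<Item = B>;

    fn fmap<B, F: FnMut(Self::Item) -> B>(self, f: F) -> Self::Mapped<B>;
}

impl<A> Functor for Vec<A> {
    type Item = A;
    type Mapped<B> = Vec<B>;

    fn fmap<B, F: FnMut(A) -> B>(self, f: F) -> Vec<B> {
        self.into_iter().map(f).collect()
    }
}

impl<A> Functor for Option<A> {
    type Item = A;
    type Mapped<B> = Option<B>;

    fn fmap<B, F: FnMut(A) -> B>(self, mut f: F) -> Option<B> {
        self.map(|a| f(a))
    }
}

fn main() {
    assert_eq!(vec![1, 2, 3].fmap(|x| x * 2), vec![2, 4, 6]);
    assert_eq!(Some(1).fmap(|x| x + 1), Some(2));
}
```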
lib.rs | //! Render to a window created by Glutin, using Glium's OpenGL functions
#[macro_use] extern crate log;
#[macro_use] extern crate glium;
extern crate breeze_backend;
use breeze_backend::{BackendAction, BackendResult, Renderer};
use breeze_backend::ppu::{SCREEN_WIDTH, SCREEN_HEIGHT};
use breeze_backend::viewport::Viewport;
use glium::{DisplayBuild, Surface, Rect};
use glium::backend::glutin_backend::GlutinFacade;
use glium::index::{NoIndices, PrimitiveType};
use glium::glutin::WindowBuilder;
use glium::program::Program;
use glium::texture::{ClientFormat, RawImage2d, SrgbTexture2d};
use glium::uniforms::MagnifySamplerFilter;
use glium::vertex::VertexBuffer;
use std::borrow::Cow;
use std::error::Error;
#[derive(Copy, Clone)]
struct Vertex {
position: [f32; 2],
tex_coords: [f32; 2],
}
implement_vertex!(Vertex, position, tex_coords);
const VERTEX_SHADER_SRC: &'static str = r#"
#version 140
in vec2 position;
in vec2 tex_coords;
out vec2 v_tex_coords;
void main() {
v_tex_coords = tex_coords;
gl_Position = vec4(position, 0.0, 1.0);
}
"#;
const FRAGMENT_SHADER_SRC: &'static str = r#"
#version 140
in vec2 v_tex_coords;
out vec4 color;
uniform sampler2D tex;
void main() {
color = texture(tex, v_tex_coords);
}
"#;
pub struct GliumRenderer {
display: GlutinFacade,
/// This vertex buffer will only ever store 4 vertices spanning the whole window
vbuf: VertexBuffer<Vertex>,
/// A simple shader that maps our texture onto the window
program: Program,
/// This texture is updated with the PPU's data every frame
texture: SrgbTexture2d,
}
impl GliumRenderer {
fn handle_events(&mut self) -> BackendResult<Vec<BackendAction>> {
use glium::glutin::Event::*;
for ev in self.display.poll_events() {
match ev {
Closed => {
info!("quit event -> exiting");
return Ok(vec![BackendAction::Exit]);
}
Resized(w, h) => |
_ => {}
}
}
Ok(vec![])
}
}
fn resize(vbuf: &mut VertexBuffer<Vertex>, win_w: u32, win_h: u32) {
let Viewport { x, y, w, h } = Viewport::for_window_size(win_w, win_h);
let (win_w, win_h) = (win_w as f32, win_h as f32);
let (x, y, w, h) = (x as f32 / win_w, y as f32 / win_h, w as f32 / win_w, h as f32 / win_h);
// Since I can't be bothered to put in a translation matrix, we have to translate the pixel
// coords to OpenGL's [-1, 1] system.
let vx = (x - 0.5) * 2.0;
let vy = (y - 0.5) * 2.0;
let rect = make_rect(vx, vy, w * 2.0, h * 2.0);
vbuf.write(&rect);
}
/// Build 4 Vertices spanning up a rectangle. Bottom-Left corner = (-1, -1).
fn make_rect(x: f32, y: f32, w: f32, h: f32) -> [Vertex; 4] {
// FIXME Use a matrix instead of rebuilding the geometry on every resize
[
Vertex { position: [x, y + h], tex_coords: [0.0, 0.0] },
Vertex { position: [x + w, y + h], tex_coords: [1.0, 0.0] },
Vertex { position: [x, y], tex_coords: [0.0, 1.0] },
Vertex { position: [x + w, y], tex_coords: [1.0, 1.0] },
]
}
impl Renderer for GliumRenderer {
fn create() -> Result<Self, Box<Error>> {
let display = try!(WindowBuilder::new()
.with_dimensions(SCREEN_WIDTH * 3, SCREEN_HEIGHT * 3)
.with_title("breeze".to_owned())
.build_glium());
let mut vbuf = try!(VertexBuffer::empty_dynamic(&display, 4));
resize(&mut vbuf, SCREEN_WIDTH * 3, SCREEN_HEIGHT * 3);
Ok(GliumRenderer {
vbuf: vbuf,
program: try!(
Program::from_source(&display, VERTEX_SHADER_SRC, FRAGMENT_SHADER_SRC, None)),
texture: try!(SrgbTexture2d::empty(&display, SCREEN_WIDTH, SCREEN_HEIGHT)),
display: display,
})
}
fn render(&mut self, frame_data: &[u8]) -> BackendResult<Vec<BackendAction>> {
// upload new texture data
self.texture.write(Rect {
left: 0,
bottom: 0,
width: SCREEN_WIDTH,
height: SCREEN_HEIGHT,
}, RawImage2d {
data: Cow::Borrowed(frame_data),
width: SCREEN_WIDTH,
height: SCREEN_HEIGHT,
format: ClientFormat::U8U8U8,
});
let mut target = self.display.draw();
target.clear_color_srgb(0.0, 0.0, 0.0, 0.0);
target.draw(
&self.vbuf,
&NoIndices(PrimitiveType::TriangleStrip),
&self.program,
&uniform! {
tex: self.texture.sampled()
.magnify_filter(MagnifySamplerFilter::Nearest),
},
&Default::default()).unwrap();
target.finish().unwrap();
self.handle_events()
}
fn set_rom_title(&mut self, title: &str) {
if let Some(win_ref) = self.display.get_window() {
win_ref.set_title(title);
}
}
}
| {
resize(&mut self.vbuf, w, h);
} | conditional_block |
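The `resize` helper in this renderer is mostly coordinate bookkeeping: a viewport given in window pixels is re-expressed in OpenGL's normalized device coordinates, where each axis spans [-1, 1]. That mapping in isolation, with a couple of exact checks, looks like the standalone sketch below (not part of the renderer itself).
```
/// Map a rectangle given in window pixels (origin bottom-left) into OpenGL's
/// normalized device coordinates, where both axes run from -1.0 to 1.0.
fn pixels_to_ndc(x: u32, y: u32, w: u32, h: u32, win_w: u32, win_h: u32) -> (f32, f32, f32, f32) {
    let (win_w, win_h) = (win_w as f32, win_h as f32);
    let nx = (x as f32 / win_w - 0.5) * 2.0;
    let ny = (y as f32 / win_h - 0.5) * 2.0;
    let nw = w as f32 / win_w * 2.0;
    let nh = h as f32 / win_h * 2.0;
    (nx, ny, nw, nh)
}

fn main() {
    // The full window maps to the full NDC square...
    assert_eq!(pixels_to_ndc(0, 0, 800, 600, 800, 600), (-1.0, -1.0, 2.0, 2.0));
    // ...and a centered half-size viewport covers the middle of it.
    assert_eq!(pixels_to_ndc(200, 150, 400, 300, 800, 600), (-0.5, -0.5, 1.0, 1.0));
}
```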
lib.rs | //! Render to a window created by Glutin, using Glium's OpenGL functions
#[macro_use] extern crate log;
#[macro_use] extern crate glium;
extern crate breeze_backend;
use breeze_backend::{BackendAction, BackendResult, Renderer};
use breeze_backend::ppu::{SCREEN_WIDTH, SCREEN_HEIGHT};
use breeze_backend::viewport::Viewport;
use glium::{DisplayBuild, Surface, Rect};
use glium::backend::glutin_backend::GlutinFacade;
use glium::index::{NoIndices, PrimitiveType};
use glium::glutin::WindowBuilder;
use glium::program::Program;
use glium::texture::{ClientFormat, RawImage2d, SrgbTexture2d};
use glium::uniforms::MagnifySamplerFilter;
use glium::vertex::VertexBuffer;
use std::borrow::Cow;
use std::error::Error;
#[derive(Copy, Clone)]
struct Vertex {
position: [f32; 2],
tex_coords: [f32; 2],
}
implement_vertex!(Vertex, position, tex_coords);
const VERTEX_SHADER_SRC: &'static str = r#"
#version 140
in vec2 position;
in vec2 tex_coords;
out vec2 v_tex_coords;
void main() {
v_tex_coords = tex_coords;
gl_Position = vec4(position, 0.0, 1.0);
}
"#;
const FRAGMENT_SHADER_SRC: &'static str = r#"
#version 140
in vec2 v_tex_coords;
out vec4 color;
uniform sampler2D tex;
void main() {
color = texture(tex, v_tex_coords);
}
"#;
pub struct GliumRenderer {
display: GlutinFacade,
/// This vertex buffer will only ever store 4 vertices spanning the whole window
vbuf: VertexBuffer<Vertex>,
/// A simple shader that maps our texture onto the window
program: Program,
/// This texture is updated with the PPU's data every frame
texture: SrgbTexture2d,
}
impl GliumRenderer {
fn handle_events(&mut self) -> BackendResult<Vec<BackendAction>> {
use glium::glutin::Event::*;
for ev in self.display.poll_events() {
match ev {
Closed => {
info!("quit event -> exiting");
return Ok(vec![BackendAction::Exit]);
}
Resized(w, h) => {
resize(&mut self.vbuf, w, h);
}
_ => {}
}
}
Ok(vec![])
}
}
fn resize(vbuf: &mut VertexBuffer<Vertex>, win_w: u32, win_h: u32) {
let Viewport { x, y, w, h } = Viewport::for_window_size(win_w, win_h);
let (win_w, win_h) = (win_w as f32, win_h as f32);
let (x, y, w, h) = (x as f32 / win_w, y as f32 / win_h, w as f32 / win_w, h as f32 / win_h);
// Since I can't be bothered to put in a translation matrix, we have to translate the pixel
// coords to OpenGL's [-1, 1] system.
let vx = (x - 0.5) * 2.0;
let vy = (y - 0.5) * 2.0;
let rect = make_rect(vx, vy, w * 2.0, h * 2.0);
vbuf.write(&rect);
}
/// Build 4 Vertices spanning up a rectangle. Bottom-Left corner = (-1, -1).
fn make_rect(x: f32, y: f32, w: f32, h: f32) -> [Vertex; 4] {
// FIXME Use a matrix instead of rebuilding the geometry on every resize
[
Vertex { position: [x, y + h], tex_coords: [0.0, 0.0] },
Vertex { position: [x + w, y + h], tex_coords: [1.0, 0.0] },
Vertex { position: [x, y], tex_coords: [0.0, 1.0] },
Vertex { position: [x + w, y], tex_coords: [1.0, 1.0] },
]
}
impl Renderer for GliumRenderer {
fn create() -> Result<Self, Box<Error>> {
let display = try!(WindowBuilder::new()
.with_dimensions(SCREEN_WIDTH * 3, SCREEN_HEIGHT * 3)
.with_title("breeze".to_owned())
.build_glium());
let mut vbuf = try!(VertexBuffer::empty_dynamic(&display, 4));
resize(&mut vbuf, SCREEN_WIDTH * 3, SCREEN_HEIGHT * 3);
Ok(GliumRenderer {
vbuf: vbuf,
program: try!(
Program::from_source(&display, VERTEX_SHADER_SRC, FRAGMENT_SHADER_SRC, None)),
texture: try!(SrgbTexture2d::empty(&display, SCREEN_WIDTH, SCREEN_HEIGHT)),
display: display,
})
}
fn render(&mut self, frame_data: &[u8]) -> BackendResult<Vec<BackendAction>> {
// upload new texture data
self.texture.write(Rect {
left: 0,
bottom: 0,
width: SCREEN_WIDTH,
height: SCREEN_HEIGHT,
}, RawImage2d { | format: ClientFormat::U8U8U8,
});
let mut target = self.display.draw();
target.clear_color_srgb(0.0, 0.0, 0.0, 0.0);
target.draw(
&self.vbuf,
&NoIndices(PrimitiveType::TriangleStrip),
&self.program,
&uniform! {
tex: self.texture.sampled()
.magnify_filter(MagnifySamplerFilter::Nearest),
},
&Default::default()).unwrap();
target.finish().unwrap();
self.handle_events()
}
fn set_rom_title(&mut self, title: &str) {
if let Some(win_ref) = self.display.get_window() {
win_ref.set_title(title);
}
}
} | data: Cow::Borrowed(frame_data),
width: SCREEN_WIDTH,
height: SCREEN_HEIGHT, | random_line_split |
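`make_rect` in this file emits its four vertices in top-left, top-right, bottom-left, bottom-right order because they are drawn as a `TriangleStrip`: each consecutive window of three vertices forms one of the two triangles that tile the quad, and as long as backface culling is not enabled the alternating winding of a strip is harmless. A small sketch of how the strip expands (illustrative only):
```
#[derive(Clone, Copy, Debug, PartialEq)]
struct V(f32, f32);

/// Expand a triangle strip into the triangles the GPU would assemble.
fn strip_triangles(strip: &[V]) -> Vec<[V; 3]> {
    strip.windows(3).map(|w| [w[0], w[1], w[2]]).collect()
}

fn main() {
    // Same ordering as make_rect: top-left, top-right, bottom-left, bottom-right.
    let quad = [V(-1.0, 1.0), V(1.0, 1.0), V(-1.0, -1.0), V(1.0, -1.0)];
    let tris = strip_triangles(&quad);
    assert_eq!(tris.len(), 2);
    // Triangle 1 covers the upper-left half, triangle 2 the lower-right half.
    println!("{:?}", tris);
}
```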
lib.rs | //! Render to a window created by Glutin, using Glium's OpenGL functions
#[macro_use] extern crate log;
#[macro_use] extern crate glium;
extern crate breeze_backend;
use breeze_backend::{BackendAction, BackendResult, Renderer};
use breeze_backend::ppu::{SCREEN_WIDTH, SCREEN_HEIGHT};
use breeze_backend::viewport::Viewport;
use glium::{DisplayBuild, Surface, Rect};
use glium::backend::glutin_backend::GlutinFacade;
use glium::index::{NoIndices, PrimitiveType};
use glium::glutin::WindowBuilder;
use glium::program::Program;
use glium::texture::{ClientFormat, RawImage2d, SrgbTexture2d};
use glium::uniforms::MagnifySamplerFilter;
use glium::vertex::VertexBuffer;
use std::borrow::Cow;
use std::error::Error;
#[derive(Copy, Clone)]
struct Vertex {
position: [f32; 2],
tex_coords: [f32; 2],
}
implement_vertex!(Vertex, position, tex_coords);
const VERTEX_SHADER_SRC: &'static str = r#"
#version 140
in vec2 position;
in vec2 tex_coords;
out vec2 v_tex_coords;
void main() {
v_tex_coords = tex_coords;
gl_Position = vec4(position, 0.0, 1.0);
}
"#;
const FRAGMENT_SHADER_SRC: &'static str = r#"
#version 140
in vec2 v_tex_coords;
out vec4 color;
uniform sampler2D tex;
void main() {
color = texture(tex, v_tex_coords);
}
"#;
pub struct GliumRenderer {
display: GlutinFacade,
/// This vertex buffer will only ever store 4 vertices spanning the whole window
vbuf: VertexBuffer<Vertex>,
/// A simple shader that maps our texture onto the window
program: Program,
/// This texture is updated with the PPU's data every frame
texture: SrgbTexture2d,
}
impl GliumRenderer {
fn handle_events(&mut self) -> BackendResult<Vec<BackendAction>> {
use glium::glutin::Event::*;
for ev in self.display.poll_events() {
match ev {
Closed => {
info!("quit event -> exiting");
return Ok(vec![BackendAction::Exit]);
}
Resized(w, h) => {
resize(&mut self.vbuf, w, h);
}
_ => {}
}
}
Ok(vec![])
}
}
fn resize(vbuf: &mut VertexBuffer<Vertex>, win_w: u32, win_h: u32) {
let Viewport { x, y, w, h } = Viewport::for_window_size(win_w, win_h);
let (win_w, win_h) = (win_w as f32, win_h as f32);
let (x, y, w, h) = (x as f32 / win_w, y as f32 / win_h, w as f32 / win_w, h as f32 / win_h);
// Since I can't be bothered to put in a translation matrix, we have to translate the pixel
// coords to OpenGL's [-1, 1] system.
let vx = (x - 0.5) * 2.0;
let vy = (y - 0.5) * 2.0;
let rect = make_rect(vx, vy, w * 2.0, h * 2.0);
vbuf.write(&rect);
}
/// Build 4 Vertices spanning up a rectangle. Bottom-Left corner = (-1, -1).
fn make_rect(x: f32, y: f32, w: f32, h: f32) -> [Vertex; 4] {
// FIXME Use a matrix instead of rebuilding the geometry on every resize
[
Vertex { position: [x, y + h], tex_coords: [0.0, 0.0] },
Vertex { position: [x + w, y + h], tex_coords: [1.0, 0.0] },
Vertex { position: [x, y], tex_coords: [0.0, 1.0] },
Vertex { position: [x + w, y], tex_coords: [1.0, 1.0] },
]
}
impl Renderer for GliumRenderer {
fn create() -> Result<Self, Box<Error>> {
let display = try!(WindowBuilder::new()
.with_dimensions(SCREEN_WIDTH * 3, SCREEN_HEIGHT * 3)
.with_title("breeze".to_owned())
.build_glium());
let mut vbuf = try!(VertexBuffer::empty_dynamic(&display, 4));
resize(&mut vbuf, SCREEN_WIDTH * 3, SCREEN_HEIGHT * 3);
Ok(GliumRenderer {
vbuf: vbuf,
program: try!(
Program::from_source(&display, VERTEX_SHADER_SRC, FRAGMENT_SHADER_SRC, None)),
texture: try!(SrgbTexture2d::empty(&display, SCREEN_WIDTH, SCREEN_HEIGHT)),
display: display,
})
}
fn | (&mut self, frame_data: &[u8]) -> BackendResult<Vec<BackendAction>> {
// upload new texture data
self.texture.write(Rect {
left: 0,
bottom: 0,
width: SCREEN_WIDTH,
height: SCREEN_HEIGHT,
}, RawImage2d {
data: Cow::Borrowed(frame_data),
width: SCREEN_WIDTH,
height: SCREEN_HEIGHT,
format: ClientFormat::U8U8U8,
});
let mut target = self.display.draw();
target.clear_color_srgb(0.0, 0.0, 0.0, 0.0);
target.draw(
&self.vbuf,
&NoIndices(PrimitiveType::TriangleStrip),
&self.program,
&uniform! {
tex: self.texture.sampled()
.magnify_filter(MagnifySamplerFilter::Nearest),
},
&Default::default()).unwrap();
target.finish().unwrap();
self.handle_events()
}
fn set_rom_title(&mut self, title: &str) {
if let Some(win_ref) = self.display.get_window() {
win_ref.set_title(title);
}
}
}
| render | identifier_name |
main.rs | extern crate itertools;
use std::ops::Add;
use itertools::Itertools;
struct Character {
hitpoints: u16,
damage: u16,
armor: u16
}
impl Character {
fn rounds(&self, opponent: &Character) -> u16 {
let round_damage: f64 =
if opponent.armor >= self.damage { 1 }
else { self.damage - opponent.armor } as f64;
((opponent.hitpoints as f64) / round_damage).ceil() as u16
}
fn beats(&self, opponent: &Character) -> bool {
let a = self.rounds(opponent);
let b = opponent.rounds(&self);
a <= b
}
}
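// Illustrative note (not in the original source): `rounds` above is a ceiling
// division: an attacker dealing 7 damage against 2 armor lands 5 per round,
// so a 12 HP opponent falls in ceil(12 / 5) = 3 rounds. `beats` then declares
// victory when you need no more rounds than your opponent, since the caller
// strikes first. The same rule can be written without floats, e.g.:
//
//     fn rounds(damage: u16, armor: u16, hp: u16) -> u16 {
//         let per_round = damage.saturating_sub(armor).max(1);
//         (hp + per_round - 1) / per_round // integer ceiling division
//     }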
#[derive(Clone)]
struct Inventory {
cost: u16,
damage: u16,
armor: u16
}
impl<'a, 'b> Add<&'b Inventory> for &'a Inventory {
type Output = Inventory;
fn add(self, other: &'b Inventory) -> Inventory {
Inventory {
cost: self.cost + other.cost,
damage: self.damage + other.damage,
armor: self.armor + other.armor
}
}
}
fn main() {
let weapons = [
Inventory { cost: 8, damage: 4, armor: 0 },
Inventory { cost: 10, damage: 5, armor: 0 },
Inventory { cost: 25, damage: 6, armor: 0 },
Inventory { cost: 40, damage: 7, armor: 0 },
Inventory { cost: 74, damage: 8, armor: 0 },
];
let armor = [
Inventory { cost: 13, damage: 0, armor: 1 },
Inventory { cost: 31, damage: 0, armor: 2 },
Inventory { cost: 53, damage: 0, armor: 3 },
Inventory { cost: 75, damage: 0, armor: 4 },
Inventory { cost: 102, damage: 0, armor: 5 },
];
let rings = [
Inventory { cost: 25, damage: 1, armor: 0 },
Inventory { cost: 50, damage: 2, armor: 0 },
Inventory { cost: 100, damage: 3, armor: 0 },
Inventory { cost: 20, damage: 0, armor: 1 },
Inventory { cost: 40, damage: 0, armor: 2 },
Inventory { cost: 80, damage: 0, armor: 3 },
];
let boss = Character { hitpoints: 104, damage: 8, armor: 1 };
let mut minimal_inventory = Inventory {
cost: u16::max_value(), damage: 0, armor: 0
};
let mut maximal_inventory = Inventory {
cost: 0, damage: 0, armor: 0
};
for weapon in &weapons {
let mut inventories = Vec::new();
inventories.push(weapon.clone());
for armor in &armor {
inventories.push(weapon + armor);
}
| for inventory in inventories.iter() {
additional.push(inventory + &rings.0);
additional.push(inventory + &rings.1);
additional.push(inventory + &(rings.0 + rings.1));
}
}
inventories.extend(additional);
for inventory in &inventories {
let player = Character {
hitpoints: 100,
damage: inventory.damage,
armor: inventory.armor
};
let winner = player.beats(&boss);
if winner && inventory.cost < minimal_inventory.cost {
minimal_inventory = inventory.clone();
} else if !winner && inventory.cost > maximal_inventory.cost {
maximal_inventory = inventory.clone();
}
}
}
println!("Uncle Scrooge wins by spending merely {} bucks.",
minimal_inventory.cost);
println!("Betrayed by the shopkeeper, he pays {} bucks and loses.",
maximal_inventory.cost);
} | let mut additional = Vec::with_capacity(rings.len() * 3);
for rings in rings.iter().combinations() { | random_line_split |
main.rs |
extern crate itertools;
use std::ops::Add;
use itertools::Itertools;
struct Character {
hitpoints: u16,
damage: u16,
armor: u16
}
impl Character {
fn rounds(&self, opponent: &Character) -> u16 {
let round_damage: f64 =
if opponent.armor >= self.damage { 1 }
else | as f64;
((opponent.hitpoints as f64) / round_damage).ceil() as u16
}
fn beats(&self, opponent: &Character) -> bool {
let a = self.rounds(opponent);
let b = opponent.rounds(&self);
a <= b
}
}
#[derive(Clone)]
struct Inventory {
cost: u16,
damage: u16,
armor: u16
}
impl<'a, 'b> Add<&'b Inventory> for &'a Inventory {
type Output = Inventory;
fn add(self, other: &'b Inventory) -> Inventory {
Inventory {
cost: self.cost + other.cost,
damage: self.damage + other.damage,
armor: self.armor + other.armor
}
}
}
fn main() {
let weapons = [
Inventory { cost: 8, damage: 4, armor: 0 },
Inventory { cost: 10, damage: 5, armor: 0 },
Inventory { cost: 25, damage: 6, armor: 0 },
Inventory { cost: 40, damage: 7, armor: 0 },
Inventory { cost: 74, damage: 8, armor: 0 },
];
let armor = [
Inventory { cost: 13, damage: 0, armor: 1 },
Inventory { cost: 31, damage: 0, armor: 2 },
Inventory { cost: 53, damage: 0, armor: 3 },
Inventory { cost: 75, damage: 0, armor: 4 },
Inventory { cost: 102, damage: 0, armor: 5 },
];
let rings = [
Inventory { cost: 25, damage: 1, armor: 0 },
Inventory { cost: 50, damage: 2, armor: 0 },
Inventory { cost: 100, damage: 3, armor: 0 },
Inventory { cost: 20, damage: 0, armor: 1 },
Inventory { cost: 40, damage: 0, armor: 2 },
Inventory { cost: 80, damage: 0, armor: 3 },
];
let boss = Character { hitpoints: 104, damage: 8, armor: 1 };
let mut minimal_inventory = Inventory {
cost: u16::max_value(), damage: 0, armor: 0
};
let mut maximal_inventory = Inventory {
cost: 0, damage: 0, armor: 0
};
for weapon in &weapons {
let mut inventories = Vec::new();
inventories.push(weapon.clone());
for armor in &armor {
inventories.push(weapon + armor);
}
let mut additional = Vec::with_capacity(rings.len() * 3);
for rings in rings.iter().combinations() {
for inventory in inventories.iter() {
additional.push(inventory + &rings.0);
additional.push(inventory + &rings.1);
additional.push(inventory + &(rings.0 + rings.1));
}
}
inventories.extend(additional);
for inventory in &inventories {
let player = Character {
hitpoints: 100,
damage: inventory.damage,
armor: inventory.armor
};
let winner = player.beats(&boss);
if winner && inventory.cost < minimal_inventory.cost {
minimal_inventory = inventory.clone();
} else if !winner && inventory.cost > maximal_inventory.cost {
maximal_inventory = inventory.clone();
}
}
}
println!("Uncle Scrooge wins by spending merely {} bucks.",
minimal_inventory.cost);
println!("Betrayed by the shopkeeper, he pays {} bucks and loses.",
maximal_inventory.cost);
}
| { self.damage - opponent.armor } | conditional_block |
main.rs |
extern crate itertools;
use std::ops::Add;
use itertools::Itertools;
struct | {
hitpoints: u16,
damage: u16,
armor: u16
}
impl Character {
fn rounds(&self, opponent: &Character) -> u16 {
let round_damage: f64 =
if opponent.armor >= self.damage { 1 }
else { self.damage - opponent.armor } as f64;
((opponent.hitpoints as f64) / round_damage).ceil() as u16
}
fn beats(&self, opponent: &Character) -> bool {
let a = self.rounds(opponent);
let b = opponent.rounds(&self);
a <= b
}
}
#[derive(Clone)]
struct Inventory {
cost: u16,
damage: u16,
armor: u16
}
impl<'a, 'b> Add<&'b Inventory> for &'a Inventory {
type Output = Inventory;
fn add(self, other: &'b Inventory) -> Inventory {
Inventory {
cost: self.cost + other.cost,
damage: self.damage + other.damage,
armor: self.armor + other.armor
}
}
}
fn main() {
let weapons = [
Inventory { cost: 8, damage: 4, armor: 0 },
Inventory { cost: 10, damage: 5, armor: 0 },
Inventory { cost: 25, damage: 6, armor: 0 },
Inventory { cost: 40, damage: 7, armor: 0 },
Inventory { cost: 74, damage: 8, armor: 0 },
];
let armor = [
Inventory { cost: 13, damage: 0, armor: 1 },
Inventory { cost: 31, damage: 0, armor: 2 },
Inventory { cost: 53, damage: 0, armor: 3 },
Inventory { cost: 75, damage: 0, armor: 4 },
Inventory { cost: 102, damage: 0, armor: 5 },
];
let rings = [
Inventory { cost: 25, damage: 1, armor: 0 },
Inventory { cost: 50, damage: 2, armor: 0 },
Inventory { cost: 100, damage: 3, armor: 0 },
Inventory { cost: 20, damage: 0, armor: 1 },
Inventory { cost: 40, damage: 0, armor: 2 },
Inventory { cost: 80, damage: 0, armor: 3 },
];
let boss = Character { hitpoints: 104, damage: 8, armor: 1 };
let mut minimal_inventory = Inventory {
cost: u16::max_value(), damage: 0, armor: 0
};
let mut maximal_inventory = Inventory {
cost: 0, damage: 0, armor: 0
};
for weapon in &weapons {
let mut inventories = Vec::new();
inventories.push(weapon.clone());
for armor in &armor {
inventories.push(weapon + armor);
}
let mut additional = Vec::with_capacity(rings.len() * 3);
for rings in rings.iter().combinations() {
for inventory in inventories.iter() {
additional.push(inventory + &rings.0);
additional.push(inventory + &rings.1);
additional.push(inventory + &(rings.0 + rings.1));
}
}
inventories.extend(additional);
for inventory in &inventories {
let player = Character {
hitpoints: 100,
damage: inventory.damage,
armor: inventory.armor
};
let winner = player.beats(&boss);
if winner && inventory.cost < minimal_inventory.cost {
minimal_inventory = inventory.clone();
} else if !winner && inventory.cost > maximal_inventory.cost {
maximal_inventory = inventory.clone();
}
}
}
println!("Uncle Scrooge wins by spending merely {} bucks.",
minimal_inventory.cost);
println!("Betrayed by the shopkeeper, he pays {} bucks and loses.",
maximal_inventory.cost);
}
| Character | identifier_name |
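The nested loops in these rows enumerate shop loadouts (one weapon, optional armor, up to two distinct rings, which looks like the Advent of Code 2015 day 21 shop) before checking each against the boss. One compact way to enumerate the search space those loops are aiming at, by cost alone and without depending on a particular `itertools` version, is sketched below; the function name and signature are invented for the sketch.
```
/// Every way to pick one weapon, at most one armor piece and at most two
/// distinct rings, expressed only through item costs for brevity.
fn loadout_costs(weapons: &[u32], armors: &[u32], rings: &[u32]) -> Vec<u32> {
    let mut out = Vec::new();
    // Armor choices: none, or any single piece.
    let armor_opts: Vec<u32> = std::iter::once(0).chain(armors.iter().copied()).collect();
    // Ring choices: none, any single ring, or any pair of distinct rings.
    let mut ring_opts = vec![0];
    for (i, &a) in rings.iter().enumerate() {
        ring_opts.push(a);
        for &b in &rings[i + 1..] {
            ring_opts.push(a + b);
        }
    }
    for &w in weapons {
        for &ar in &armor_opts {
            for &r in &ring_opts {
                out.push(w + ar + r);
            }
        }
    }
    out
}

fn main() {
    // 5 weapons, 6 armor options (incl. none), 22 ring options (incl. none): 5 * 6 * 22 = 660.
    let n = loadout_costs(&[8, 10, 25, 40, 74],
                          &[13, 31, 53, 75, 102],
                          &[25, 50, 100, 20, 40, 80]).len();
    assert_eq!(n, 660);
}
```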
lib.rs | // Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![no_std]
#![cfg_attr(feature = "allocator_api", feature(allocator_api))]
#![cfg_attr(feature = "allocator_api", feature(nonnull_slice_from_raw_parts))]
#[cfg(test)]
#[macro_use]
extern crate std;
use scudo_sys::{scudo_allocate, scudo_deallocate, scudo_print_stats, SCUDO_MIN_ALIGN};
use core::alloc::{GlobalAlloc, Layout};
use core::cmp::max;
/// Zero sized type representing the global static scudo allocator declared in C.
#[derive(Clone, Copy)]
pub struct GlobalScudoAllocator;
/// Returns `layout` or the minimum size/align layout for scudo if it's too small.
fn fit_layout(layout: Layout) -> Layout {
// SAFETY: SCUDO_MIN_ALIGN is constant and known to be powers of 2.
let min_align = unsafe { SCUDO_MIN_ALIGN } as usize;
let align = max(min_align, layout.align());
// SAFETY: Size and align are good by construction.
unsafe { Layout::from_size_align_unchecked(layout.size(), align) }
}
unsafe impl GlobalAlloc for GlobalScudoAllocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
let layout = fit_layout(layout);
scudo_allocate(layout.size(), layout.align()) as _
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
let layout = fit_layout(layout);
scudo_deallocate(ptr as _, layout.size(), layout.align());
}
}
impl GlobalScudoAllocator {
/// Prints the global Scudo allocator's internal statistics. |
#[cfg(feature = "allocator_api")]
use core::alloc::AllocError;
#[cfg(feature = "allocator_api")]
use core::ptr::NonNull;
#[cfg(feature = "allocator_api")]
unsafe impl core::alloc::Allocator for GlobalScudoAllocator {
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
let layout = fit_layout(layout);
// TODO(cneo): Scudo buckets and therefore overallocates. Use SizeClassMap to
// return the correct length for the slice?
let ptr = unsafe { scudo_allocate(layout.size(), layout.align()) } as _;
let n = NonNull::new(ptr).ok_or(AllocError)?;
Ok(NonNull::slice_from_raw_parts(n, layout.size()))
}
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
let layout = fit_layout(layout);
scudo_deallocate(ptr.as_ptr() as _, layout.size(), layout.align());
}
}
#[cfg(test)]
pub mod test {
use super::*;
use std::prelude::v1::*;
use core::alloc::Layout;
use libc::{c_ulong, c_void, size_t};
use scudo_sys::{scudo_disable, scudo_enable, scudo_iterate};
extern "C" fn contains(_address: c_ulong, size: size_t, pair: *mut c_void) {
let (target_size, count) = unsafe { &mut *(pair as *mut (usize, usize)) };
if size == *target_size {
*count += 1;
}
}
/// Test-only function that returns the number of allocations of a given size.
fn count_allocations_by_size(size: usize) -> usize {
let mut size_and_count = (size, 0usize);
unsafe {
scudo_disable();
scudo_iterate(
contains,
&mut size_and_count as *mut (usize, usize) as *mut c_void,
);
scudo_enable();
}
size_and_count.1
}
#[test]
fn test_alloc_and_dealloc_use_scudo() {
let a = GlobalScudoAllocator;
let layout = Layout::from_size_align(4242, 16).unwrap();
assert_eq!(count_allocations_by_size(4242), 0);
let p = unsafe { a.alloc(layout) };
assert_eq!(count_allocations_by_size(4242), 1);
unsafe { a.dealloc(p, layout) };
assert_eq!(count_allocations_by_size(4242), 0);
}
#[global_allocator]
static A: GlobalScudoAllocator = GlobalScudoAllocator;
#[test]
fn test_vec_uses_scudo() {
assert_eq!(count_allocations_by_size(8200_1337), 0);
let mut v = vec![8u8; 8200_1337];
assert_eq!(count_allocations_by_size(8200_1337), 1);
v.clear();
v.shrink_to_fit();
assert_eq!(count_allocations_by_size(8200_1337), 0);
}
#[cfg(feature = "allocator_api")]
#[test]
fn test_vec_with_custom_allocator_uses_scudo() {
assert_eq!(count_allocations_by_size(8200_4242), 0);
let mut v = Vec::<u8, GlobalScudoAllocator>::with_capacity_in(8200_4242, A);
assert_eq!(count_allocations_by_size(8200_4242), 1);
v.shrink_to_fit();
assert_eq!(count_allocations_by_size(8200_4242), 0);
}
#[test]
fn test_box_uses_scudo() {
assert_eq!(count_allocations_by_size(20), 0);
let b = Box::new([3.0f32; 5]);
assert_eq!(count_allocations_by_size(20), 1);
// Move b
(move || b)();
assert_eq!(count_allocations_by_size(20), 0);
}
#[cfg(feature = "allocator_api")]
#[test]
fn test_box_with_custom_allocator_uses_scudo() {
assert_eq!(count_allocations_by_size(28), 0);
let b = Box::new_in([3.0f32; 7], A);
assert_eq!(count_allocations_by_size(28), 1);
// Move b
(move || b)();
assert_eq!(count_allocations_by_size(28), 0);
}
#[test]
fn test_1byte_box_uses_scudo() {
// Unlike the other arbitrary size allocations, it seems
// Rust's test harness does have some 1 byte allocations so we cannot
// assert there are 0, then 1, then 0.
let before = count_allocations_by_size(1);
let b = Box::new(1i8);
assert_eq!(count_allocations_by_size(1), before + 1);
// Move b
(move || b)();
assert_eq!(count_allocations_by_size(1), before);
}
} | pub fn print_stats() {
unsafe { scudo_print_stats() }
}
} | random_line_split |
lib.rs | // Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![no_std]
#![cfg_attr(feature = "allocator_api", feature(allocator_api))]
#![cfg_attr(feature = "allocator_api", feature(nonnull_slice_from_raw_parts))]
#[cfg(test)]
#[macro_use]
extern crate std;
use scudo_sys::{scudo_allocate, scudo_deallocate, scudo_print_stats, SCUDO_MIN_ALIGN};
use core::alloc::{GlobalAlloc, Layout};
use core::cmp::max;
/// Zero sized type representing the global static scudo allocator declared in C.
#[derive(Clone, Copy)]
pub struct GlobalScudoAllocator;
/// Returns `layout` or the minimum size/align layout for scudo if it's too small.
fn fit_layout(layout: Layout) -> Layout {
// SAFETY: SCUDO_MIN_ALIGN is constant and known to be powers of 2.
let min_align = unsafe { SCUDO_MIN_ALIGN } as usize;
let align = max(min_align, layout.align());
// SAFETY: Size and align are good by construction.
unsafe { Layout::from_size_align_unchecked(layout.size(), align) }
}
unsafe impl GlobalAlloc for GlobalScudoAllocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
let layout = fit_layout(layout);
scudo_allocate(layout.size(), layout.align()) as _
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
let layout = fit_layout(layout);
scudo_deallocate(ptr as _, layout.size(), layout.align());
}
}
impl GlobalScudoAllocator {
/// Prints the global Scudo allocator's internal statistics.
pub fn print_stats() {
unsafe { scudo_print_stats() }
}
}
#[cfg(feature = "allocator_api")]
use core::alloc::AllocError;
#[cfg(feature = "allocator_api")]
use core::ptr::NonNull;
#[cfg(feature = "allocator_api")]
unsafe impl core::alloc::Allocator for GlobalScudoAllocator {
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
let layout = fit_layout(layout);
// TODO(cneo): Scudo buckets and therefore overallocates. Use SizeClassMap to
// return the correct length for the slice?
let ptr = unsafe { scudo_allocate(layout.size(), layout.align()) } as _;
let n = NonNull::new(ptr).ok_or(AllocError)?;
Ok(NonNull::slice_from_raw_parts(n, layout.size()))
}
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
let layout = fit_layout(layout);
scudo_deallocate(ptr.as_ptr() as _, layout.size(), layout.align());
}
}
#[cfg(test)]
pub mod test {
use super::*;
use std::prelude::v1::*;
use core::alloc::Layout;
use libc::{c_ulong, c_void, size_t};
use scudo_sys::{scudo_disable, scudo_enable, scudo_iterate};
extern "C" fn contains(_address: c_ulong, size: size_t, pair: *mut c_void) {
let (target_size, count) = unsafe { &mut *(pair as *mut (usize, usize)) };
if size == *target_size {
*count += 1;
}
}
/// Test-only function that returns the number of allocations of a given size.
fn count_allocations_by_size(size: usize) -> usize {
let mut size_and_count = (size, 0usize);
unsafe {
scudo_disable();
scudo_iterate(
contains,
&mut size_and_count as *mut (usize, usize) as *mut c_void,
);
scudo_enable();
}
size_and_count.1
}
#[test]
fn test_alloc_and_dealloc_use_scudo() {
let a = GlobalScudoAllocator;
let layout = Layout::from_size_align(4242, 16).unwrap();
assert_eq!(count_allocations_by_size(4242), 0);
let p = unsafe { a.alloc(layout) };
assert_eq!(count_allocations_by_size(4242), 1);
unsafe { a.dealloc(p, layout) };
assert_eq!(count_allocations_by_size(4242), 0);
}
#[global_allocator]
static A: GlobalScudoAllocator = GlobalScudoAllocator;
#[test]
fn test_vec_uses_scudo() {
assert_eq!(count_allocations_by_size(8200_1337), 0);
let mut v = vec![8u8; 8200_1337];
assert_eq!(count_allocations_by_size(8200_1337), 1);
v.clear();
v.shrink_to_fit();
assert_eq!(count_allocations_by_size(8200_1337), 0);
}
#[cfg(feature = "allocator_api")]
#[test]
fn test_vec_with_custom_allocator_uses_scudo() {
assert_eq!(count_allocations_by_size(8200_4242), 0);
let mut v = Vec::<u8, GlobalScudoAllocator>::with_capacity_in(8200_4242, A);
assert_eq!(count_allocations_by_size(8200_4242), 1);
v.shrink_to_fit();
assert_eq!(count_allocations_by_size(8200_4242), 0);
}
#[test]
fn test_box_uses_scudo() {
assert_eq!(count_allocations_by_size(20), 0);
let b = Box::new([3.0f32; 5]);
assert_eq!(count_allocations_by_size(20), 1);
// Move b
(move || b)();
assert_eq!(count_allocations_by_size(20), 0);
}
#[cfg(feature = "allocator_api")]
#[test]
fn test_box_with_custom_allocator_uses_scudo() |
#[test]
fn test_1byte_box_uses_scudo() {
// Unlike the other arbitrary size allocations, it seems
// Rust's test harness does have some 1 byte allocations so we cannot
// assert there are 0, then 1, then 0.
let before = count_allocations_by_size(1);
let b = Box::new(1i8);
assert_eq!(count_allocations_by_size(1), before + 1);
// Move b
(move || b)();
assert_eq!(count_allocations_by_size(1), before);
}
}
| {
assert_eq!(count_allocations_by_size(28), 0);
let b = Box::new_in([3.0f32; 7], A);
assert_eq!(count_allocations_by_size(28), 1);
// Move b
(move || b)();
assert_eq!(count_allocations_by_size(28), 0);
} | identifier_body |
lib.rs | // Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![no_std]
#![cfg_attr(feature = "allocator_api", feature(allocator_api))]
#![cfg_attr(feature = "allocator_api", feature(nonnull_slice_from_raw_parts))]
#[cfg(test)]
#[macro_use]
extern crate std;
use scudo_sys::{scudo_allocate, scudo_deallocate, scudo_print_stats, SCUDO_MIN_ALIGN};
use core::alloc::{GlobalAlloc, Layout};
use core::cmp::max;
/// Zero sized type representing the global static scudo allocator declared in C.
#[derive(Clone, Copy)]
pub struct GlobalScudoAllocator;
/// Returns `layout` or the minimum size/align layout for scudo if it's too small.
fn fit_layout(layout: Layout) -> Layout {
// SAFETY: SCUDO_MIN_ALIGN is constant and known to be powers of 2.
let min_align = unsafe { SCUDO_MIN_ALIGN } as usize;
let align = max(min_align, layout.align());
// SAFETY: Size and align are good by construction.
unsafe { Layout::from_size_align_unchecked(layout.size(), align) }
}
unsafe impl GlobalAlloc for GlobalScudoAllocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
let layout = fit_layout(layout);
scudo_allocate(layout.size(), layout.align()) as _
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
let layout = fit_layout(layout);
scudo_deallocate(ptr as _, layout.size(), layout.align());
}
}
impl GlobalScudoAllocator {
/// Prints the global Scudo allocator's internal statistics.
pub fn print_stats() {
unsafe { scudo_print_stats() }
}
}
#[cfg(feature = "allocator_api")]
use core::alloc::AllocError;
#[cfg(feature = "allocator_api")]
use core::ptr::NonNull;
#[cfg(feature = "allocator_api")]
unsafe impl core::alloc::Allocator for GlobalScudoAllocator {
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
let layout = fit_layout(layout);
// TODO(cneo): Scudo buckets and therefore overallocates. Use SizeClassMap to
// return the correct length for the slice?
let ptr = unsafe { scudo_allocate(layout.size(), layout.align()) } as _;
let n = NonNull::new(ptr).ok_or(AllocError)?;
Ok(NonNull::slice_from_raw_parts(n, layout.size()))
}
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
let layout = fit_layout(layout);
scudo_deallocate(ptr.as_ptr() as _, layout.size(), layout.align());
}
}
#[cfg(test)]
pub mod test {
use super::*;
use std::prelude::v1::*;
use core::alloc::Layout;
use libc::{c_ulong, c_void, size_t};
use scudo_sys::{scudo_disable, scudo_enable, scudo_iterate};
extern "C" fn contains(_address: c_ulong, size: size_t, pair: *mut c_void) {
let (target_size, count) = unsafe { &mut *(pair as *mut (usize, usize)) };
if size == *target_size |
}
/// Test-only function that returns the number of allocations of a given size.
fn count_allocations_by_size(size: usize) -> usize {
let mut size_and_count = (size, 0usize);
unsafe {
scudo_disable();
scudo_iterate(
contains,
&mut size_and_count as *mut (usize, usize) as *mut c_void,
);
scudo_enable();
}
size_and_count.1
}
#[test]
fn test_alloc_and_dealloc_use_scudo() {
let a = GlobalScudoAllocator;
let layout = Layout::from_size_align(4242, 16).unwrap();
assert_eq!(count_allocations_by_size(4242), 0);
let p = unsafe { a.alloc(layout) };
assert_eq!(count_allocations_by_size(4242), 1);
unsafe { a.dealloc(p, layout) };
assert_eq!(count_allocations_by_size(4242), 0);
}
#[global_allocator]
static A: GlobalScudoAllocator = GlobalScudoAllocator;
#[test]
fn test_vec_uses_scudo() {
assert_eq!(count_allocations_by_size(8200_1337), 0);
let mut v = vec![8u8; 8200_1337];
assert_eq!(count_allocations_by_size(8200_1337), 1);
v.clear();
v.shrink_to_fit();
assert_eq!(count_allocations_by_size(8200_1337), 0);
}
#[cfg(feature = "allocator_api")]
#[test]
fn test_vec_with_custom_allocator_uses_scudo() {
assert_eq!(count_allocations_by_size(8200_4242), 0);
let mut v = Vec::<u8, GlobalScudoAllocator>::with_capacity_in(8200_4242, A);
assert_eq!(count_allocations_by_size(8200_4242), 1);
v.shrink_to_fit();
assert_eq!(count_allocations_by_size(8200_4242), 0);
}
#[test]
fn test_box_uses_scudo() {
assert_eq!(count_allocations_by_size(20), 0);
let b = Box::new([3.0f32; 5]);
assert_eq!(count_allocations_by_size(20), 1);
// Move b
(move || b)();
assert_eq!(count_allocations_by_size(20), 0);
}
#[cfg(feature = "allocator_api")]
#[test]
fn test_box_with_custom_allocator_uses_scudo() {
assert_eq!(count_allocations_by_size(28), 0);
let b = Box::new_in([3.0f32; 7], A);
assert_eq!(count_allocations_by_size(28), 1);
// Move b
(move || b)();
assert_eq!(count_allocations_by_size(28), 0);
}
#[test]
fn test_1byte_box_uses_scudo() {
// Unlike the other arbitrary size allocations, it seems
// Rust's test harness does have some 1 byte allocations so we cannot
// assert there are 0, then 1, then 0.
let before = count_allocations_by_size(1);
let b = Box::new(1i8);
assert_eq!(count_allocations_by_size(1), before + 1);
// Move b
(move || b)();
assert_eq!(count_allocations_by_size(1), before);
}
}
| {
*count += 1;
} | conditional_block |
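`fit_layout` in these rows exists because Scudo rejects alignments below `SCUDO_MIN_ALIGN`, so undersized requests are bumped up before they reach the C API. The same clamping idea can be shown against the standard system allocator so it runs without the `scudo-sys` bindings; the 16-byte minimum below is an arbitrary stand-in for `SCUDO_MIN_ALIGN`, and the wrapper is a sketch rather than the crate's actual implementation.
```
use std::alloc::{GlobalAlloc, Layout, System};

/// Wraps another allocator and never asks it for less than MIN_ALIGN alignment.
struct MinAlign<A>(A);

const MIN_ALIGN: usize = 16; // stand-in for SCUDO_MIN_ALIGN

impl<A> MinAlign<A> {
    fn fit(layout: Layout) -> Layout {
        let align = layout.align().max(MIN_ALIGN);
        // Alignment stays a power of two, so this cannot fail for sane inputs.
        Layout::from_size_align(layout.size(), align).unwrap()
    }
}

unsafe impl<A: GlobalAlloc> GlobalAlloc for MinAlign<A> {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        self.0.alloc(Self::fit(layout))
    }
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        // Deallocation must see the same (adjusted) layout as allocation.
        self.0.dealloc(ptr, Self::fit(layout))
    }
}

#[global_allocator]
static GLOBAL: MinAlign<System> = MinAlign(System);

fn main() {
    let v = vec![1u8, 2, 3]; // served through the clamped allocator
    println!("{:?}", v);
}
```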
lib.rs | // Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![no_std]
#![cfg_attr(feature = "allocator_api", feature(allocator_api))]
#![cfg_attr(feature = "allocator_api", feature(nonnull_slice_from_raw_parts))]
#[cfg(test)]
#[macro_use]
extern crate std;
use scudo_sys::{scudo_allocate, scudo_deallocate, scudo_print_stats, SCUDO_MIN_ALIGN};
use core::alloc::{GlobalAlloc, Layout};
use core::cmp::max;
/// Zero sized type representing the global static scudo allocator declared in C.
#[derive(Clone, Copy)]
pub struct GlobalScudoAllocator;
/// Returns `layout` or the minimum size/align layout for scudo if it's too small.
fn fit_layout(layout: Layout) -> Layout {
// SAFETY: SCUDO_MIN_ALIGN is constant and known to be powers of 2.
let min_align = unsafe { SCUDO_MIN_ALIGN } as usize;
let align = max(min_align, layout.align());
// SAFETY: Size and align are good by construction.
unsafe { Layout::from_size_align_unchecked(layout.size(), align) }
}
unsafe impl GlobalAlloc for GlobalScudoAllocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
let layout = fit_layout(layout);
scudo_allocate(layout.size(), layout.align()) as _
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
let layout = fit_layout(layout);
scudo_deallocate(ptr as _, layout.size(), layout.align());
}
}
impl GlobalScudoAllocator {
/// Prints the global Scudo allocator's internal statistics.
pub fn print_stats() {
unsafe { scudo_print_stats() }
}
}
#[cfg(feature = "allocator_api")]
use core::alloc::AllocError;
#[cfg(feature = "allocator_api")]
use core::ptr::NonNull;
#[cfg(feature = "allocator_api")]
unsafe impl core::alloc::Allocator for GlobalScudoAllocator {
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
let layout = fit_layout(layout);
// TODO(cneo): Scudo buckets and therefore overallocates. Use SizeClassMap to
// return the correct length for the slice?
let ptr = unsafe { scudo_allocate(layout.size(), layout.align()) } as _;
let n = NonNull::new(ptr).ok_or(AllocError)?;
Ok(NonNull::slice_from_raw_parts(n, layout.size()))
}
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
let layout = fit_layout(layout);
scudo_deallocate(ptr.as_ptr() as _, layout.size(), layout.align());
}
}
#[cfg(test)]
pub mod test {
use super::*;
use std::prelude::v1::*;
use core::alloc::Layout;
use libc::{c_ulong, c_void, size_t};
use scudo_sys::{scudo_disable, scudo_enable, scudo_iterate};
extern "C" fn contains(_address: c_ulong, size: size_t, pair: *mut c_void) {
let (target_size, count) = unsafe { &mut *(pair as *mut (usize, usize)) };
if size == *target_size {
*count += 1;
}
}
/// Test-only function that returns the number of allocations of a given size.
fn count_allocations_by_size(size: usize) -> usize {
let mut size_and_count = (size, 0usize);
unsafe {
scudo_disable();
scudo_iterate(
contains,
&mut size_and_count as *mut (usize, usize) as *mut c_void,
);
scudo_enable();
}
size_and_count.1
}
#[test]
fn test_alloc_and_dealloc_use_scudo() {
let a = GlobalScudoAllocator;
let layout = Layout::from_size_align(4242, 16).unwrap();
assert_eq!(count_allocations_by_size(4242), 0);
let p = unsafe { a.alloc(layout) };
assert_eq!(count_allocations_by_size(4242), 1);
unsafe { a.dealloc(p, layout) };
assert_eq!(count_allocations_by_size(4242), 0);
}
#[global_allocator]
static A: GlobalScudoAllocator = GlobalScudoAllocator;
#[test]
fn test_vec_uses_scudo() {
assert_eq!(count_allocations_by_size(8200_1337), 0);
let mut v = vec![8u8; 8200_1337];
assert_eq!(count_allocations_by_size(8200_1337), 1);
v.clear();
v.shrink_to_fit();
assert_eq!(count_allocations_by_size(8200_1337), 0);
}
#[cfg(feature = "allocator_api")]
#[test]
fn test_vec_with_custom_allocator_uses_scudo() {
assert_eq!(count_allocations_by_size(8200_4242), 0);
let mut v = Vec::<u8, GlobalScudoAllocator>::with_capacity_in(8200_4242, A);
assert_eq!(count_allocations_by_size(8200_4242), 1);
v.shrink_to_fit();
assert_eq!(count_allocations_by_size(8200_4242), 0);
}
#[test]
fn test_box_uses_scudo() {
assert_eq!(count_allocations_by_size(20), 0);
let b = Box::new([3.0f32; 5]);
assert_eq!(count_allocations_by_size(20), 1);
// Move b
(move || b)();
assert_eq!(count_allocations_by_size(20), 0);
}
#[cfg(feature = "allocator_api")]
#[test]
fn test_box_with_custom_allocator_uses_scudo() {
assert_eq!(count_allocations_by_size(28), 0);
let b = Box::new_in([3.0f32; 7], A);
assert_eq!(count_allocations_by_size(28), 1);
// Move b
(move || b)();
assert_eq!(count_allocations_by_size(28), 0);
}
#[test]
fn | () {
// Unlike the other arbitrary size allocations, it seems
// Rust's test harness does have some 1 byte allocations so we cannot
// assert there are 0, then 1, then 0.
let before = count_allocations_by_size(1);
let b = Box::new(1i8);
assert_eq!(count_allocations_by_size(1), before + 1);
// Move b
(move || b)();
assert_eq!(count_allocations_by_size(1), before);
}
}
| test_1byte_box_uses_scudo | identifier_name |
transit.rs | /*
Copyright (c) 2015, 2016 Saurav Sachidanand
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#![allow(non_snake_case)]
extern crate astro;
use astro::*;
#[test]
#[allow(unused_variables)]
fn | () {
let eq_point1 = coords::EqPoint{
asc: 40.68021_f64.to_radians(),
dec: 18.04761_f64.to_radians()
};
let eq_point2 = coords::EqPoint{
asc: 41.73129_f64.to_radians(),
dec: 18.44092_f64.to_radians()
};
let eq_point3 = coords::EqPoint{
asc: 42.78204_f64.to_radians(),
dec: 18.82742_f64.to_radians()
};
let geograph_point = coords::GeographPoint{
long: 71.0833_f64.to_radians(),
lat: 42.3333_f64.to_radians(),
};
let Theta0 = 177.74208_f64.to_radians();
let deltaT = time::delta_t(1988, 3);
let (h_rise, m_rise, s_rise) = transit::time(
&transit::TransitType::Rise,
&transit::TransitBody::StarOrPlanet,
&geograph_point,
&eq_point1,
&eq_point2,
&eq_point3,
Theta0,
deltaT,
0.0
);
assert_eq!((h_rise, m_rise), (12, 25));
let (h_transit, m_transit, s_transit) = transit::time(
&transit::TransitType::Transit,
&transit::TransitBody::StarOrPlanet,
&geograph_point,
&eq_point1,
&eq_point2,
&eq_point3,
Theta0,
deltaT,
0.0
);
assert_eq!((h_transit, m_transit), (19, 40));
let (h_set, m_set, s_set) = transit::time(
&transit::TransitType::Set,
&transit::TransitBody::StarOrPlanet,
&geograph_point,
&eq_point1,
&eq_point2,
&eq_point3,
Theta0,
deltaT,
0.0
);
assert_eq!((h_set, m_set), (2, 54));
}
| time | identifier_name |
transit.rs | /*
Copyright (c) 2015, 2016 Saurav Sachidanand
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#![allow(non_snake_case)]
extern crate astro;
use astro::*;
#[test]
#[allow(unused_variables)]
fn time() | let Theta0 = 177.74208_f64.to_radians();
let deltaT = time::delta_t(1988, 3);
let (h_rise, m_rise, s_rise) = transit::time(
&transit::TransitType::Rise,
&transit::TransitBody::StarOrPlanet,
&geograph_point,
&eq_point1,
&eq_point2,
&eq_point3,
Theta0,
deltaT,
0.0
);
assert_eq!((h_rise, m_rise), (12, 25));
let (h_transit, m_transit, s_transit) = transit::time(
&transit::TransitType::Transit,
&transit::TransitBody::StarOrPlanet,
&geograph_point,
&eq_point1,
&eq_point2,
&eq_point3,
Theta0,
deltaT,
0.0
);
assert_eq!((h_transit, m_transit), (19, 40));
let (h_set, m_set, s_set) = transit::time(
&transit::TransitType::Set,
&transit::TransitBody::StarOrPlanet,
&geograph_point,
&eq_point1,
&eq_point2,
&eq_point3,
Theta0,
deltaT,
0.0
);
assert_eq!((h_set, m_set), (2, 54));
}
| {
let eq_point1 = coords::EqPoint{
asc: 40.68021_f64.to_radians(),
dec: 18.04761_f64.to_radians()
};
let eq_point2 = coords::EqPoint{
asc: 41.73129_f64.to_radians(),
dec: 18.44092_f64.to_radians()
};
let eq_point3 = coords::EqPoint{
asc: 42.78204_f64.to_radians(),
dec: 18.82742_f64.to_radians()
};
let geograph_point = coords::GeographPoint{
long: 71.0833_f64.to_radians(),
lat: 42.3333_f64.to_radians(),
};
| identifier_body |
transit.rs | /*
Copyright (c) 2015, 2016 Saurav Sachidanand
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | #![allow(non_snake_case)]
extern crate astro;
use astro::*;
#[test]
#[allow(unused_variables)]
fn time() {
let eq_point1 = coords::EqPoint{
asc: 40.68021_f64.to_radians(),
dec: 18.04761_f64.to_radians()
};
let eq_point2 = coords::EqPoint{
asc: 41.73129_f64.to_radians(),
dec: 18.44092_f64.to_radians()
};
let eq_point3 = coords::EqPoint{
asc: 42.78204_f64.to_radians(),
dec: 18.82742_f64.to_radians()
};
let geograph_point = coords::GeographPoint{
long: 71.0833_f64.to_radians(),
lat: 42.3333_f64.to_radians(),
};
let Theta0 = 177.74208_f64.to_radians();
let deltaT = time::delta_t(1988, 3);
let (h_rise, m_rise, s_rise) = transit::time(
&transit::TransitType::Rise,
&transit::TransitBody::StarOrPlanet,
&geograph_point,
&eq_point1,
&eq_point2,
&eq_point3,
Theta0,
deltaT,
0.0
);
assert_eq!((h_rise, m_rise), (12, 25));
let (h_transit, m_transit, s_transit) = transit::time(
&transit::TransitType::Transit,
&transit::TransitBody::StarOrPlanet,
&geograph_point,
&eq_point1,
&eq_point2,
&eq_point3,
Theta0,
deltaT,
0.0
);
assert_eq!((h_transit, m_transit), (19, 40));
let (h_set, m_set, s_set) = transit::time(
&transit::TransitType::Set,
&transit::TransitBody::StarOrPlanet,
&geograph_point,
&eq_point1,
&eq_point2,
&eq_point3,
Theta0,
deltaT,
0.0
);
assert_eq!((h_set, m_set), (2, 54));
} | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
| random_line_split |
gray.rs | /// Reads in a list of values and returns the gray codes of the length of each value
/// Does so only up to the length of 64 bits (unsigned values)
// io functions
use std::io::prelude::*;
use std::error::Error;
use std::fs::File;
use std::path::Path;
use std::env;
/// Decimal to Gray Code
fn gray(num: u64) -> u64 {
(num >> 1) ^ num
}
/// Run up to some bit length (u8 <= 64)
fn gray_length(n: u8) -> Vec<u64> {
let size = 2u32.pow(n as u32);
let arr = 0.. size as u64;
arr.map(|x| gray(x)).collect()
}
/// Output the binary number for the size
fn out(num: u64, size: u8) {
println!("{:0>width$b}", num, width=size as usize);
}
/// File I/O stuff
fn get_input<P: AsRef<Path>>(fname: P) -> Result< String, Box<Error>> {
let mut file = try!(File::open(fname));
let mut cntnt = String::new();
try!(file.read_to_string(&mut cntnt));
Ok(cntnt)
}
/// String helper
fn s_parser(item: &str) -> u8 {
let ret = match item.trim().parse::<u8>() {
Ok(n) => n,
Err(_) => 0,
};
ret
}
/// Parse File
fn parse(contents: String) -> Vec<u8> {
let vals: Vec<u8> = contents.split("\n").map(|x| s_parser(x)).collect();
vals
}
fn main() {
let args: Vec<String> = env::args().collect();
let path = Path::new(&args[1]);
let vals = match get_input(path) { | for val in vals {
for gray in gray_length(val) {
out(gray, 3);
}
}
} | Ok(n) => parse(n),
Err(why) => panic!("File could not be accessed {}", why),
};
| random_line_split |
gray.rs | /// Reads in a list of values and returns the gray codes of the length of each value
/// Does so only up to the length of 64 bits (unsigned values)
// io functions
use std::io::prelude::*;
use std::error::Error;
use std::fs::File;
use std::path::Path;
use std::env;
/// Decimal to Gray Code
fn gray(num: u64) -> u64 {
(num >> 1) ^ num
}
/// Run up to some bit length (u8 <= 64)
fn gray_length(n: u8) -> Vec<u64> {
let size = 2u32.pow(n as u32);
let arr = 0.. size as u64;
arr.map(|x| gray(x)).collect()
}
/// Output the binary number for the size
fn out(num: u64, size: u8) {
println!("{:0>width$b}", num, width=size as usize);
}
/// File I/O stuff
fn get_input<P: AsRef<Path>>(fname: P) -> Result< String, Box<Error>> {
let mut file = try!(File::open(fname));
let mut cntnt = String::new();
try!(file.read_to_string(&mut cntnt));
Ok(cntnt)
}
/// String helper
fn s_parser(item: &str) -> u8 {
let ret = match item.trim().parse::<u8>() {
Ok(n) => n,
Err(_) => 0,
};
ret
}
/// Parse File
fn parse(contents: String) -> Vec<u8> |
fn main() {
let args: Vec<String> = env::args().collect();
let path = Path::new(&args[1]);
let vals = match get_input(path) {
Ok(n) => parse(n),
Err(why) => panic!("File could not be accessed {}", why),
};
for val in vals {
for gray in gray_length(val) {
out(gray, 3);
}
}
}
| {
let vals: Vec<u8> = contents.split("\n").map(|x| s_parser(x)).collect();
vals
} | identifier_body |
gray.rs | /// Reads in a list of values and returns the gray codes of the length of each value
/// Does so only up to the length of 64 bits (unsigned values)
// io functions
use std::io::prelude::*;
use std::error::Error;
use std::fs::File;
use std::path::Path;
use std::env;
/// Decimal to Gray Code
fn gray(num: u64) -> u64 {
(num >> 1) ^ num
}
/// Run up to some bit length (u8 <= 64)
fn gray_length(n: u8) -> Vec<u64> {
let size = 2u32.pow(n as u32);
let arr = 0.. size as u64;
arr.map(|x| gray(x)).collect()
}
/// Output the binary number for the size
fn out(num: u64, size: u8) {
println!("{:0>width$b}", num, width=size as usize);
}
/// File I/O stuff
fn get_input<P: AsRef<Path>>(fname: P) -> Result< String, Box<Error>> {
let mut file = try!(File::open(fname));
let mut cntnt = String::new();
try!(file.read_to_string(&mut cntnt));
Ok(cntnt)
}
/// String helper
fn s_parser(item: &str) -> u8 {
let ret = match item.trim().parse::<u8>() {
Ok(n) => n,
Err(_) => 0,
};
ret
}
/// Parse File
fn parse(contents: String) -> Vec<u8> {
let vals: Vec<u8> = contents.split("\n").map(|x| s_parser(x)).collect();
vals
}
fn | () {
let args: Vec<String> = env::args().collect();
let path = Path::new(&args[1]);
let vals = match get_input(path) {
Ok(n) => parse(n),
Err(why) => panic!("File could not be accessed {}", why),
};
for val in vals {
for gray in gray_length(val) {
out(gray, 3);
}
}
}
| main | identifier_name |
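The `(num >> 1) ^ num` expression above is the standard binary-to-Gray mapping; the inverse unwinds it by XOR-ing the code with progressively larger shifts of itself. A small round-trip sketch follows; `gray_decode` is my own name and is not part of the program above.

```rust
fn gray(num: u64) -> u64 {
    (num >> 1) ^ num
}

/// Inverse of `gray`: fold the high bits back down with doubling shifts.
fn gray_decode(mut g: u64) -> u64 {
    let mut shift = 1;
    while shift < 64 {
        g ^= g >> shift;
        shift <<= 1;
    }
    g
}

fn main() {
    for n in 0..8u64 {
        let code = gray(n);
        if n > 0 {
            // Adjacent Gray codes differ in exactly one bit.
            assert_eq!((gray(n - 1) ^ code).count_ones(), 1);
        }
        assert_eq!(gray_decode(code), n);
        println!("{:03b} -> {:03b}", n, code);
    }
}
```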
future.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* A type representing values that may be computed concurrently and
* operations for working with them.
*
* # Example
*
* ```rust
* # fn fib(n: uint) -> uint {42};
* # fn make_a_sandwich() {};
* let mut delayed_fib = extra::future::spawn (|| fib(5000) );
* make_a_sandwich();
* println!("fib(5000) = {}", delayed_fib.get())
* ```
*/
#[allow(missing_doc)];
use std::cell::Cell;
use std::comm::{PortOne, oneshot};
use std::task;
use std::util::replace;
/// A type encapsulating the result of a computation which may not be complete
pub struct Future<A> {
priv state: FutureState<A>,
}
enum FutureState<A> {
Pending(~fn() -> A),
Evaluating,
Forced(A)
}
/// Methods on the `future` type
impl<A:Clone> Future<A> {
pub fn get(&mut self) -> A {
//! Get the value of the future.
(*(self.get_ref())).clone()
}
}
impl<A> Future<A> {
/// Gets the value from this future, forcing evaluation.
pub fn unwrap(self) -> A {
let mut this = self;
this.get_ref();
let state = replace(&mut this.state, Evaluating);
match state {
Forced(v) => v,
_ => fail2!( "Logic error." ),
}
}
pub fn get_ref<'a>(&'a mut self) -> &'a A |
pub fn from_value(val: A) -> Future<A> {
/*!
* Create a future from a value.
*
* The value is immediately available and calling `get` later will
* not block.
*/
Future {state: Forced(val)}
}
pub fn from_fn(f: ~fn() -> A) -> Future<A> {
/*!
* Create a future from a function.
*
* The first time that the value is requested it will be retrieved by
* calling the function. Note that this function is a local
* function. It is not spawned into another task.
*/
Future {state: Pending(f)}
}
}
impl<A:Send> Future<A> {
pub fn from_port(port: PortOne<A>) -> Future<A> {
/*!
* Create a future from a port
*
* The first time that the value is requested the task will block
* waiting for the result to be received on the port.
*/
let port = Cell::new(port);
do Future::from_fn {
port.take().recv()
}
}
pub fn spawn(blk: ~fn() -> A) -> Future<A> {
/*!
* Create a future from a unique closure.
*
* The closure will be run in a new task and its result used as the
* value of the future.
*/
let (port, chan) = oneshot();
do task::spawn_with(chan) |chan| {
chan.send(blk());
}
Future::from_port(port)
}
pub fn spawn_with<B: Send>(v: B, blk: ~fn(B) -> A) -> Future<A> {
/*!
* Create a future from a unique closure taking one argument.
*
* The closure and its argument will be moved into a new task. The
* closure will be run and its result used as the value of the future.
*/
let (port, chan) = oneshot();
do task::spawn_with((v, chan)) |(v, chan)| {
chan.send(blk(v));
}
Future::from_port(port)
}
}
#[cfg(test)]
mod test {
use future::Future;
use std::cell::Cell;
use std::comm::oneshot;
use std::task;
#[test]
fn test_from_value() {
let mut f = Future::from_value(~"snail");
assert_eq!(f.get(), ~"snail");
}
#[test]
fn test_from_port() {
let (po, ch) = oneshot();
ch.send(~"whale");
let mut f = Future::from_port(po);
assert_eq!(f.get(), ~"whale");
}
#[test]
fn test_from_fn() {
let mut f = Future::from_fn(|| ~"brail");
assert_eq!(f.get(), ~"brail");
}
#[test]
fn test_interface_get() {
let mut f = Future::from_value(~"fail");
assert_eq!(f.get(), ~"fail");
}
#[test]
fn test_interface_unwrap() {
let f = Future::from_value(~"fail");
assert_eq!(f.unwrap(), ~"fail");
}
#[test]
fn test_get_ref_method() {
let mut f = Future::from_value(22);
assert_eq!(*f.get_ref(), 22);
}
#[test]
fn test_spawn() {
let mut f = Future::spawn(|| ~"bale");
assert_eq!(f.get(), ~"bale");
}
#[test]
fn test_spawn_with() {
let mut f = Future::spawn_with(~"gale", |s| { s });
assert_eq!(f.get(), ~"gale");
}
#[test]
#[should_fail]
fn test_futurefail() {
let mut f = Future::spawn(|| fail2!());
let _x: ~str = f.get();
}
#[test]
fn test_sendable_future() {
let expected = "schlorf";
let f = Cell::new(do Future::spawn { expected });
do task::spawn {
let mut f = f.take();
let actual = f.get();
assert_eq!(actual, expected);
}
}
}
| {
/*!
* Executes the future's closure and then returns a borrowed
* pointer to the result. The borrowed pointer lasts as long as
* the future.
*/
match self.state {
Forced(ref v) => return v,
Evaluating => fail2!("Recursive forcing of future!"),
Pending(_) => {
match replace(&mut self.state, Evaluating) {
Forced(_) | Evaluating => fail2!("Logic error."),
Pending(f) => {
self.state = Forced(f());
self.get_ref()
}
}
}
}
} | identifier_body |
future.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* A type representing values that may be computed concurrently and
* operations for working with them.
*
* # Example
*
* ```rust
* # fn fib(n: uint) -> uint {42};
* # fn make_a_sandwich() {};
* let mut delayed_fib = extra::future::spawn (|| fib(5000) );
* make_a_sandwich();
* println!("fib(5000) = {}", delayed_fib.get())
* ```
*/
#[allow(missing_doc)];
use std::cell::Cell;
use std::comm::{PortOne, oneshot};
use std::task;
use std::util::replace;
/// A type encapsulating the result of a computation which may not be complete
pub struct Future<A> {
priv state: FutureState<A>,
}
enum FutureState<A> {
Pending(~fn() -> A),
Evaluating,
Forced(A)
}
/// Methods on the `future` type
impl<A:Clone> Future<A> {
pub fn get(&mut self) -> A {
//! Get the value of the future.
(*(self.get_ref())).clone()
}
}
impl<A> Future<A> {
/// Gets the value from this future, forcing evaluation.
pub fn unwrap(self) -> A {
let mut this = self;
this.get_ref();
let state = replace(&mut this.state, Evaluating);
match state {
Forced(v) => v,
_ => fail2!( "Logic error." ),
}
}
pub fn get_ref<'a>(&'a mut self) -> &'a A {
/*!
* Executes the future's closure and then returns a borrowed
* pointer to the result. The borrowed pointer lasts as long as
* the future.
*/
match self.state {
Forced(ref v) => return v,
Evaluating => fail2!("Recursive forcing of future!"),
Pending(_) => {
match replace(&mut self.state, Evaluating) {
Forced(_) | Evaluating => fail2!("Logic error."),
Pending(f) => {
self.state = Forced(f());
self.get_ref()
}
}
}
}
}
pub fn from_value(val: A) -> Future<A> {
/*!
* Create a future from a value.
*
* The value is immediately available and calling `get` later will
* not block.
*/
Future {state: Forced(val)}
}
pub fn from_fn(f: ~fn() -> A) -> Future<A> {
/*!
* Create a future from a function.
*
* The first time that the value is requested it will be retrieved by
* calling the function. Note that this function is a local
* function. It is not spawned into another task.
*/
Future {state: Pending(f)}
}
}
impl<A:Send> Future<A> {
pub fn from_port(port: PortOne<A>) -> Future<A> {
/*!
* Create a future from a port
*
* The first time that the value is requested the task will block
* waiting for the result to be received on the port.
*/
let port = Cell::new(port);
do Future::from_fn {
port.take().recv()
}
}
pub fn spawn(blk: ~fn() -> A) -> Future<A> {
/*!
* Create a future from a unique closure.
*
* The closure will be run in a new task and its result used as the
* value of the future.
*/
let (port, chan) = oneshot();
do task::spawn_with(chan) |chan| {
chan.send(blk());
}
Future::from_port(port)
}
pub fn spawn_with<B: Send>(v: B, blk: ~fn(B) -> A) -> Future<A> {
/*!
* Create a future from a unique closure taking one argument.
* |
do task::spawn_with((v, chan)) |(v, chan)| {
chan.send(blk(v));
}
Future::from_port(port)
}
}
#[cfg(test)]
mod test {
use future::Future;
use std::cell::Cell;
use std::comm::oneshot;
use std::task;
#[test]
fn test_from_value() {
let mut f = Future::from_value(~"snail");
assert_eq!(f.get(), ~"snail");
}
#[test]
fn test_from_port() {
let (po, ch) = oneshot();
ch.send(~"whale");
let mut f = Future::from_port(po);
assert_eq!(f.get(), ~"whale");
}
#[test]
fn test_from_fn() {
let mut f = Future::from_fn(|| ~"brail");
assert_eq!(f.get(), ~"brail");
}
#[test]
fn test_interface_get() {
let mut f = Future::from_value(~"fail");
assert_eq!(f.get(), ~"fail");
}
#[test]
fn test_interface_unwrap() {
let f = Future::from_value(~"fail");
assert_eq!(f.unwrap(), ~"fail");
}
#[test]
fn test_get_ref_method() {
let mut f = Future::from_value(22);
assert_eq!(*f.get_ref(), 22);
}
#[test]
fn test_spawn() {
let mut f = Future::spawn(|| ~"bale");
assert_eq!(f.get(), ~"bale");
}
#[test]
fn test_spawn_with() {
let mut f = Future::spawn_with(~"gale", |s| { s });
assert_eq!(f.get(), ~"gale");
}
#[test]
#[should_fail]
fn test_futurefail() {
let mut f = Future::spawn(|| fail2!());
let _x: ~str = f.get();
}
#[test]
fn test_sendable_future() {
let expected = "schlorf";
let f = Cell::new(do Future::spawn { expected });
do task::spawn {
let mut f = f.take();
let actual = f.get();
assert_eq!(actual, expected);
}
}
} | * The closure and its argument will be moved into a new task. The
* closure will be run and its result used as the value of the future.
*/
let (port, chan) = oneshot(); | random_line_split |
future.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* A type representing values that may be computed concurrently and
* operations for working with them.
*
* # Example
*
* ```rust
* # fn fib(n: uint) -> uint {42};
* # fn make_a_sandwich() {};
* let mut delayed_fib = extra::future::spawn (|| fib(5000) );
* make_a_sandwich();
* println!("fib(5000) = {}", delayed_fib.get())
* ```
*/
#[allow(missing_doc)];
use std::cell::Cell;
use std::comm::{PortOne, oneshot};
use std::task;
use std::util::replace;
/// A type encapsulating the result of a computation which may not be complete
pub struct Future<A> {
priv state: FutureState<A>,
}
enum FutureState<A> {
Pending(~fn() -> A),
Evaluating,
Forced(A)
}
/// Methods on the `future` type
impl<A:Clone> Future<A> {
pub fn get(&mut self) -> A {
//! Get the value of the future.
(*(self.get_ref())).clone()
}
}
impl<A> Future<A> {
/// Gets the value from this future, forcing evaluation.
pub fn unwrap(self) -> A {
let mut this = self;
this.get_ref();
let state = replace(&mut this.state, Evaluating);
match state {
Forced(v) => v,
_ => fail2!( "Logic error." ),
}
}
pub fn get_ref<'a>(&'a mut self) -> &'a A {
/*!
* Executes the future's closure and then returns a borrowed
* pointer to the result. The borrowed pointer lasts as long as
* the future.
*/
match self.state {
Forced(ref v) => return v,
Evaluating => fail2!("Recursive forcing of future!"),
Pending(_) => {
match replace(&mut self.state, Evaluating) {
Forced(_) | Evaluating => fail2!("Logic error."),
Pending(f) => {
self.state = Forced(f());
self.get_ref()
}
}
}
}
}
pub fn from_value(val: A) -> Future<A> {
/*!
* Create a future from a value.
*
* The value is immediately available and calling `get` later will
* not block.
*/
Future {state: Forced(val)}
}
pub fn from_fn(f: ~fn() -> A) -> Future<A> {
/*!
* Create a future from a function.
*
* The first time that the value is requested it will be retrieved by
* calling the function. Note that this function is a local
* function. It is not spawned into another task.
*/
Future {state: Pending(f)}
}
}
impl<A:Send> Future<A> {
pub fn from_port(port: PortOne<A>) -> Future<A> {
/*!
* Create a future from a port
*
* The first time that the value is requested the task will block
* waiting for the result to be received on the port.
*/
let port = Cell::new(port);
do Future::from_fn {
port.take().recv()
}
}
pub fn spawn(blk: ~fn() -> A) -> Future<A> {
/*!
* Create a future from a unique closure.
*
* The closure will be run in a new task and its result used as the
* value of the future.
*/
let (port, chan) = oneshot();
do task::spawn_with(chan) |chan| {
chan.send(blk());
}
Future::from_port(port)
}
pub fn spawn_with<B: Send>(v: B, blk: ~fn(B) -> A) -> Future<A> {
/*!
* Create a future from a unique closure taking one argument.
*
* The closure and its argument will be moved into a new task. The
* closure will be run and its result used as the value of the future.
*/
let (port, chan) = oneshot();
do task::spawn_with((v, chan)) |(v, chan)| {
chan.send(blk(v));
}
Future::from_port(port)
}
}
#[cfg(test)]
mod test {
use future::Future;
use std::cell::Cell;
use std::comm::oneshot;
use std::task;
#[test]
fn test_from_value() {
let mut f = Future::from_value(~"snail");
assert_eq!(f.get(), ~"snail");
}
#[test]
fn test_from_port() {
let (po, ch) = oneshot();
ch.send(~"whale");
let mut f = Future::from_port(po);
assert_eq!(f.get(), ~"whale");
}
#[test]
fn test_from_fn() {
let mut f = Future::from_fn(|| ~"brail");
assert_eq!(f.get(), ~"brail");
}
#[test]
fn test_interface_get() {
let mut f = Future::from_value(~"fail");
assert_eq!(f.get(), ~"fail");
}
#[test]
fn test_interface_unwrap() {
let f = Future::from_value(~"fail");
assert_eq!(f.unwrap(), ~"fail");
}
#[test]
fn test_get_ref_method() {
let mut f = Future::from_value(22);
assert_eq!(*f.get_ref(), 22);
}
#[test]
fn test_spawn() {
let mut f = Future::spawn(|| ~"bale");
assert_eq!(f.get(), ~"bale");
}
#[test]
fn | () {
let mut f = Future::spawn_with(~"gale", |s| { s });
assert_eq!(f.get(), ~"gale");
}
#[test]
#[should_fail]
fn test_futurefail() {
let mut f = Future::spawn(|| fail2!());
let _x: ~str = f.get();
}
#[test]
fn test_sendable_future() {
let expected = "schlorf";
let f = Cell::new(do Future::spawn { expected });
do task::spawn {
let mut f = f.take();
let actual = f.get();
assert_eq!(actual, expected);
}
}
}
| test_spawn_with | identifier_name |
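The `extra::future` API above is long gone, but its Pending → Evaluating → Forced state machine maps directly onto an enum plus `mem::replace`, and its `spawn` constructors correspond roughly to `std::thread::spawn` followed by a join. A minimal present-day sketch, with names of my own choosing rather than a drop-in replacement:

```rust
use std::mem;

enum Lazy<T> {
    Pending(Box<dyn FnOnce() -> T>),
    Evaluating,
    Forced(T),
}

impl<T> Lazy<T> {
    fn from_fn(f: impl FnOnce() -> T + 'static) -> Self {
        Lazy::Pending(Box::new(f))
    }

    /// Force the value on first access, mirroring `Future::get_ref` above.
    fn get_ref(&mut self) -> &T {
        if let Lazy::Pending(_) = self {
            match mem::replace(self, Lazy::Evaluating) {
                Lazy::Pending(f) => *self = Lazy::Forced(f()),
                _ => unreachable!(),
            }
        }
        match &*self {
            Lazy::Forced(v) => v,
            _ => panic!("recursive forcing of a pending value"),
        }
    }
}

fn main() {
    let mut delayed = Lazy::from_fn(|| "brail".to_string());
    assert_eq!(delayed.get_ref(), "brail");

    // The thread-backed constructors reduce to spawn + join today.
    let handle = std::thread::spawn(|| 21 * 2);
    assert_eq!(handle.join().unwrap(), 42);
}
```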
object_safety.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! "Object safety" refers to the ability for a trait to be converted
//! to an object. In general, traits may only be converted to an
//! object if all of their methods meet certain criteria. In particular,
//! they must:
//!
//! - have a suitable receiver from which we can extract a vtable;
//! - not reference the erased type `Self` except for in this receiver;
//! - not have generic type parameters
use super::supertraits;
use super::elaborate_predicates;
use middle::subst::{self, SelfSpace, TypeSpace};
use middle::traits;
use middle::ty::{self, ToPolyTraitRef, Ty};
use std::rc::Rc;
use syntax::ast;
#[derive(Debug)]
pub enum ObjectSafetyViolation<'tcx> {
/// Self : Sized declared on the trait
SizedSelf,
/// Supertrait reference references `Self` in an illegal location
/// (e.g. `trait Foo : Bar<Self>`)
SupertraitSelf,
/// Method has something illegal
Method(Rc<ty::Method<'tcx>>, MethodViolationCode),
}
/// Reasons a method might not be object-safe.
#[derive(Copy,Clone,Debug)]
pub enum | {
/// e.g., `fn foo()`
StaticMethod,
/// e.g., `fn foo(&self, x: Self)` or `fn foo(&self) -> Self`
ReferencesSelf,
/// e.g., `fn foo<A>()`
Generic,
}
pub fn is_object_safe<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> bool
{
// Because we query yes/no results frequently, we keep a cache:
let def = tcx.lookup_trait_def(trait_def_id);
let result = def.object_safety().unwrap_or_else(|| {
let result = object_safety_violations(tcx, trait_def_id).is_empty();
// Record just a yes/no result in the cache; this is what is
// queried most frequently. Note that this may overwrite a
// previous result, but always with the same thing.
def.set_object_safety(result);
result
});
debug!("is_object_safe({:?}) = {}", trait_def_id, result);
result
}
pub fn object_safety_violations<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> Vec<ObjectSafetyViolation<'tcx>>
{
traits::supertrait_def_ids(tcx, trait_def_id)
.flat_map(|def_id| object_safety_violations_for_trait(tcx, def_id))
.collect()
}
fn object_safety_violations_for_trait<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> Vec<ObjectSafetyViolation<'tcx>>
{
// Check methods for violations.
let mut violations: Vec<_> =
tcx.trait_items(trait_def_id).iter()
.flat_map(|item| {
match *item {
ty::MethodTraitItem(ref m) => {
object_safety_violation_for_method(tcx, trait_def_id, &**m)
.map(|code| ObjectSafetyViolation::Method(m.clone(), code))
.into_iter()
}
_ => None.into_iter(),
}
})
.collect();
// Check the trait itself.
if trait_has_sized_self(tcx, trait_def_id) {
violations.push(ObjectSafetyViolation::SizedSelf);
}
if supertraits_reference_self(tcx, trait_def_id) {
violations.push(ObjectSafetyViolation::SupertraitSelf);
}
debug!("object_safety_violations_for_trait(trait_def_id={:?}) = {:?}",
trait_def_id,
violations);
violations
}
fn supertraits_reference_self<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> bool
{
let trait_def = tcx.lookup_trait_def(trait_def_id);
let trait_ref = trait_def.trait_ref.clone();
let trait_ref = trait_ref.to_poly_trait_ref();
let predicates = tcx.lookup_super_predicates(trait_def_id);
predicates
.predicates
.into_iter()
.map(|predicate| predicate.subst_supertrait(tcx, &trait_ref))
.any(|predicate| {
match predicate {
ty::Predicate::Trait(ref data) => {
// In the case of a trait predicate, we can skip the "self" type.
data.0.trait_ref.substs.types.get_slice(TypeSpace)
.iter()
.cloned()
.any(is_self)
}
ty::Predicate::Projection(..) |
ty::Predicate::TypeOutlives(..) |
ty::Predicate::RegionOutlives(..) |
ty::Predicate::Equate(..) => {
false
}
}
})
}
fn trait_has_sized_self<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> bool
{
let trait_def = tcx.lookup_trait_def(trait_def_id);
let trait_predicates = tcx.lookup_predicates(trait_def_id);
generics_require_sized_self(tcx, &trait_def.generics, &trait_predicates)
}
fn generics_require_sized_self<'tcx>(tcx: &ty::ctxt<'tcx>,
generics: &ty::Generics<'tcx>,
predicates: &ty::GenericPredicates<'tcx>)
-> bool
{
let sized_def_id = match tcx.lang_items.sized_trait() {
Some(def_id) => def_id,
None => { return false; /* No Sized trait, can't require it! */ }
};
// Search for a predicate like `Self : Sized` amongst the trait bounds.
let free_substs = tcx.construct_free_substs(generics, ast::DUMMY_NODE_ID);
let predicates = predicates.instantiate(tcx, &free_substs).predicates.into_vec();
elaborate_predicates(tcx, predicates)
.any(|predicate| {
match predicate {
ty::Predicate::Trait(ref trait_pred) if trait_pred.def_id() == sized_def_id => {
is_self(trait_pred.0.self_ty())
}
ty::Predicate::Projection(..) |
ty::Predicate::Trait(..) |
ty::Predicate::Equate(..) |
ty::Predicate::RegionOutlives(..) |
ty::Predicate::TypeOutlives(..) => {
false
}
}
})
}
/// Returns `Some(_)` if this method makes the containing trait not object safe.
fn object_safety_violation_for_method<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
method: &ty::Method<'tcx>)
-> Option<MethodViolationCode>
{
// Any method that has a `Self : Sized` requisite is otherwise
// exempt from the regulations.
if generics_require_sized_self(tcx, &method.generics, &method.predicates) {
return None;
}
virtual_call_violation_for_method(tcx, trait_def_id, method)
}
/// We say a method is *vtable safe* if it can be invoked on a trait
/// object. Note that object-safe traits can have some
/// non-vtable-safe methods, so long as they require `Self:Sized` or
/// otherwise ensure that they cannot be used when `Self=Trait`.
pub fn is_vtable_safe_method<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
method: &ty::Method<'tcx>)
-> bool
{
virtual_call_violation_for_method(tcx, trait_def_id, method).is_none()
}
/// Returns `Some(_)` if this method cannot be called on a trait
/// object; this does not necessarily imply that the enclosing trait
/// is not object safe, because the method might have a where clause
/// `Self:Sized`.
fn virtual_call_violation_for_method<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
method: &ty::Method<'tcx>)
-> Option<MethodViolationCode>
{
// The method's first parameter must be something that derefs (or
// autorefs) to `&self`. For now, we only accept `self`, `&self`
// and `Box<Self>`.
match method.explicit_self {
ty::StaticExplicitSelfCategory => {
return Some(MethodViolationCode::StaticMethod);
}
ty::ByValueExplicitSelfCategory |
ty::ByReferenceExplicitSelfCategory(..) |
ty::ByBoxExplicitSelfCategory => {
}
}
// The `Self` type is erased, so it should not appear in list of
// arguments or return type apart from the receiver.
let ref sig = method.fty.sig;
for &input_ty in &sig.0.inputs[1..] {
if contains_illegal_self_type_reference(tcx, trait_def_id, input_ty) {
return Some(MethodViolationCode::ReferencesSelf);
}
}
if let ty::FnConverging(result_type) = sig.0.output {
if contains_illegal_self_type_reference(tcx, trait_def_id, result_type) {
return Some(MethodViolationCode::ReferencesSelf);
}
}
// We can't monomorphize things like `fn foo<A>(...)`.
if !method.generics.types.is_empty_in(subst::FnSpace) {
return Some(MethodViolationCode::Generic);
}
None
}
fn contains_illegal_self_type_reference<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
ty: Ty<'tcx>)
-> bool
{
// This is somewhat subtle. In general, we want to forbid
// references to `Self` in the argument and return types,
// since the value of `Self` is erased. However, there is one
// exception: it is ok to reference `Self` in order to access
// an associated type of the current trait, since we retain
// the value of those associated types in the object type
// itself.
//
// ```rust
// trait SuperTrait {
// type X;
// }
//
// trait Trait : SuperTrait {
// type Y;
// fn foo(&self, x: Self) // bad
// fn foo(&self) -> Self // bad
// fn foo(&self) -> Option<Self> // bad
// fn foo(&self) -> Self::Y // OK, desugars to next example
// fn foo(&self) -> <Self as Trait>::Y // OK
// fn foo(&self) -> Self::X // OK, desugars to next example
// fn foo(&self) -> <Self as SuperTrait>::X // OK
// }
// ```
//
// However, it is not as simple as allowing `Self` in a projected
// type, because there are illegal ways to use `Self` as well:
//
// ```rust
// trait Trait : SuperTrait {
// ...
// fn foo(&self) -> <Self as SomeOtherTrait>::X;
// }
// ```
//
// Here we will not have the type of `X` recorded in the
// object type, and we cannot resolve `Self as SomeOtherTrait`
// without knowing what `Self` is.
let mut supertraits: Option<Vec<ty::PolyTraitRef<'tcx>>> = None;
let mut error = false;
ty.maybe_walk(|ty| {
match ty.sty {
ty::TyParam(ref param_ty) => {
if param_ty.space == SelfSpace {
error = true;
}
false // no contained types to walk
}
ty::TyProjection(ref data) => {
// This is a projected type `<Foo as SomeTrait>::X`.
// Compute supertraits of current trait lazily.
if supertraits.is_none() {
let trait_def = tcx.lookup_trait_def(trait_def_id);
let trait_ref = ty::Binder(trait_def.trait_ref.clone());
supertraits = Some(traits::supertraits(tcx, trait_ref).collect());
}
// Determine whether the trait reference `Foo as
// SomeTrait` is in fact a supertrait of the
// current trait. In that case, this type is
// legal, because the type `X` will be specified
// in the object type. Note that we can just use
// direct equality here because all of these types
// are part of the formal parameter listing, and
// hence there should be no inference variables.
let projection_trait_ref = ty::Binder(data.trait_ref.clone());
let is_supertrait_of_current_trait =
supertraits.as_ref().unwrap().contains(&projection_trait_ref);
if is_supertrait_of_current_trait {
false // do not walk contained types, do not report error, do collect $200
} else {
true // DO walk contained types, POSSIBLY reporting an error
}
}
_ => true, // walk contained types, if any
}
});
error
}
fn is_self<'tcx>(ty: Ty<'tcx>) -> bool {
match ty.sty {
ty::TyParam(ref data) => data.space == subst::SelfSpace,
_ => false,
}
}
| MethodViolationCode | identifier_name |
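The checks above encode the familiar object-safety rules: every method reachable through a vtable needs a usable receiver, must not mention `Self` outside that receiver, and must not carry its own generic type parameters, unless it is fenced off with a `Self: Sized` bound. A small illustration in current Rust, with trait and type names chosen only for this example:

```rust
#![allow(dead_code)]
use std::str::FromStr;

// Not object-safe: `cloned` returns `Self` and `parse` has a type parameter.
trait NotObjectSafe {
    fn cloned(&self) -> Self;
    fn parse<T: FromStr>(&self, s: &str) -> Option<T>;
}

// Object-safe: the offending method requires `Self: Sized`, so it simply
// drops out of the `dyn ObjectSafe` vtable instead of poisoning the trait.
trait ObjectSafe {
    fn name(&self) -> String;

    fn cloned(&self) -> Self
    where
        Self: Sized;
}

struct Wrapper(String);

impl ObjectSafe for Wrapper {
    fn name(&self) -> String {
        self.0.clone()
    }

    fn cloned(&self) -> Self
    where
        Self: Sized,
    {
        Wrapper(self.0.clone())
    }
}

fn main() {
    let obj: Box<dyn ObjectSafe> = Box::new(Wrapper("hi".into()));
    println!("{}", obj.name());
    // A `Box<dyn NotObjectSafe>` would be rejected with the violations above.
}
```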
object_safety.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! "Object safety" refers to the ability for a trait to be converted
//! to an object. In general, traits may only be converted to an
//! object if all of their methods meet certain criteria. In particular,
//! they must:
//!
//! - have a suitable receiver from which we can extract a vtable;
//! - not reference the erased type `Self` except for in this receiver;
//! - not have generic type parameters
use super::supertraits;
use super::elaborate_predicates;
use middle::subst::{self, SelfSpace, TypeSpace};
use middle::traits;
use middle::ty::{self, ToPolyTraitRef, Ty};
use std::rc::Rc;
use syntax::ast;
#[derive(Debug)]
pub enum ObjectSafetyViolation<'tcx> {
/// Self : Sized declared on the trait
SizedSelf,
/// Supertrait reference references `Self` in an illegal location
/// (e.g. `trait Foo : Bar<Self>`)
SupertraitSelf,
/// Method has something illegal
Method(Rc<ty::Method<'tcx>>, MethodViolationCode),
}
/// Reasons a method might not be object-safe.
#[derive(Copy,Clone,Debug)]
pub enum MethodViolationCode {
/// e.g., `fn foo()`
StaticMethod,
/// e.g., `fn foo(&self, x: Self)` or `fn foo(&self) -> Self`
ReferencesSelf,
/// e.g., `fn foo<A>()`
Generic,
}
pub fn is_object_safe<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> bool
{
// Because we query yes/no results frequently, we keep a cache:
let def = tcx.lookup_trait_def(trait_def_id);
let result = def.object_safety().unwrap_or_else(|| {
let result = object_safety_violations(tcx, trait_def_id).is_empty();
// Record just a yes/no result in the cache; this is what is
// queried most frequently. Note that this may overwrite a
// previous result, but always with the same thing.
def.set_object_safety(result);
result
});
debug!("is_object_safe({:?}) = {}", trait_def_id, result);
result
}
pub fn object_safety_violations<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> Vec<ObjectSafetyViolation<'tcx>>
{
traits::supertrait_def_ids(tcx, trait_def_id)
.flat_map(|def_id| object_safety_violations_for_trait(tcx, def_id))
.collect()
}
fn object_safety_violations_for_trait<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> Vec<ObjectSafetyViolation<'tcx>>
{
// Check methods for violations.
let mut violations: Vec<_> =
tcx.trait_items(trait_def_id).iter()
.flat_map(|item| {
match *item {
ty::MethodTraitItem(ref m) => {
object_safety_violation_for_method(tcx, trait_def_id, &**m)
.map(|code| ObjectSafetyViolation::Method(m.clone(), code))
.into_iter()
}
_ => None.into_iter(),
}
})
.collect();
// Check the trait itself.
if trait_has_sized_self(tcx, trait_def_id) {
violations.push(ObjectSafetyViolation::SizedSelf);
}
if supertraits_reference_self(tcx, trait_def_id) {
violations.push(ObjectSafetyViolation::SupertraitSelf);
}
debug!("object_safety_violations_for_trait(trait_def_id={:?}) = {:?}",
trait_def_id,
violations);
violations
}
fn supertraits_reference_self<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> bool
{
let trait_def = tcx.lookup_trait_def(trait_def_id);
let trait_ref = trait_def.trait_ref.clone();
let trait_ref = trait_ref.to_poly_trait_ref();
let predicates = tcx.lookup_super_predicates(trait_def_id);
predicates
.predicates
.into_iter()
.map(|predicate| predicate.subst_supertrait(tcx, &trait_ref))
.any(|predicate| {
match predicate {
ty::Predicate::Trait(ref data) => {
// In the case of a trait predicate, we can skip the "self" type.
data.0.trait_ref.substs.types.get_slice(TypeSpace)
.iter()
.cloned()
.any(is_self)
}
ty::Predicate::Projection(..) |
ty::Predicate::TypeOutlives(..) |
ty::Predicate::RegionOutlives(..) |
ty::Predicate::Equate(..) => {
false
}
}
})
}
fn trait_has_sized_self<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> bool
{
let trait_def = tcx.lookup_trait_def(trait_def_id);
let trait_predicates = tcx.lookup_predicates(trait_def_id);
generics_require_sized_self(tcx, &trait_def.generics, &trait_predicates)
}
fn generics_require_sized_self<'tcx>(tcx: &ty::ctxt<'tcx>,
generics: &ty::Generics<'tcx>,
predicates: &ty::GenericPredicates<'tcx>)
-> bool
{
let sized_def_id = match tcx.lang_items.sized_trait() {
Some(def_id) => def_id,
None => { return false; /* No Sized trait, can't require it! */ }
};
// Search for a predicate like `Self : Sized` amongst the trait bounds.
let free_substs = tcx.construct_free_substs(generics, ast::DUMMY_NODE_ID);
let predicates = predicates.instantiate(tcx, &free_substs).predicates.into_vec();
elaborate_predicates(tcx, predicates)
.any(|predicate| {
match predicate {
ty::Predicate::Trait(ref trait_pred) if trait_pred.def_id() == sized_def_id => {
is_self(trait_pred.0.self_ty())
}
ty::Predicate::Projection(..) |
ty::Predicate::Trait(..) |
ty::Predicate::Equate(..) |
ty::Predicate::RegionOutlives(..) |
ty::Predicate::TypeOutlives(..) => {
false
}
}
})
}
/// Returns `Some(_)` if this method makes the containing trait not object safe.
fn object_safety_violation_for_method<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
method: &ty::Method<'tcx>)
-> Option<MethodViolationCode>
{
// Any method that has a `Self : Sized` requisite is otherwise
// exempt from the regulations.
if generics_require_sized_self(tcx, &method.generics, &method.predicates) {
return None;
}
virtual_call_violation_for_method(tcx, trait_def_id, method)
}
/// We say a method is *vtable safe* if it can be invoked on a trait
/// object. Note that object-safe traits can have some
/// non-vtable-safe methods, so long as they require `Self:Sized` or
/// otherwise ensure that they cannot be used when `Self=Trait`.
pub fn is_vtable_safe_method<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
method: &ty::Method<'tcx>)
-> bool
{
virtual_call_violation_for_method(tcx, trait_def_id, method).is_none()
}
/// Returns `Some(_)` if this method cannot be called on a trait
/// object; this does not necessarily imply that the enclosing trait
/// is not object safe, because the method might have a where clause
/// `Self:Sized`.
fn virtual_call_violation_for_method<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
method: &ty::Method<'tcx>)
-> Option<MethodViolationCode>
{
// The method's first parameter must be something that derefs (or
// autorefs) to `&self`. For now, we only accept `self`, `&self`
// and `Box<Self>`.
match method.explicit_self {
ty::StaticExplicitSelfCategory => {
return Some(MethodViolationCode::StaticMethod);
}
ty::ByValueExplicitSelfCategory |
ty::ByReferenceExplicitSelfCategory(..) |
ty::ByBoxExplicitSelfCategory => {
}
}
// The `Self` type is erased, so it should not appear in list of
// arguments or return type apart from the receiver.
let ref sig = method.fty.sig;
for &input_ty in &sig.0.inputs[1..] {
if contains_illegal_self_type_reference(tcx, trait_def_id, input_ty) {
return Some(MethodViolationCode::ReferencesSelf);
}
}
if let ty::FnConverging(result_type) = sig.0.output {
if contains_illegal_self_type_reference(tcx, trait_def_id, result_type) {
return Some(MethodViolationCode::ReferencesSelf);
}
}
// We can't monomorphize things like `fn foo<A>(...)`.
if !method.generics.types.is_empty_in(subst::FnSpace) {
return Some(MethodViolationCode::Generic);
}
None
}
fn contains_illegal_self_type_reference<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
ty: Ty<'tcx>)
-> bool
{
// This is somewhat subtle. In general, we want to forbid
// references to `Self` in the argument and return types,
// since the value of `Self` is erased. However, there is one
// exception: it is ok to reference `Self` in order to access
// an associated type of the current trait, since we retain
// the value of those associated types in the object type
// itself.
//
// ```rust
// trait SuperTrait {
// type X;
// }
//
// trait Trait : SuperTrait {
// type Y;
// fn foo(&self, x: Self) // bad
// fn foo(&self) -> Self // bad | // fn foo(&self) -> Self::Y // OK, desugars to next example
// fn foo(&self) -> <Self as Trait>::Y // OK
// fn foo(&self) -> Self::X // OK, desugars to next example
// fn foo(&self) -> <Self as SuperTrait>::X // OK
// }
// ```
//
// However, it is not as simple as allowing `Self` in a projected
// type, because there are illegal ways to use `Self` as well:
//
// ```rust
// trait Trait : SuperTrait {
// ...
// fn foo(&self) -> <Self as SomeOtherTrait>::X;
// }
// ```
//
// Here we will not have the type of `X` recorded in the
// object type, and we cannot resolve `Self as SomeOtherTrait`
// without knowing what `Self` is.
let mut supertraits: Option<Vec<ty::PolyTraitRef<'tcx>>> = None;
let mut error = false;
ty.maybe_walk(|ty| {
match ty.sty {
ty::TyParam(ref param_ty) => {
if param_ty.space == SelfSpace {
error = true;
}
false // no contained types to walk
}
ty::TyProjection(ref data) => {
// This is a projected type `<Foo as SomeTrait>::X`.
// Compute supertraits of current trait lazily.
if supertraits.is_none() {
let trait_def = tcx.lookup_trait_def(trait_def_id);
let trait_ref = ty::Binder(trait_def.trait_ref.clone());
supertraits = Some(traits::supertraits(tcx, trait_ref).collect());
}
// Determine whether the trait reference `Foo as
// SomeTrait` is in fact a supertrait of the
// current trait. In that case, this type is
// legal, because the type `X` will be specified
// in the object type. Note that we can just use
// direct equality here because all of these types
// are part of the formal parameter listing, and
// hence there should be no inference variables.
let projection_trait_ref = ty::Binder(data.trait_ref.clone());
let is_supertrait_of_current_trait =
supertraits.as_ref().unwrap().contains(&projection_trait_ref);
if is_supertrait_of_current_trait {
false // do not walk contained types, do not report error, do collect $200
} else {
true // DO walk contained types, POSSIBLY reporting an error
}
}
_ => true, // walk contained types, if any
}
});
error
}
fn is_self<'tcx>(ty: Ty<'tcx>) -> bool {
match ty.sty {
ty::TyParam(ref data) => data.space == subst::SelfSpace,
_ => false,
}
} | // fn foo(&self) -> Option<Self> // bad | random_line_split |
label_break_value_illegal_uses.rs | // Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(label_break_value)]
// These are forbidden occurrences of label-break-value
fn labeled_unsafe() {
unsafe 'b: {} //~ ERROR expected one of `extern`, `fn`, or `{`
}
fn labeled_if() {
if true | //~ ERROR expected `{`, found `'b`
}
fn labeled_else() {
if true {} else 'b: {} //~ ERROR expected `{`, found `'b`
}
fn labeled_match() {
match false 'b: {} //~ ERROR expected one of `.`, `?`, `{`, or an operator
}
pub fn main() {}
| 'b: {} | conditional_block |
label_break_value_illegal_uses.rs | // Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(label_break_value)]
// These are forbidden occurrences of label-break-value
fn labeled_unsafe() {
unsafe 'b: {} //~ ERROR expected one of `extern`, `fn`, or `{`
}
fn labeled_if() {
if true 'b: {} //~ ERROR expected `{`, found `'b`
}
fn labeled_else() {
if true {} else 'b: {} //~ ERROR expected `{`, found `'b`
}
fn labeled_match() {
match false 'b: {} //~ ERROR expected one of `.`, `?`, `{`, or an operator
} |
pub fn main() {} | random_line_split |
|
label_break_value_illegal_uses.rs | // Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(label_break_value)]
// These are forbidden occurrences of label-break-value
fn labeled_unsafe() {
unsafe 'b: {} //~ ERROR expected one of `extern`, `fn`, or `{`
}
fn labeled_if() {
if true 'b: {} //~ ERROR expected `{`, found `'b`
}
fn labeled_else() {
if true {} else 'b: {} //~ ERROR expected `{`, found `'b`
}
fn labeled_match() {
match false 'b: {} //~ ERROR expected one of `.`, `?`, `{`, or an operator
}
pub fn main() | {} | identifier_body |
|
label_break_value_illegal_uses.rs | // Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(label_break_value)]
// These are forbidden occurrences of label-break-value
fn labeled_unsafe() {
unsafe 'b: {} //~ ERROR expected one of `extern`, `fn`, or `{`
}
fn labeled_if() {
if true 'b: {} //~ ERROR expected `{`, found `'b`
}
fn labeled_else() {
if true {} else 'b: {} //~ ERROR expected `{`, found `'b`
}
fn | () {
match false 'b: {} //~ ERROR expected one of `.`, `?`, `{`, or an operator
}
pub fn main() {}
| labeled_match | identifier_name |
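The four functions above only exercise the positions where a labeled block is rejected; the accepted form is a labeled block used as an expression, with `break 'label value` supplying its result (stable since Rust 1.65). A short sketch of the legal usage:

```rust
fn classify(n: i32) -> &'static str {
    let class = 'b: {
        if n < 0 {
            break 'b "negative";
        }
        if n == 0 {
            break 'b "zero";
        }
        // Falling off the end of the block yields its final expression.
        "positive"
    };
    class
}

fn main() {
    assert_eq!(classify(-3), "negative");
    assert_eq!(classify(0), "zero");
    assert_eq!(classify(7), "positive");
}
```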
initial.rs | use git::Config;
use helpers;
use author::Author;
use author_selection::AuthorSelection;
use xdg::BaseDirectories;
use Result;
use CannotProcede;
pub fn initial() -> Result<Config> {
let xdg_dirs = BaseDirectories::with_prefix("partners")?;
let config_path = xdg_dirs.place_config_file("partners.cfg")?;
if !config_path.exists() {
println!("config file not found at {:?}", config_path);
if helpers::confirm("do you want to create it?")? {
helpers::create_config_file(&config_path)?;
} else {
Err(CannotProcede)?;
}
}
let partners_config = Config::File(config_path);
let author = match Config::Local.current_author() {
Ok(author) => author,
Err(_) => {
println!("It seems like the current git author is not known to partners");
let nick = Config::Local.nick().or_else(|_| {
println!("Please enter a nickname you would like to use");
helpers::query_required("Nick")
})?;
let name = Config::Local.user_name().or_else(|_| {
println!("Unable to determine your name from git configuration");
helpers::query_required("Name")
})?;
let email = Config::Local.user_email().ok().or_else(|| {
println!("Unable to determine your email address from git configuration");
helpers::query_optional("Email").ok().and_then(|v| v)
});
let author = Author { nick: nick, name: name, email: email };
let selection = AuthorSelection::new(&partners_config, vec![author.clone()])?;
Config::Global.set_current_author(&selection)?;
author
}
};
if partners_config.find_author(&author.nick).is_none() |
Ok(partners_config)
} | {
partners_config.add_author(&author)?;
} | conditional_block |
initial.rs | use git::Config;
use helpers;
use author::Author;
use author_selection::AuthorSelection;
use xdg::BaseDirectories;
use Result;
use CannotProcede;
pub fn initial() -> Result<Config> {
let xdg_dirs = BaseDirectories::with_prefix("partners")?;
let config_path = xdg_dirs.place_config_file("partners.cfg")?; | if helpers::confirm("do you want to create it?")? {
helpers::create_config_file(&config_path)?;
} else {
Err(CannotProcede)?;
}
}
let partners_config = Config::File(config_path);
let author = match Config::Local.current_author() {
Ok(author) => author,
Err(_) => {
println!("It seems like the current git author is not known to partners");
let nick = Config::Local.nick().or_else(|_| {
println!("Please enter a nickname you would like to use");
helpers::query_required("Nick")
})?;
let name = Config::Local.user_name().or_else(|_| {
println!("Unable to determine your name from git configuration");
helpers::query_required("Name")
})?;
let email = Config::Local.user_email().ok().or_else(|| {
println!("Unable to determine your email address from git configuration");
helpers::query_optional("Email").ok().and_then(|v| v)
});
let author = Author { nick: nick, name: name, email: email };
let selection = AuthorSelection::new(&partners_config, vec![author.clone()])?;
Config::Global.set_current_author(&selection)?;
author
}
};
if partners_config.find_author(&author.nick).is_none() {
partners_config.add_author(&author)?;
}
Ok(partners_config)
} |
if !config_path.exists() {
println!("config file not found at {:?}", config_path);
| random_line_split |
initial.rs | use git::Config;
use helpers;
use author::Author;
use author_selection::AuthorSelection;
use xdg::BaseDirectories;
use Result;
use CannotProcede;
pub fn initial() -> Result<Config> | println!("It seems like the current git author is not known to partners");
let nick = Config::Local.nick().or_else(|_| {
println!("Please enter a nickname you would like to use");
helpers::query_required("Nick")
})?;
let name = Config::Local.user_name().or_else(|_| {
println!("Unable to determine your name from git configuration");
helpers::query_required("Name")
})?;
let email = Config::Local.user_email().ok().or_else(|| {
println!("Unable to determine your email address from git configuration");
helpers::query_optional("Email").ok().and_then(|v| v)
});
let author = Author { nick: nick, name: name, email: email };
let selection = AuthorSelection::new(&partners_config, vec![author.clone()])?;
Config::Global.set_current_author(&selection)?;
author
}
};
if partners_config.find_author(&author.nick).is_none() {
partners_config.add_author(&author)?;
}
Ok(partners_config)
} | {
let xdg_dirs = BaseDirectories::with_prefix("partners")?;
let config_path = xdg_dirs.place_config_file("partners.cfg")?;
if !config_path.exists() {
println!("config file not found at {:?}", config_path);
if helpers::confirm("do you want to create it?")? {
helpers::create_config_file(&config_path)?;
} else {
Err(CannotProcede)?;
}
}
let partners_config = Config::File(config_path);
let author = match Config::Local.current_author() {
Ok(author) => author,
Err(_) => { | identifier_body |
initial.rs | use git::Config;
use helpers;
use author::Author;
use author_selection::AuthorSelection;
use xdg::BaseDirectories;
use Result;
use CannotProcede;
pub fn | () -> Result<Config> {
let xdg_dirs = BaseDirectories::with_prefix("partners")?;
let config_path = xdg_dirs.place_config_file("partners.cfg")?;
if !config_path.exists() {
println!("config file not found at {:?}", config_path);
if helpers::confirm("do you want to create it?")? {
helpers::create_config_file(&config_path)?;
} else {
Err(CannotProcede)?;
}
}
let partners_config = Config::File(config_path);
let author = match Config::Local.current_author() {
Ok(author) => author,
Err(_) => {
println!("It seems like the current git author is not known to partners");
let nick = Config::Local.nick().or_else(|_| {
println!("Please enter a nickname you would like to use");
helpers::query_required("Nick")
})?;
let name = Config::Local.user_name().or_else(|_| {
println!("Unable to determine your name from git configuration");
helpers::query_required("Name")
})?;
let email = Config::Local.user_email().ok().or_else(|| {
println!("Unable to determine your email address from git configuration");
helpers::query_optional("Email").ok().and_then(|v| v)
});
let author = Author { nick: nick, name: name, email: email };
let selection = AuthorSelection::new(&partners_config, vec![author.clone()])?;
Config::Global.set_current_author(&selection)?;
author
}
};
if partners_config.find_author(&author.nick).is_none() {
partners_config.add_author(&author)?;
}
Ok(partners_config)
} | initial | identifier_name |
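The `Config::Local` and `Config::Global` accessors used above presumably wrap `git config` reads and writes; the fall-back-to-prompt flow in `initial` can be sketched with nothing more than `std::process::Command` and stdin. The helper names below are mine, and the sketch shells out to a `git` binary at runtime, so treat it as an illustration of the pattern rather than the partners implementation.

```rust
use std::io::{self, Write};
use std::process::Command;

/// Read one key from the local git config, if it is set.
fn git_config_get(key: &str) -> Option<String> {
    let out = Command::new("git")
        .args(["config", "--get", key])
        .output()
        .ok()?;
    if !out.status.success() {
        return None;
    }
    let val = String::from_utf8(out.stdout).ok()?.trim().to_string();
    if val.is_empty() { None } else { Some(val) }
}

/// Prefer git's answer, otherwise prompt on stdin, mirroring the
/// `or_else(.. helpers::query_required ..)` chain above.
fn query_or_git(key: &str, prompt: &str) -> io::Result<String> {
    if let Some(v) = git_config_get(key) {
        return Ok(v);
    }
    print!("{prompt}: ");
    io::stdout().flush()?;
    let mut line = String::new();
    io::stdin().read_line(&mut line)?;
    Ok(line.trim().to_string())
}

fn main() -> io::Result<()> {
    let name = query_or_git("user.name", "Name")?;
    let email = query_or_git("user.email", "Email")?;
    println!("author: {name} <{email}>");
    Ok(())
}
```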
mod.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#[cfg(any(target_os = "linux", target_os = "android"))]
pub use crate::platform::freetype::{font, font_context};
#[cfg(any(target_os = "linux", target_os = "android"))]
pub use crate::platform::freetype::{font_list, font_template};
|
#[cfg(target_os = "macos")]
pub use crate::platform::macos::{font, font_context, font_list, font_template};
#[cfg(any(target_os = "linux", target_os = "android"))]
mod freetype {
use libc::c_char;
use std::ffi::CStr;
use std::str;
/// Creates a String from the given null-terminated buffer.
/// Panics if the buffer does not contain UTF-8.
unsafe fn c_str_to_string(s: *const c_char) -> String {
str::from_utf8(CStr::from_ptr(s).to_bytes())
.unwrap()
.to_owned()
}
pub mod font;
pub mod font_context;
#[cfg(target_os = "linux")]
pub mod font_list;
#[cfg(target_os = "android")]
mod android {
pub mod font_list;
}
#[cfg(target_os = "android")]
pub use self::android::font_list;
#[cfg(any(target_os = "linux", target_os = "android"))]
pub mod font_template;
}
#[cfg(target_os = "macos")]
mod macos {
pub mod font;
pub mod font_context;
pub mod font_list;
pub mod font_template;
}
#[cfg(target_os = "windows")]
mod windows {
pub mod font;
pub mod font_context;
pub mod font_list;
pub mod font_template;
} | #[cfg(target_os = "windows")]
pub use crate::platform::windows::{font, font_context, font_list, font_template}; | random_line_split |
mod.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#[cfg(any(target_os = "linux", target_os = "android"))]
pub use crate::platform::freetype::{font, font_context};
#[cfg(any(target_os = "linux", target_os = "android"))]
pub use crate::platform::freetype::{font_list, font_template};
#[cfg(target_os = "windows")]
pub use crate::platform::windows::{font, font_context, font_list, font_template};
#[cfg(target_os = "macos")]
pub use crate::platform::macos::{font, font_context, font_list, font_template};
#[cfg(any(target_os = "linux", target_os = "android"))]
mod freetype {
use libc::c_char;
use std::ffi::CStr;
use std::str;
/// Creates a String from the given null-terminated buffer.
/// Panics if the buffer does not contain UTF-8.
unsafe fn c_str_to_string(s: *const c_char) -> String |
pub mod font;
pub mod font_context;
#[cfg(target_os = "linux")]
pub mod font_list;
#[cfg(target_os = "android")]
mod android {
pub mod font_list;
}
#[cfg(target_os = "android")]
pub use self::android::font_list;
#[cfg(any(target_os = "linux", target_os = "android"))]
pub mod font_template;
}
#[cfg(target_os = "macos")]
mod macos {
pub mod font;
pub mod font_context;
pub mod font_list;
pub mod font_template;
}
#[cfg(target_os = "windows")]
mod windows {
pub mod font;
pub mod font_context;
pub mod font_list;
pub mod font_template;
}
| {
str::from_utf8(CStr::from_ptr(s).to_bytes())
.unwrap()
.to_owned()
} | identifier_body |
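Both mod.rs rows above use the same pattern: each platform module defines identically named items, and `#[cfg(target_os = ...)]` plus a `pub use` re-export pick one at compile time so callers stay platform-agnostic. A small sketch of that pattern, assuming one of the three listed targets; the font-family strings are illustrative only:
#[cfg(target_os = "linux")]
mod platform {
    pub fn default_font_family() -> &'static str { "DejaVu Sans" }
}

#[cfg(target_os = "macos")]
mod platform {
    pub fn default_font_family() -> &'static str { "Helvetica" }
}

#[cfg(target_os = "windows")]
mod platform {
    pub fn default_font_family() -> &'static str { "Segoe UI" }
}

// Callers import one name; cfg decides which module body was compiled in.
pub use crate::platform::default_font_family;

fn main() {
    println!("{}", default_font_family());
}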
mod.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#[cfg(any(target_os = "linux", target_os = "android"))]
pub use crate::platform::freetype::{font, font_context};
#[cfg(any(target_os = "linux", target_os = "android"))]
pub use crate::platform::freetype::{font_list, font_template};
#[cfg(target_os = "windows")]
pub use crate::platform::windows::{font, font_context, font_list, font_template};
#[cfg(target_os = "macos")]
pub use crate::platform::macos::{font, font_context, font_list, font_template};
#[cfg(any(target_os = "linux", target_os = "android"))]
mod freetype {
use libc::c_char;
use std::ffi::CStr;
use std::str;
/// Creates a String from the given null-terminated buffer.
/// Panics if the buffer does not contain UTF-8.
unsafe fn | (s: *const c_char) -> String {
str::from_utf8(CStr::from_ptr(s).to_bytes())
.unwrap()
.to_owned()
}
pub mod font;
pub mod font_context;
#[cfg(target_os = "linux")]
pub mod font_list;
#[cfg(target_os = "android")]
mod android {
pub mod font_list;
}
#[cfg(target_os = "android")]
pub use self::android::font_list;
#[cfg(any(target_os = "linux", target_os = "android"))]
pub mod font_template;
}
#[cfg(target_os = "macos")]
mod macos {
pub mod font;
pub mod font_context;
pub mod font_list;
pub mod font_template;
}
#[cfg(target_os = "windows")]
mod windows {
pub mod font;
pub mod font_context;
pub mod font_list;
pub mod font_template;
}
| c_str_to_string | identifier_name |
base.rs | if let (Some(args), Some(meta_args)) = (&mut args, meta_args) {
for arg in meta_args {
let arg_name = arg.name.as_str();
if args.get(arg_name).map_or(true, InputValue::is_null) {
if let Some(val) = arg.default_value.as_ref() {
args.insert(arg_name, val.clone());
}
}
}
}
Self { args }
}
/// Gets an argument by the given `name` and converts it into the desired
/// type.
///
/// If the argument is found, or a default argument has been provided, the
/// given [`InputValue`] will be converted into the type `T`.
///
/// Returns [`None`] if an argument with such `name` is not present.
///
/// # Errors
///
/// If the [`FromInputValue`] conversion fails.
pub fn get<T>(&self, name: &str) -> FieldResult<Option<T>, S>
where
T: FromInputValue<S>,
T::Error: IntoFieldError<S>,
{
self.args
.as_ref()
.and_then(|args| args.get(name))
.map(InputValue::convert)
.transpose()
.map_err(IntoFieldError::into_field_error)
}
}
/// Primary trait used to resolve GraphQL values.
///
/// All the convenience macros ultimately expand into an implementation of this trait for the given
/// type. The macros remove duplicated definitions of fields and arguments, and add type checks on
/// all resolving functions automatically. This can all be done manually too.
///
/// [`GraphQLValue`] provides _some_ convenience methods for you, in the form of optional trait
/// methods. The `type_name` method is mandatory, but other than that, it depends on what type
/// you're exposing:
/// - [Scalars][4], [enums][5], [lists][6] and [non-null wrappers][7] only require `resolve`.
/// - [Interfaces][1] and [objects][3] require `resolve_field` _or_ `resolve` if you want to
/// implement a custom resolution logic (probably not).
/// - [Interfaces][1] and [unions][2] require `resolve_into_type` and `concrete_type_name`.
/// - [Input objects][8] do not require anything.
///
/// # Object safety
///
/// This trait is [object safe][11], therefore may be turned into a [trait object][12] and used for | /// # Example
///
/// This trait is intended to be used in conjunction with a [`GraphQLType`] trait. See the example
/// in the documentation of a [`GraphQLType`] trait.
///
/// [1]: https://spec.graphql.org/June2018/#sec-Interfaces
/// [2]: https://spec.graphql.org/June2018/#sec-Unions
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
/// [4]: https://spec.graphql.org/June2018/#sec-Scalars
/// [5]: https://spec.graphql.org/June2018/#sec-Enums
/// [6]: https://spec.graphql.org/June2018/#sec-Type-System.List
/// [7]: https://spec.graphql.org/June2018/#sec-Type-System.Non-Null
/// [8]: https://spec.graphql.org/June2018/#sec-Input-Objects
/// [11]: https://doc.rust-lang.org/reference/items/traits.html#object-safety
/// [12]: https://doc.rust-lang.org/reference/types/trait-object.html
pub trait GraphQLValue<S = DefaultScalarValue>
where
S: ScalarValue,
{
/// Context type for this [`GraphQLValue`].
///
/// It's threaded through a query execution to all affected nodes, and can be used to hold
/// common data, e.g. database connections or request session information.
type Context;
/// Type that may carry additional schema information for this [`GraphQLValue`].
///
/// It can be used to implement a schema that is partly dynamic, meaning that it can use
/// information that is not known at compile time, for instance by reading it from a
/// configuration file at startup.
type TypeInfo;
/// Returns name of the [`GraphQLType`] exposed by this [`GraphQLValue`].
///
/// This function will be called multiple times during a query execution. It must _not_ perform
/// any calculation and _always_ return the same value.
///
/// Usually, it should just call a [`GraphQLType::name`] inside.
fn type_name<'i>(&self, info: &'i Self::TypeInfo) -> Option<&'i str>;
/// Resolves the value of a single field on this [`GraphQLValue`].
///
/// The `arguments` object contains all the specified arguments, with default values being
/// substituted for the ones not provided by the query.
///
/// The `executor` can be used to drive selections into sub-[objects][3].
///
/// # Panics
///
/// The default implementation panics.
///
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
fn resolve_field(
&self,
_info: &Self::TypeInfo,
_field_name: &str,
_arguments: &Arguments<S>,
_executor: &Executor<Self::Context, S>,
) -> ExecutionResult<S> {
panic!("GraphQLValue::resolve_field() must be implemented by objects and interfaces");
}
/// Resolves this [`GraphQLValue`] (being an [interface][1] or a [union][2]) into a concrete
/// downstream [object][3] type.
///
/// Tries to resolve this [`GraphQLValue`] into the provided `type_name`. If the type matches,
/// then passes the instance along to [`Executor::resolve`].
///
/// # Panics
///
/// The default implementation panics.
///
/// [1]: https://spec.graphql.org/June2018/#sec-Interfaces
/// [2]: https://spec.graphql.org/June2018/#sec-Unions
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
fn resolve_into_type(
&self,
info: &Self::TypeInfo,
type_name: &str,
selection_set: Option<&[Selection<S>]>,
executor: &Executor<Self::Context, S>,
) -> ExecutionResult<S> {
if self.type_name(info).unwrap() == type_name {
self.resolve(info, selection_set, executor)
} else {
panic!(
"GraphQLValue::resolve_into_type() must be implemented by unions and interfaces"
);
}
}
/// Returns the concrete [`GraphQLType`] name for this [`GraphQLValue`] being an [interface][1],
/// a [union][2] or an [object][3].
///
/// # Panics
///
/// The default implementation panics.
///
/// [1]: https://spec.graphql.org/June2018/#sec-Interfaces
/// [2]: https://spec.graphql.org/June2018/#sec-Unions
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
#[allow(unused_variables)]
fn concrete_type_name(&self, context: &Self::Context, info: &Self::TypeInfo) -> String {
panic!(
"GraphQLValue::concrete_type_name() must be implemented by unions, interfaces \
and objects",
);
}
/// Resolves the provided `selection_set` against this [`GraphQLValue`].
///
/// For non-[object][3] types, the `selection_set` will be [`None`] and the value should simply
/// be returned.
///
/// For [objects][3], all fields in the `selection_set` should be resolved. The default
/// implementation uses [`GraphQLValue::resolve_field`] to resolve all fields, including those
/// through a fragment expansion.
///
/// Since the [GraphQL spec specifies][0] that errors during field processing should result in
/// a null-value, this might return `Ok(Null)` in case of a failure. Errors are recorded
/// internally.
///
/// # Panics
///
/// The default implementation panics, if `selection_set` is [`None`].
///
/// [0]: https://spec.graphql.org/June2018/#sec-Errors-and-Non-Nullability
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
fn resolve(
&self,
info: &Self::TypeInfo,
selection_set: Option<&[Selection<S>]>,
executor: &Executor<Self::Context, S>,
) -> ExecutionResult<S> {
if let Some(sel) = selection_set {
let mut res = Object::with_capacity(sel.len());
Ok(
if resolve_selection_set_into(self, info, sel, executor, &mut res) {
Value::Object(res)
} else {
Value::null()
},
)
} else {
panic!("GraphQLValue::resolve() must be implemented by non-object output types");
}
}
}
crate::sa::assert_obj_safe!(GraphQLValue<Context = (), TypeInfo = ()>);
/// Helper alias for naming [trait objects][1] of [`GraphQLValue`].
///
/// [1]: https://doc.rust-lang.org/reference/types/trait-object.html
pub type DynGraphQLValue<S, C, TI> =
dyn GraphQLValue<S, Context = C, TypeInfo = TI> + Send + Sync + 'static;
/// Primary trait used to expose Rust types in a GraphQL schema.
///
/// All of the convenience macros ultimately expand into an implementation of
/// this trait for the given type. This can all be done manually.
///
/// # Example
///
/// Manually deriving an [object][3] is straightforward, but tedious. This is the equivalent of the
/// `User` object as shown in the example in the documentation root:
/// ```
/// # use std::collections::HashMap;
/// use juniper::{
/// meta::MetaType, Arguments, Context, DefaultScalarValue, Executor, ExecutionResult,
/// FieldResult, GraphQLType, GraphQLValue, Registry,
/// };
///
/// #[derive(Debug)]
/// struct Database { users: HashMap<String, User> }
/// impl Context for Database {}
///
/// #[derive(Debug)]
/// struct User { id: String, name: String, friend_ids: Vec<String> }
///
/// impl GraphQLType<DefaultScalarValue> for User {
/// fn name(_: &()) -> Option<&'static str> {
/// Some("User")
/// }
///
/// fn meta<'r>(_: &(), registry: &mut Registry<'r>) -> MetaType<'r>
/// where DefaultScalarValue: 'r,
/// {
/// // First, we need to define all fields and their types on this type.
/// //
/// // If we need arguments, want to implement interfaces, or want to add documentation
/// // strings, we can do it here.
/// let fields = &[
/// registry.field::<&String>("id", &()),
/// registry.field::<&String>("name", &()),
/// registry.field::<Vec<&User>>("friends", &()),
/// ];
/// registry.build_object_type::<User>(&(), fields).into_meta()
/// }
/// }
///
/// impl GraphQLValue<DefaultScalarValue> for User {
/// type Context = Database;
/// type TypeInfo = ();
///
/// fn type_name(&self, _: &()) -> Option<&'static str> {
/// <User as GraphQLType>::name(&())
/// }
///
/// fn resolve_field(
/// &self,
/// info: &(),
/// field_name: &str,
/// args: &Arguments,
/// executor: &Executor<Database>
/// ) -> ExecutionResult
/// {
/// // Next, we need to match the queried field name. All arms of this match statement
/// // return `ExecutionResult`, which makes it hard to statically verify that the type you
/// // pass on to `executor.resolve*` actually matches the one that you defined in `meta()`
/// // above.
/// let database = executor.context();
/// match field_name {
/// // Because scalars are defined with another `Context` associated type, you must use
/// // `resolve_with_ctx` here to make the `executor` perform automatic type conversion
/// // of its argument.
/// "id" => executor.resolve_with_ctx(info, &self.id),
/// "name" => executor.resolve_with_ctx(info, &self.name),
///
/// // You pass a vector of `User` objects to `executor.resolve`, and it will determine
/// // which fields of the sub-objects to actually resolve based on the query.
/// // The `executor` instance keeps track of its current position in the query.
/// "friends" => executor.resolve(info,
/// &self.friend_ids.iter()
/// .filter_map(|id| database.users.get(id))
/// .collect::<Vec<_>>()
/// ),
///
/// // We can only reach this panic in two cases: either a mismatch between the defined
/// // schema in `meta()` above, or a validation failure caused by a bug in this library.
/// //
/// // In either of those two cases, the only reasonable way out is to panic the thread.
/// _ => panic!("Field {} not found on type User", field_name),
/// }
/// }
/// }
/// ```
///
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
pub trait GraphQLType<S = DefaultScalarValue>: GraphQLValue<S>
where
S: ScalarValue,
{
/// Returns name of this [`GraphQLType`] to expose.
///
/// This function will be called multiple times during schema construction. It must _not_
/// perform any calculation and _always_ return the same value.
fn name(info: &Self::TypeInfo) -> Option<&str>;
/// Returns [`MetaType`] representing this [`GraphQLType`].
fn meta<'r>(info: &Self::TypeInfo, registry: &mut Registry<'r, S>) -> MetaType<'r, S>
where
S: 'r;
}
/// Resolver logic for queries'/mutations' selection set.
/// Calls appropriate resolver method for each field or fragment found
/// and then merges returned values into `result` or pushes errors to
/// field's/fragment's sub executor.
///
/// Returns false if any errors occurred and true otherwise.
pub(crate) fn resolve_selection_set_into<T, S>(
instance: &T,
info: &T::TypeInfo,
selection_set: &[Selection<S>],
executor: &Executor<T::Context, S>,
result: &mut Object<S>,
) -> bool
where
T: GraphQLValue<S> + ?Sized,
S: ScalarValue,
{
let meta_type = executor
.schema()
.concrete_type_by_name(
instance
.type_name(info)
.expect("Resolving named type's selection set")
.as_ref(),
)
.expect("Type not found in schema");
for selection in selection_set {
match *selection {
Selection::Field(Spanning {
item: ref f,
start: ref start_pos,
..
}) => {
if is_excluded(&f.directives, executor.variables()) {
continue;
}
let response_name = f.alias.as_ref().unwrap_or(&f.name).item;
if f.name.item == "__typename" {
result.add_field(
response_name,
Value::scalar(instance.concrete_type_name(executor.context(), info)),
);
continue;
}
let meta_field = meta_type.field_by_name(f.name.item).unwrap_or_else(|| {
panic!(
"Field {} not found on type {:?}",
f.name.item,
meta_type.name()
)
});
let exec_vars = executor.variables();
let sub_exec = executor.field_sub_executor(
response_name,
f.name.item,
*start_pos,
f.selection_set.as_ref().map(|v| &v[..]),
);
let field_result = instance.resolve_field(
info,
f.name.item,
&Arguments::new(
f.arguments.as_ref().map(|m| {
m.item
.iter()
.map(|&(ref k, ref v)| {
(k.item, v.item.clone().into_const(exec_vars))
})
.collect()
}),
&meta_field.arguments,
),
&sub_exec,
);
match field_result {
Ok(Value::Null) if meta_field.field_type.is_non_null() => return false,
Ok(v) => merge_key_into(result, response_name, v),
Err(e) => {
sub_exec.push_error_at(e, *start_pos);
if meta_field.field_type.is_non_null() {
return false;
}
result.add_field(response_name, Value::null());
}
}
}
Selection::FragmentSpread(Spanning {
item: ref spread,
start: ref start_pos,
..
}) => {
if is_excluded(&spread.directives, executor.variables()) {
continue;
}
let fragment = &executor
.fragment_by_name(spread.name.item)
.expect("Fragment could not be found");
let sub_exec = executor.type_sub_executor(
Some(fragment.type_condition.item),
Some(&fragment.selection_set[..]),
);
let concrete_type_name = instance.concrete_type_name(sub_exec.context(), info);
let type_name = instance.type_name(info);
if executor
.schema()
.is_named_subtype(&concrete_type_name, fragment.type_condition.item)
|| Some(fragment.type_condition.item) == type_name
{
let sub_result = instance.resolve_into_type(
info,
&concrete_type_name,
Some(&fragment.selection_set[..]),
&sub_exec,
);
if let Ok(Value::Object(object)) = sub_result {
for (k, v) in object {
merge_key_into(result, &k, v);
}
} else if let Err(e) = sub_result {
sub_exec.push_error_at(e, *start_pos);
}
}
}
Selection::InlineFragment(Spanning {
item: ref fragment,
start: ref start_pos,
..
}) => {
if is_excluded(&fragment.directives, executor.variables()) {
continue;
}
let sub_exec = executor.type_sub_executor(
fragment.type_condition.as_ref().map(|c| c.item),
Some(&fragment.selection_set[..]),
);
if let Some(ref type_condition) = fragment.type_condition {
// Check whether the type matches the type condition.
let concrete_type_name = instance.concrete_type_name(sub_exec.context(), info);
if executor
.schema()
.is_named_subtype(&concrete_type_name, type_condition.item)
{
| /// resolving GraphQL values even when a concrete Rust type is erased.
/// | random_line_split |
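`Arguments::get` in the row above relies on `Option::map` plus `transpose` so that a missing argument becomes `Ok(None)` while a failed conversion becomes an error. The same shape with only std types, using a hypothetical port-parsing helper:
// Option<Result<T, E>> -> Result<Option<T>, E>: absent is Ok(None), invalid is Err.
fn get_port(raw: Option<&str>) -> Result<Option<u16>, std::num::ParseIntError> {
    raw.map(str::parse::<u16>).transpose()
}

fn main() {
    assert_eq!(get_port(None), Ok(None));               // argument absent
    assert_eq!(get_port(Some("8080")), Ok(Some(8080))); // present and valid
    assert!(get_port(Some("not-a-port")).is_err());     // present but invalid
}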
base.rs | if let (Some(args), Some(meta_args)) = (&mut args, meta_args) {
for arg in meta_args {
let arg_name = arg.name.as_str();
if args.get(arg_name).map_or(true, InputValue::is_null) {
if let Some(val) = arg.default_value.as_ref() {
args.insert(arg_name, val.clone());
}
}
}
}
Self { args }
}
/// Gets an argument by the given `name` and converts it into the desired
/// type.
///
/// If the argument is found, or a default argument has been provided, the
/// given [`InputValue`] will be converted into the type `T`.
///
/// Returns [`None`] if an argument with such `name` is not present.
///
/// # Errors
///
/// If the [`FromInputValue`] conversion fails.
pub fn get<T>(&self, name: &str) -> FieldResult<Option<T>, S>
where
T: FromInputValue<S>,
T::Error: IntoFieldError<S>,
{
self.args
.as_ref()
.and_then(|args| args.get(name))
.map(InputValue::convert)
.transpose()
.map_err(IntoFieldError::into_field_error)
}
}
/// Primary trait used to resolve GraphQL values.
///
/// All the convenience macros ultimately expand into an implementation of this trait for the given
/// type. The macros remove duplicated definitions of fields and arguments, and add type checks on
/// all resolving functions automatically. This can all be done manually too.
///
/// [`GraphQLValue`] provides _some_ convenience methods for you, in the form of optional trait
/// methods. The `type_name` method is mandatory, but other than that, it depends on what type
/// you're exposing:
/// - [Scalars][4], [enums][5], [lists][6] and [non-null wrappers][7] only require `resolve`.
/// - [Interfaces][1] and [objects][3] require `resolve_field` _or_ `resolve` if you want to
/// implement a custom resolution logic (probably not).
/// - [Interfaces][1] and [unions][2] require `resolve_into_type` and `concrete_type_name`.
/// - [Input objects][8] do not require anything.
///
/// # Object safety
///
/// This trait is [object safe][11], therefore may be turned into a [trait object][12] and used for
/// resolving GraphQL values even when a concrete Rust type is erased.
///
/// # Example
///
/// This trait is intended to be used in conjunction with a [`GraphQLType`] trait. See the example
/// in the documentation of a [`GraphQLType`] trait.
///
/// [1]: https://spec.graphql.org/June2018/#sec-Interfaces
/// [2]: https://spec.graphql.org/June2018/#sec-Unions
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
/// [4]: https://spec.graphql.org/June2018/#sec-Scalars
/// [5]: https://spec.graphql.org/June2018/#sec-Enums
/// [6]: https://spec.graphql.org/June2018/#sec-Type-System.List
/// [7]: https://spec.graphql.org/June2018/#sec-Type-System.Non-Null
/// [8]: https://spec.graphql.org/June2018/#sec-Input-Objects
/// [11]: https://doc.rust-lang.org/reference/items/traits.html#object-safety
/// [12]: https://doc.rust-lang.org/reference/types/trait-object.html
pub trait GraphQLValue<S = DefaultScalarValue>
where
S: ScalarValue,
{
/// Context type for this [`GraphQLValue`].
///
/// It's threaded through a query execution to all affected nodes, and can be used to hold
/// common data, e.g. database connections or request session information.
type Context;
/// Type that may carry additional schema information for this [`GraphQLValue`].
///
/// It can be used to implement a schema that is partly dynamic, meaning that it can use
/// information that is not known at compile time, for instance by reading it from a
/// configuration file at startup.
type TypeInfo;
/// Returns name of the [`GraphQLType`] exposed by this [`GraphQLValue`].
///
/// This function will be called multiple times during a query execution. It must _not_ perform
/// any calculation and _always_ return the same value.
///
/// Usually, it should just call a [`GraphQLType::name`] inside.
fn type_name<'i>(&self, info: &'i Self::TypeInfo) -> Option<&'i str>;
/// Resolves the value of a single field on this [`GraphQLValue`].
///
/// The `arguments` object contains all the specified arguments, with default values being
/// substituted for the ones not provided by the query.
///
/// The `executor` can be used to drive selections into sub-[objects][3].
///
/// # Panics
///
/// The default implementation panics.
///
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
fn resolve_field(
&self,
_info: &Self::TypeInfo,
_field_name: &str,
_arguments: &Arguments<S>,
_executor: &Executor<Self::Context, S>,
) -> ExecutionResult<S> {
panic!("GraphQLValue::resolve_field() must be implemented by objects and interfaces");
}
/// Resolves this [`GraphQLValue`] (being an [interface][1] or a [union][2]) into a concrete
/// downstream [object][3] type.
///
/// Tries to resolve this [`GraphQLValue`] into the provided `type_name`. If the type matches,
/// then passes the instance along to [`Executor::resolve`].
///
/// # Panics
///
/// The default implementation panics.
///
/// [1]: https://spec.graphql.org/June2018/#sec-Interfaces
/// [2]: https://spec.graphql.org/June2018/#sec-Unions
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
fn resolve_into_type(
&self,
info: &Self::TypeInfo,
type_name: &str,
selection_set: Option<&[Selection<S>]>,
executor: &Executor<Self::Context, S>,
) -> ExecutionResult<S> {
if self.type_name(info).unwrap() == type_name {
self.resolve(info, selection_set, executor)
} else {
panic!(
"GraphQLValue::resolve_into_type() must be implemented by unions and interfaces"
);
}
}
/// Returns the concrete [`GraphQLType`] name for this [`GraphQLValue`] being an [interface][1],
/// a [union][2] or an [object][3].
///
/// # Panics
///
/// The default implementation panics.
///
/// [1]: https://spec.graphql.org/June2018/#sec-Interfaces
/// [2]: https://spec.graphql.org/June2018/#sec-Unions
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
#[allow(unused_variables)]
fn concrete_type_name(&self, context: &Self::Context, info: &Self::TypeInfo) -> String {
panic!(
"GraphQLValue::concrete_type_name() must be implemented by unions, interfaces \
and objects",
);
}
/// Resolves the provided `selection_set` against this [`GraphQLValue`].
///
/// For non-[object][3] types, the `selection_set` will be [`None`] and the value should simply
/// be returned.
///
/// For [objects][3], all fields in the `selection_set` should be resolved. The default
/// implementation uses [`GraphQLValue::resolve_field`] to resolve all fields, including those
/// through a fragment expansion.
///
/// Since the [GraphQL spec specifies][0] that errors during field processing should result in
/// a null-value, this might return `Ok(Null)` in case of a failure. Errors are recorded
/// internally.
///
/// # Panics
///
/// The default implementation panics, if `selection_set` is [`None`].
///
/// [0]: https://spec.graphql.org/June2018/#sec-Errors-and-Non-Nullability
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
fn resolve(
&self,
info: &Self::TypeInfo,
selection_set: Option<&[Selection<S>]>,
executor: &Executor<Self::Context, S>,
) -> ExecutionResult<S> {
if let Some(sel) = selection_set {
let mut res = Object::with_capacity(sel.len());
Ok(
if resolve_selection_set_into(self, info, sel, executor, &mut res) {
Value::Object(res)
} else {
Value::null()
},
)
} else {
panic!("GraphQLValue::resolve() must be implemented by non-object output types");
}
}
}
crate::sa::assert_obj_safe!(GraphQLValue<Context = (), TypeInfo = ()>);
/// Helper alias for naming [trait objects][1] of [`GraphQLValue`].
///
/// [1]: https://doc.rust-lang.org/reference/types/trait-object.html
pub type DynGraphQLValue<S, C, TI> =
dyn GraphQLValue<S, Context = C, TypeInfo = TI> + Send + Sync + 'static;
/// Primary trait used to expose Rust types in a GraphQL schema.
///
/// All of the convenience macros ultimately expand into an implementation of
/// this trait for the given type. This can all be done manually.
///
/// # Example
///
/// Manually deriving an [object][3] is straightforward, but tedious. This is the equivalent of the
/// `User` object as shown in the example in the documentation root:
/// ```
/// # use std::collections::HashMap;
/// use juniper::{
/// meta::MetaType, Arguments, Context, DefaultScalarValue, Executor, ExecutionResult,
/// FieldResult, GraphQLType, GraphQLValue, Registry,
/// };
///
/// #[derive(Debug)]
/// struct Database { users: HashMap<String, User> }
/// impl Context for Database {}
///
/// #[derive(Debug)]
/// struct User { id: String, name: String, friend_ids: Vec<String> }
///
/// impl GraphQLType<DefaultScalarValue> for User {
/// fn name(_: &()) -> Option<&'static str> {
/// Some("User")
/// }
///
/// fn meta<'r>(_: &(), registry: &mut Registry<'r>) -> MetaType<'r>
/// where DefaultScalarValue: 'r,
/// {
/// // First, we need to define all fields and their types on this type.
/// //
/// // If we need arguments, want to implement interfaces, or want to add documentation
/// // strings, we can do it here.
/// let fields = &[
/// registry.field::<&String>("id", &()),
/// registry.field::<&String>("name", &()),
/// registry.field::<Vec<&User>>("friends", &()),
/// ];
/// registry.build_object_type::<User>(&(), fields).into_meta()
/// }
/// }
///
/// impl GraphQLValue<DefaultScalarValue> for User {
/// type Context = Database;
/// type TypeInfo = ();
///
/// fn type_name(&self, _: &()) -> Option<&'static str> {
/// <User as GraphQLType>::name(&())
/// }
///
/// fn resolve_field(
/// &self,
/// info: &(),
/// field_name: &str,
/// args: &Arguments,
/// executor: &Executor<Database>
/// ) -> ExecutionResult
/// {
/// // Next, we need to match the queried field name. All arms of this match statement
/// // return `ExecutionResult`, which makes it hard to statically verify that the type you
/// // pass on to `executor.resolve*` actually matches the one that you defined in `meta()`
/// // above.
/// let database = executor.context();
/// match field_name {
/// // Because scalars are defined with another `Context` associated type, you must use
/// // `resolve_with_ctx` here to make the `executor` perform automatic type conversion
/// // of its argument.
/// "id" => executor.resolve_with_ctx(info, &self.id),
/// "name" => executor.resolve_with_ctx(info, &self.name),
///
/// // You pass a vector of `User` objects to `executor.resolve`, and it will determine
/// // which fields of the sub-objects to actually resolve based on the query.
/// // The `executor` instance keeps track of its current position in the query.
/// "friends" => executor.resolve(info,
/// &self.friend_ids.iter()
/// .filter_map(|id| database.users.get(id))
/// .collect::<Vec<_>>()
/// ),
///
/// // We can only reach this panic in two cases: either a mismatch between the defined
/// // schema in `meta()` above, or a validation failure caused by a bug in this library.
/// //
/// // In either of those two cases, the only reasonable way out is to panic the thread.
/// _ => panic!("Field {} not found on type User", field_name),
/// }
/// }
/// }
/// ```
///
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
pub trait GraphQLType<S = DefaultScalarValue>: GraphQLValue<S>
where
S: ScalarValue,
{
/// Returns name of this [`GraphQLType`] to expose.
///
/// This function will be called multiple times during schema construction. It must _not_
/// perform any calculation and _always_ return the same value.
fn name(info: &Self::TypeInfo) -> Option<&str>;
/// Returns [`MetaType`] representing this [`GraphQLType`].
fn meta<'r>(info: &Self::TypeInfo, registry: &mut Registry<'r, S>) -> MetaType<'r, S>
where
S: 'r;
}
/// Resolver logic for queries'/mutations' selection set.
/// Calls appropriate resolver method for each field or fragment found
/// and then merges returned values into `result` or pushes errors to
/// field's/fragment's sub executor.
///
/// Returns false if any errors occurred and true otherwise.
pub(crate) fn resolve_selection_set_into<T, S>(
instance: &T,
info: &T::TypeInfo,
selection_set: &[Selection<S>],
executor: &Executor<T::Context, S>,
result: &mut Object<S>,
) -> bool
where
T: GraphQLValue<S> + ?Sized,
S: ScalarValue,
{
let meta_type = executor
.schema()
.concrete_type_by_name(
instance
.type_name(info)
.expect("Resolving named type's selection set")
.as_ref(),
)
.expect("Type not found in schema");
for selection in selection_set {
match *selection {
Selection::Field(Spanning {
item: ref f,
start: ref start_pos,
..
}) => {
if is_excluded(&f.directives, executor.variables()) {
continue;
}
let response_name = f.alias.as_ref().unwrap_or(&f.name).item;
if f.name.item == "__typename" {
result.add_field(
response_name,
Value::scalar(instance.concrete_type_name(executor.context(), info)),
);
continue;
}
let meta_field = meta_type.field_by_name(f.name.item).unwrap_or_else(|| {
panic!(
"Field {} not found on type {:?}",
f.name.item,
meta_type.name()
)
});
let exec_vars = executor.variables();
let sub_exec = executor.field_sub_executor(
response_name,
f.name.item,
*start_pos,
f.selection_set.as_ref().map(|v| &v[..]),
);
let field_result = instance.resolve_field(
info,
f.name.item,
&Arguments::new(
f.arguments.as_ref().map(|m| {
m.item
.iter()
.map(|&(ref k, ref v)| {
(k.item, v.item.clone().into_const(exec_vars))
})
.collect()
}),
&meta_field.arguments,
),
&sub_exec,
);
match field_result {
Ok(Value::Null) if meta_field.field_type.is_non_null() => return false,
Ok(v) => merge_key_into(result, response_name, v),
Err(e) => {
sub_exec.push_error_at(e, *start_pos);
if meta_field.field_type.is_non_null() {
return false;
}
result.add_field(response_name, Value::null());
}
}
}
Selection::FragmentSpread(Spanning {
item: ref spread,
start: ref start_pos,
..
}) => | {
let sub_result = instance.resolve_into_type(
info,
&concrete_type_name,
Some(&fragment.selection_set[..]),
&sub_exec,
);
if let Ok(Value::Object(object)) = sub_result {
for (k, v) in object {
merge_key_into(result, &k, v);
}
} else if let Err(e) = sub_result {
sub_exec.push_error_at(e, *start_pos);
}
}
}
Selection::InlineFragment(Spanning {
item: ref fragment,
start: ref start_pos,
..
}) => {
if is_excluded(&fragment.directives, executor.variables()) {
continue;
}
let sub_exec = executor.type_sub_executor(
fragment.type_condition.as_ref().map(|c| c.item),
Some(&fragment.selection_set[..]),
);
if let Some(ref type_condition) = fragment.type_condition {
// Check whether the type matches the type condition.
let concrete_type_name = instance.concrete_type_name(sub_exec.context(), info);
if executor
.schema()
.is_named_subtype(&concrete_type_name, type_condition.item)
{
| {
if is_excluded(&spread.directives, executor.variables()) {
continue;
}
let fragment = &executor
.fragment_by_name(spread.name.item)
.expect("Fragment could not be found");
let sub_exec = executor.type_sub_executor(
Some(fragment.type_condition.item),
Some(&fragment.selection_set[..]),
);
let concrete_type_name = instance.concrete_type_name(sub_exec.context(), info);
let type_name = instance.type_name(info);
if executor
.schema()
.is_named_subtype(&concrete_type_name, fragment.type_condition.item)
|| Some(fragment.type_condition.item) == type_name | conditional_block |
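The resolver loop above implements GraphQL's null-propagation rule: an error on a nullable field is recorded as `null` and resolution continues, while an error on a non-null field aborts the whole object (the `return false` branches). A standalone sketch of that rule with simplified stand-in types (not juniper's actual `Value`/`Object`):
#[derive(Debug, Clone)]
enum Value {
    Null,
    Str(String),
}

struct FieldMeta {
    name: &'static str,
    non_null: bool,
}

// Returns None when a non-null field fails, mirroring the `return false` paths above.
fn resolve_object(fields: &[(FieldMeta, Result<Value, String>)]) -> Option<Vec<(&'static str, Value)>> {
    let mut out = Vec::new();
    for (meta, result) in fields {
        match result {
            Ok(v) => out.push((meta.name, v.clone())),
            Err(_) if meta.non_null => return None,          // error on non-null: discard the object
            Err(_) => out.push((meta.name, Value::Null)),    // error on nullable: record null, keep going
        }
    }
    Some(out)
}

fn main() {
    let resolved = resolve_object(&[
        (FieldMeta { name: "name", non_null: true }, Ok(Value::Str("Alice".into()))),
        (FieldMeta { name: "bio", non_null: false }, Err("db down".into())),
    ]);
    println!("{:?}", resolved); // Some([("name", Str("Alice")), ("bio", Null)])

    let aborted = resolve_object(&[
        (FieldMeta { name: "name", non_null: true }, Err("db down".into())),
    ]);
    println!("{:?}", aborted); // None
}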
base.rs | if let (Some(args), Some(meta_args)) = (&mut args, meta_args) {
for arg in meta_args {
let arg_name = arg.name.as_str();
if args.get(arg_name).map_or(true, InputValue::is_null) {
if let Some(val) = arg.default_value.as_ref() {
args.insert(arg_name, val.clone());
}
}
}
}
Self { args }
}
/// Gets an argument by the given `name` and converts it into the desired
/// type.
///
/// If the argument is found, or a default argument has been provided, the
/// given [`InputValue`] will be converted into the type `T`.
///
/// Returns [`None`] if an argument with such `name` is not present.
///
/// # Errors
///
/// If the [`FromInputValue`] conversion fails.
pub fn get<T>(&self, name: &str) -> FieldResult<Option<T>, S>
where
T: FromInputValue<S>,
T::Error: IntoFieldError<S>,
{
self.args
.as_ref()
.and_then(|args| args.get(name))
.map(InputValue::convert)
.transpose()
.map_err(IntoFieldError::into_field_error)
}
}
/// Primary trait used to resolve GraphQL values.
///
/// All the convenience macros ultimately expand into an implementation of this trait for the given
/// type. The macros remove duplicated definitions of fields and arguments, and add type checks on
/// all resolving functions automatically. This can all be done manually too.
///
/// [`GraphQLValue`] provides _some_ convenience methods for you, in the form of optional trait
/// methods. The `type_name` method is mandatory, but other than that, it depends on what type
/// you're exposing:
/// - [Scalars][4], [enums][5], [lists][6] and [non-null wrappers][7] only require `resolve`.
/// - [Interfaces][1] and [objects][3] require `resolve_field` _or_ `resolve` if you want to
/// implement a custom resolution logic (probably not).
/// - [Interfaces][1] and [unions][2] require `resolve_into_type` and `concrete_type_name`.
/// - [Input objects][8] do not require anything.
///
/// # Object safety
///
/// This trait is [object safe][11], therefore may be turned into a [trait object][12] and used for
/// resolving GraphQL values even when a concrete Rust type is erased.
///
/// # Example
///
/// This trait is intended to be used in conjunction with a [`GraphQLType`] trait. See the example
/// in the documentation of a [`GraphQLType`] trait.
///
/// [1]: https://spec.graphql.org/June2018/#sec-Interfaces
/// [2]: https://spec.graphql.org/June2018/#sec-Unions
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
/// [4]: https://spec.graphql.org/June2018/#sec-Scalars
/// [5]: https://spec.graphql.org/June2018/#sec-Enums
/// [6]: https://spec.graphql.org/June2018/#sec-Type-System.List
/// [7]: https://spec.graphql.org/June2018/#sec-Type-System.Non-Null
/// [8]: https://spec.graphql.org/June2018/#sec-Input-Objects
/// [11]: https://doc.rust-lang.org/reference/items/traits.html#object-safety
/// [12]: https://doc.rust-lang.org/reference/types/trait-object.html
pub trait GraphQLValue<S = DefaultScalarValue>
where
S: ScalarValue,
{
/// Context type for this [`GraphQLValue`].
///
/// It's threaded through a query execution to all affected nodes, and can be used to hold
/// common data, e.g. database connections or request session information.
type Context;
/// Type that may carry additional schema information for this [`GraphQLValue`].
///
/// It can be used to implement a schema that is partly dynamic, meaning that it can use
/// information that is not known at compile time, for instance by reading it from a
/// configuration file at startup.
type TypeInfo;
/// Returns name of the [`GraphQLType`] exposed by this [`GraphQLValue`].
///
/// This function will be called multiple times during a query execution. It must _not_ perform
/// any calculation and _always_ return the same value.
///
/// Usually, it should just call a [`GraphQLType::name`] inside.
fn type_name<'i>(&self, info: &'i Self::TypeInfo) -> Option<&'i str>;
/// Resolves the value of a single field on this [`GraphQLValue`].
///
/// The `arguments` object contains all the specified arguments, with default values being
/// substituted for the ones not provided by the query.
///
/// The `executor` can be used to drive selections into sub-[objects][3].
///
/// # Panics
///
/// The default implementation panics.
///
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
fn resolve_field(
&self,
_info: &Self::TypeInfo,
_field_name: &str,
_arguments: &Arguments<S>,
_executor: &Executor<Self::Context, S>,
) -> ExecutionResult<S> {
panic!("GraphQLValue::resolve_field() must be implemented by objects and interfaces");
}
/// Resolves this [`GraphQLValue`] (being an [interface][1] or a [union][2]) into a concrete
/// downstream [object][3] type.
///
/// Tries to resolve this [`GraphQLValue`] into the provided `type_name`. If the type matches,
/// then passes the instance along to [`Executor::resolve`].
///
/// # Panics
///
/// The default implementation panics.
///
/// [1]: https://spec.graphql.org/June2018/#sec-Interfaces
/// [2]: https://spec.graphql.org/June2018/#sec-Unions
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
fn resolve_into_type(
&self,
info: &Self::TypeInfo,
type_name: &str,
selection_set: Option<&[Selection<S>]>,
executor: &Executor<Self::Context, S>,
) -> ExecutionResult<S> |
/// Returns the concrete [`GraphQLType`] name for this [`GraphQLValue`] being an [interface][1],
/// a [union][2] or an [object][3].
///
/// # Panics
///
/// The default implementation panics.
///
/// [1]: https://spec.graphql.org/June2018/#sec-Interfaces
/// [2]: https://spec.graphql.org/June2018/#sec-Unions
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
#[allow(unused_variables)]
fn concrete_type_name(&self, context: &Self::Context, info: &Self::TypeInfo) -> String {
panic!(
"GraphQLValue::concrete_type_name() must be implemented by unions, interfaces \
and objects",
);
}
/// Resolves the provided `selection_set` against this [`GraphQLValue`].
///
/// For non-[object][3] types, the `selection_set` will be [`None`] and the value should simply
/// be returned.
///
/// For [objects][3], all fields in the `selection_set` should be resolved. The default
/// implementation uses [`GraphQLValue::resolve_field`] to resolve all fields, including those
/// through a fragment expansion.
///
/// Since the [GraphQL spec specifies][0] that errors during field processing should result in
/// a null-value, this might return `Ok(Null)` in case of a failure. Errors are recorded
/// internally.
///
/// # Panics
///
/// The default implementation panics, if `selection_set` is [`None`].
///
/// [0]: https://spec.graphql.org/June2018/#sec-Errors-and-Non-Nullability
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
fn resolve(
&self,
info: &Self::TypeInfo,
selection_set: Option<&[Selection<S>]>,
executor: &Executor<Self::Context, S>,
) -> ExecutionResult<S> {
if let Some(sel) = selection_set {
let mut res = Object::with_capacity(sel.len());
Ok(
if resolve_selection_set_into(self, info, sel, executor, &mut res) {
Value::Object(res)
} else {
Value::null()
},
)
} else {
panic!("GraphQLValue::resolve() must be implemented by non-object output types");
}
}
}
crate::sa::assert_obj_safe!(GraphQLValue<Context = (), TypeInfo = ()>);
/// Helper alias for naming [trait objects][1] of [`GraphQLValue`].
///
/// [1]: https://doc.rust-lang.org/reference/types/trait-object.html
pub type DynGraphQLValue<S, C, TI> =
dyn GraphQLValue<S, Context = C, TypeInfo = TI> + Send + Sync + 'static;
/// Primary trait used to expose Rust types in a GraphQL schema.
///
/// All of the convenience macros ultimately expand into an implementation of
/// this trait for the given type. This can all be done manually.
///
/// # Example
///
/// Manually deriving an [object][3] is straightforward, but tedious. This is the equivalent of the
/// `User` object as shown in the example in the documentation root:
/// ```
/// # use std::collections::HashMap;
/// use juniper::{
/// meta::MetaType, Arguments, Context, DefaultScalarValue, Executor, ExecutionResult,
/// FieldResult, GraphQLType, GraphQLValue, Registry,
/// };
///
/// #[derive(Debug)]
/// struct Database { users: HashMap<String, User> }
/// impl Context for Database {}
///
/// #[derive(Debug)]
/// struct User { id: String, name: String, friend_ids: Vec<String> }
///
/// impl GraphQLType<DefaultScalarValue> for User {
/// fn name(_: &()) -> Option<&'static str> {
/// Some("User")
/// }
///
/// fn meta<'r>(_: &(), registry: &mut Registry<'r>) -> MetaType<'r>
/// where DefaultScalarValue: 'r,
/// {
/// // First, we need to define all fields and their types on this type.
/// //
/// // If we need arguments, want to implement interfaces, or want to add documentation
/// // strings, we can do it here.
/// let fields = &[
/// registry.field::<&String>("id", &()),
/// registry.field::<&String>("name", &()),
/// registry.field::<Vec<&User>>("friends", &()),
/// ];
/// registry.build_object_type::<User>(&(), fields).into_meta()
/// }
/// }
///
/// impl GraphQLValue<DefaultScalarValue> for User {
/// type Context = Database;
/// type TypeInfo = ();
///
/// fn type_name(&self, _: &()) -> Option<&'static str> {
/// <User as GraphQLType>::name(&())
/// }
///
/// fn resolve_field(
/// &self,
/// info: &(),
/// field_name: &str,
/// args: &Arguments,
/// executor: &Executor<Database>
/// ) -> ExecutionResult
/// {
/// // Next, we need to match the queried field name. All arms of this match statement
/// // return `ExecutionResult`, which makes it hard to statically verify that the type you
/// // pass on to `executor.resolve*` actually matches the one that you defined in `meta()`
/// // above.
/// let database = executor.context();
/// match field_name {
/// // Because scalars are defined with another `Context` associated type, you must use
/// // `resolve_with_ctx` here to make the `executor` perform automatic type conversion
/// // of its argument.
/// "id" => executor.resolve_with_ctx(info, &self.id),
/// "name" => executor.resolve_with_ctx(info, &self.name),
///
/// // You pass a vector of `User` objects to `executor.resolve`, and it will determine
/// // which fields of the sub-objects to actually resolve based on the query.
/// // The `executor` instance keeps track of its current position in the query.
/// "friends" => executor.resolve(info,
/// &self.friend_ids.iter()
/// .filter_map(|id| database.users.get(id))
/// .collect::<Vec<_>>()
/// ),
///
/// // We can only reach this panic in two cases: either a mismatch between the defined
/// // schema in `meta()` above, or a validation failure caused by a bug in this library.
/// //
/// // In either of those two cases, the only reasonable way out is to panic the thread.
/// _ => panic!("Field {} not found on type User", field_name),
/// }
/// }
/// }
/// ```
///
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
pub trait GraphQLType<S = DefaultScalarValue>: GraphQLValue<S>
where
S: ScalarValue,
{
/// Returns name of this [`GraphQLType`] to expose.
///
/// This function will be called multiple times during schema construction. It must _not_
/// perform any calculation and _always_ return the same value.
fn name(info: &Self::TypeInfo) -> Option<&str>;
/// Returns [`MetaType`] representing this [`GraphQLType`].
fn meta<'r>(info: &Self::TypeInfo, registry: &mut Registry<'r, S>) -> MetaType<'r, S>
where
S: 'r;
}
/// Resolver logic for queries'/mutations' selection set.
/// Calls appropriate resolver method for each field or fragment found
/// and then merges returned values into `result` or pushes errors to
/// field's/fragment's sub executor.
///
/// Returns false if any errors occurred and true otherwise.
pub(crate) fn resolve_selection_set_into<T, S>(
instance: &T,
info: &T::TypeInfo,
selection_set: &[Selection<S>],
executor: &Executor<T::Context, S>,
result: &mut Object<S>,
) -> bool
where
T: GraphQLValue<S> + ?Sized,
S: ScalarValue,
{
let meta_type = executor
.schema()
.concrete_type_by_name(
instance
.type_name(info)
.expect("Resolving named type's selection set")
.as_ref(),
)
.expect("Type not found in schema");
for selection in selection_set {
match *selection {
Selection::Field(Spanning {
item: ref f,
start: ref start_pos,
..
}) => {
if is_excluded(&f.directives, executor.variables()) {
continue;
}
let response_name = f.alias.as_ref().unwrap_or(&f.name).item;
if f.name.item == "__typename" {
result.add_field(
response_name,
Value::scalar(instance.concrete_type_name(executor.context(), info)),
);
continue;
}
let meta_field = meta_type.field_by_name(f.name.item).unwrap_or_else(|| {
panic!(
"Field {} not found on type {:?}",
f.name.item,
meta_type.name()
)
});
let exec_vars = executor.variables();
let sub_exec = executor.field_sub_executor(
response_name,
f.name.item,
*start_pos,
f.selection_set.as_ref().map(|v| &v[..]),
);
let field_result = instance.resolve_field(
info,
f.name.item,
&Arguments::new(
f.arguments.as_ref().map(|m| {
m.item
.iter()
.map(|&(ref k, ref v)| {
(k.item, v.item.clone().into_const(exec_vars))
})
.collect()
}),
&meta_field.arguments,
),
&sub_exec,
);
match field_result {
Ok(Value::Null) if meta_field.field_type.is_non_null() => return false,
Ok(v) => merge_key_into(result, response_name, v),
Err(e) => {
sub_exec.push_error_at(e, *start_pos);
if meta_field.field_type.is_non_null() {
return false;
}
result.add_field(response_name, Value::null());
}
}
}
Selection::FragmentSpread(Spanning {
item: ref spread,
start: ref start_pos,
..
}) => {
if is_excluded(&spread.directives, executor.variables()) {
continue;
}
let fragment = &executor
.fragment_by_name(spread.name.item)
.expect("Fragment could not be found");
let sub_exec = executor.type_sub_executor(
Some(fragment.type_condition.item),
Some(&fragment.selection_set[..]),
);
let concrete_type_name = instance.concrete_type_name(sub_exec.context(), info);
let type_name = instance.type_name(info);
if executor
.schema()
.is_named_subtype(&concrete_type_name, fragment.type_condition.item)
|| Some(fragment.type_condition.item) == type_name
{
let sub_result = instance.resolve_into_type(
info,
&concrete_type_name,
Some(&fragment.selection_set[..]),
&sub_exec,
);
if let Ok(Value::Object(object)) = sub_result {
for (k, v) in object {
merge_key_into(result, &k, v);
}
} else if let Err(e) = sub_result {
sub_exec.push_error_at(e, *start_pos);
}
}
}
Selection::InlineFragment(Spanning {
item: ref fragment,
start: ref start_pos,
..
}) => {
if is_excluded(&fragment.directives, executor.variables()) {
continue;
}
let sub_exec = executor.type_sub_executor(
fragment.type_condition.as_ref().map(|c| c.item),
Some(&fragment.selection_set[..]),
);
if let Some(ref type_condition) = fragment.type_condition {
// Check whether the type matches the type condition.
let concrete_type_name = instance.concrete_type_name(sub_exec.context(), info);
if executor
.schema()
.is_named_subtype(&concrete_type_name, type_condition.item)
{
| {
if self.type_name(info).unwrap() == type_name {
self.resolve(info, selection_set, executor)
} else {
panic!(
"GraphQLValue::resolve_into_type() must be implemented by unions and interfaces"
);
}
} | identifier_body |
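`Arguments::new` in the rows above merges declared default values into the query-supplied arguments whenever an argument is absent or explicitly null. A hedged sketch of that merge with a std `HashMap` instead of juniper's `IndexMap`, using `Option<i32>` as a stand-in for `InputValue`:
use std::collections::HashMap;

fn merge_defaults(
    mut provided: HashMap<&'static str, Option<i32>>, // None stands in for an explicit null
    defaults: &[(&'static str, i32)],
) -> HashMap<&'static str, Option<i32>> {
    for &(name, default) in defaults {
        // Insert the default if the argument is missing or was passed as null.
        let missing_or_null = provided.get(name).map_or(true, Option::is_none);
        if missing_or_null {
            provided.insert(name, Some(default));
        }
    }
    provided
}

fn main() {
    let mut provided = HashMap::new();
    provided.insert("limit", None); // explicit null in the query
    let merged = merge_defaults(provided, &[("limit", 10), ("offset", 0)]);
    assert_eq!(merged["limit"], Some(10)); // null replaced by declared default
    assert_eq!(merged["offset"], Some(0)); // missing argument filled in
    println!("{:?}", merged);
}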
base.rs | {
/// ## Scalar types
///
/// Scalar types appear as the leaf nodes of GraphQL queries. Strings,\
/// numbers, and booleans are the built-in types, and while it's possible\
/// to define your own, it's relatively uncommon.
Scalar,
/// ## Object types
///
/// The most common type to be implemented by users. Objects have fields\
/// and can implement interfaces.
Object,
/// ## Interface types
///
/// Interface types are used to represent overlapping fields between\
/// multiple types, and can be queried for their concrete type.
Interface,
/// ## Union types
///
/// Unions are similar to interfaces but cannot contain any fields on\
/// their own.
Union,
/// ## Enum types
///
/// Like scalars, enum types appear as the leaf nodes of GraphQL queries.
Enum,
/// ## Input objects
///
/// Represents complex values provided in queries _into_ the system.
#[graphql(name = "INPUT_OBJECT")]
InputObject,
/// ## List types
///
/// Represent lists of other types. This library provides implementations\
/// for vectors and slices, but other Rust types can be extended to serve\
/// as GraphQL lists.
List,
/// ## Non-null types
///
/// In GraphQL, nullable types are the default. By putting a `!` after a\
/// type, it becomes non-nullable.
#[graphql(name = "NON_NULL")]
NonNull,
}
/// Field argument container
#[derive(Debug)]
pub struct Arguments<'a, S = DefaultScalarValue> {
args: Option<IndexMap<&'a str, InputValue<S>>>,
}
impl<'a, S> Arguments<'a, S> {
#[doc(hidden)]
pub fn new(
mut args: Option<IndexMap<&'a str, InputValue<S>>>,
meta_args: &'a Option<Vec<Argument<S>>>,
) -> Self
where
S: Clone,
{
if meta_args.is_some() && args.is_none() {
args = Some(IndexMap::new());
}
if let (Some(args), Some(meta_args)) = (&mut args, meta_args) {
for arg in meta_args {
let arg_name = arg.name.as_str();
if args.get(arg_name).map_or(true, InputValue::is_null) {
if let Some(val) = arg.default_value.as_ref() {
args.insert(arg_name, val.clone());
}
}
}
}
Self { args }
}
/// Gets an argument by the given `name` and converts it into the desired
/// type.
///
/// If the argument is found, or a default argument has been provided, the
/// given [`InputValue`] will be converted into the type `T`.
///
/// Returns [`None`] if an argument with such `name` is not present.
///
/// # Errors
///
/// If the [`FromInputValue`] conversion fails.
pub fn get<T>(&self, name: &str) -> FieldResult<Option<T>, S>
where
T: FromInputValue<S>,
T::Error: IntoFieldError<S>,
{
self.args
.as_ref()
.and_then(|args| args.get(name))
.map(InputValue::convert)
.transpose()
.map_err(IntoFieldError::into_field_error)
}
}
/// Primary trait used to resolve GraphQL values.
///
/// All the convenience macros ultimately expand into an implementation of this trait for the given
/// type. The macros remove duplicated definitions of fields and arguments, and add type checks on
/// all resolving functions automatically. This can all be done manually too.
///
/// [`GraphQLValue`] provides _some_ convenience methods for you, in the form of optional trait
/// methods. The `type_name` method is mandatory, but other than that, it depends on what type
/// you're exposing:
/// - [Scalars][4], [enums][5], [lists][6] and [non-null wrappers][7] only require `resolve`.
/// - [Interfaces][1] and [objects][3] require `resolve_field` _or_ `resolve` if you want to
/// implement a custom resolution logic (probably not).
/// - [Interfaces][1] and [unions][2] require `resolve_into_type` and `concrete_type_name`.
/// - [Input objects][8] do not require anything.
///
/// # Object safety
///
/// This trait is [object safe][11], therefore may be turned into a [trait object][12] and used for
/// resolving GraphQL values even when a concrete Rust type is erased.
///
/// # Example
///
/// This trait is intended to be used in conjunction with a [`GraphQLType`] trait. See the example
/// in the documentation of a [`GraphQLType`] trait.
///
/// [1]: https://spec.graphql.org/June2018/#sec-Interfaces
/// [2]: https://spec.graphql.org/June2018/#sec-Unions
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
/// [4]: https://spec.graphql.org/June2018/#sec-Scalars
/// [5]: https://spec.graphql.org/June2018/#sec-Enums
/// [6]: https://spec.graphql.org/June2018/#sec-Type-System.List
/// [7]: https://spec.graphql.org/June2018/#sec-Type-System.Non-Null
/// [8]: https://spec.graphql.org/June2018/#sec-Input-Objects
/// [11]: https://doc.rust-lang.org/reference/items/traits.html#object-safety
/// [12]: https://doc.rust-lang.org/reference/types/trait-object.html
pub trait GraphQLValue<S = DefaultScalarValue>
where
S: ScalarValue,
{
/// Context type for this [`GraphQLValue`].
///
/// It's threaded through a query execution to all affected nodes, and can be used to hold
/// common data, e.g. database connections or request session information.
type Context;
/// Type that may carry additional schema information for this [`GraphQLValue`].
///
/// It can be used to implement a schema that is partly dynamic, meaning that it can use
/// information that is not known at compile time, for instance by reading it from a
/// configuration file at startup.
type TypeInfo;
/// Returns name of the [`GraphQLType`] exposed by this [`GraphQLValue`].
///
/// This function will be called multiple times during a query execution. It must _not_ perform
/// any calculation and _always_ return the same value.
///
/// Usually, it should just call a [`GraphQLType::name`] inside.
fn type_name<'i>(&self, info: &'i Self::TypeInfo) -> Option<&'i str>;
/// Resolves the value of a single field on this [`GraphQLValue`].
///
/// The `arguments` object contains all the specified arguments, with default values being
/// substituted for the ones not provided by the query.
///
/// The `executor` can be used to drive selections into sub-[objects][3].
///
/// # Panics
///
/// The default implementation panics.
///
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
fn resolve_field(
&self,
_info: &Self::TypeInfo,
_field_name: &str,
_arguments: &Arguments<S>,
_executor: &Executor<Self::Context, S>,
) -> ExecutionResult<S> {
panic!("GraphQLValue::resolve_field() must be implemented by objects and interfaces");
}
/// Resolves this [`GraphQLValue`] (being an [interface][1] or a [union][2]) into a concrete
/// downstream [object][3] type.
///
/// Tries to resolve this [`GraphQLValue`] into the provided `type_name`. If the type matches,
/// then passes the instance along to [`Executor::resolve`].
///
/// # Panics
///
/// The default implementation panics.
///
/// [1]: https://spec.graphql.org/June2018/#sec-Interfaces
/// [2]: https://spec.graphql.org/June2018/#sec-Unions
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
fn resolve_into_type(
&self,
info: &Self::TypeInfo,
type_name: &str,
selection_set: Option<&[Selection<S>]>,
executor: &Executor<Self::Context, S>,
) -> ExecutionResult<S> {
if self.type_name(info).unwrap() == type_name {
self.resolve(info, selection_set, executor)
} else {
panic!(
"GraphQLValue::resolve_into_type() must be implemented by unions and interfaces"
);
}
}
/// Returns the concrete [`GraphQLType`] name for this [`GraphQLValue`] being an [interface][1],
/// a [union][2] or an [object][3].
///
/// # Panics
///
/// The default implementation panics.
///
/// [1]: https://spec.graphql.org/June2018/#sec-Interfaces
/// [2]: https://spec.graphql.org/June2018/#sec-Unions
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
#[allow(unused_variables)]
fn concrete_type_name(&self, context: &Self::Context, info: &Self::TypeInfo) -> String {
panic!(
"GraphQLValue::concrete_type_name() must be implemented by unions, interfaces \
and objects",
);
}
/// Resolves the provided `selection_set` against this [`GraphQLValue`].
///
/// For non-[object][3] types, the `selection_set` will be [`None`] and the value should simply
/// be returned.
///
/// For [objects][3], all fields in the `selection_set` should be resolved. The default
/// implementation uses [`GraphQLValue::resolve_field`] to resolve all fields, including those
/// through a fragment expansion.
///
/// Since the [GraphQL spec specifies][0] that errors during field processing should result in
/// a null-value, this might return `Ok(Null)` in case of a failure. Errors are recorded
/// internally.
///
/// # Panics
///
/// The default implementation panics, if `selection_set` is [`None`].
///
/// [0]: https://spec.graphql.org/June2018/#sec-Errors-and-Non-Nullability
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
fn resolve(
&self,
info: &Self::TypeInfo,
selection_set: Option<&[Selection<S>]>,
executor: &Executor<Self::Context, S>,
) -> ExecutionResult<S> {
if let Some(sel) = selection_set {
let mut res = Object::with_capacity(sel.len());
Ok(
if resolve_selection_set_into(self, info, sel, executor, &mut res) {
Value::Object(res)
} else {
Value::null()
},
)
} else {
panic!("GraphQLValue::resolve() must be implemented by non-object output types");
}
}
}
crate::sa::assert_obj_safe!(GraphQLValue<Context = (), TypeInfo = ()>);
/// Helper alias for naming [trait objects][1] of [`GraphQLValue`].
///
/// [1]: https://doc.rust-lang.org/reference/types/trait-object.html
pub type DynGraphQLValue<S, C, TI> =
dyn GraphQLValue<S, Context = C, TypeInfo = TI> + Send + Sync + 'static;
/// Primary trait used to expose Rust types in a GraphQL schema.
///
/// All of the convenience macros ultimately expand into an implementation of
/// this trait for the given type. This can all be done manually.
///
/// # Example
///
/// Manually deriving an [object][3] is straightforward, but tedious. This is the equivalent of the
/// `User` object as shown in the example in the documentation root:
/// ```
/// # use std::collections::HashMap;
/// use juniper::{
/// meta::MetaType, Arguments, Context, DefaultScalarValue, Executor, ExecutionResult,
/// FieldResult, GraphQLType, GraphQLValue, Registry,
/// };
///
/// #[derive(Debug)]
/// struct Database { users: HashMap<String, User> }
/// impl Context for Database {}
///
/// #[derive(Debug)]
/// struct User { id: String, name: String, friend_ids: Vec<String> }
///
/// impl GraphQLType<DefaultScalarValue> for User {
/// fn name(_: &()) -> Option<&'static str> {
/// Some("User")
/// }
///
/// fn meta<'r>(_: &(), registry: &mut Registry<'r>) -> MetaType<'r>
/// where DefaultScalarValue: 'r,
/// {
/// // First, we need to define all fields and their types on this type.
/// //
/// // If we need arguments, want to implement interfaces, or want to add documentation
/// // strings, we can do it here.
/// let fields = &[
/// registry.field::<&String>("id", &()),
/// registry.field::<&String>("name", &()),
/// registry.field::<Vec<&User>>("friends", &()),
/// ];
/// registry.build_object_type::<User>(&(), fields).into_meta()
/// }
/// }
///
/// impl GraphQLValue<DefaultScalarValue> for User {
/// type Context = Database;
/// type TypeInfo = ();
///
/// fn type_name(&self, _: &()) -> Option<&'static str> {
/// <User as GraphQLType>::name(&())
/// }
///
/// fn resolve_field(
/// &self,
/// info: &(),
/// field_name: &str,
/// args: &Arguments,
/// executor: &Executor<Database>
/// ) -> ExecutionResult
/// {
/// // Next, we need to match the queried field name. All arms of this match statement
/// // return `ExecutionResult`, which makes it hard to statically verify that the type you
/// // pass on to `executor.resolve*` actually matches the one that you defined in `meta()`
/// // above.
/// let database = executor.context();
/// match field_name {
/// // Because scalars are defined with another `Context` associated type, you must use
/// // `resolve_with_ctx` here to make the `executor` perform automatic type conversion
/// // of its argument.
/// "id" => executor.resolve_with_ctx(info, &self.id),
/// "name" => executor.resolve_with_ctx(info, &self.name),
///
/// // You pass a vector of `User` objects to `executor.resolve`, and it will determine
/// // which fields of the sub-objects to actually resolve based on the query.
/// // The `executor` instance keeps track of its current position in the query.
/// "friends" => executor.resolve(info,
/// &self.friend_ids.iter()
/// .filter_map(|id| database.users.get(id))
/// .collect::<Vec<_>>()
/// ),
///
/// // We can only reach this panic in two cases: either a mismatch between the defined
/// // schema in `meta()` above, or a validation failure caused by a bug in this library.
/// //
/// // In either of those two cases, the only reasonable way out is to panic the thread.
/// _ => panic!("Field {} not found on type User", field_name),
/// }
/// }
/// }
/// ```
///
/// [3]: https://spec.graphql.org/June2018/#sec-Objects
pub trait GraphQLType<S = DefaultScalarValue>: GraphQLValue<S>
where
S: ScalarValue,
{
/// Returns name of this [`GraphQLType`] to expose.
///
/// This function will be called multiple times during schema construction. It must _not_
/// perform any calculation and _always_ return the same value.
fn name(info: &Self::TypeInfo) -> Option<&str>;
/// Returns [`MetaType`] representing this [`GraphQLType`].
fn meta<'r>(info: &Self::TypeInfo, registry: &mut Registry<'r, S>) -> MetaType<'r, S>
where
S: 'r;
}
/// Resolver logic for queries'/mutations' selection set.
/// Calls appropriate resolver method for each field or fragment found
/// and then merges returned values into `result` or pushes errors to
/// field's/fragment's sub executor.
///
/// Returns false if any errors occurred and true otherwise.
pub(crate) fn resolve_selection_set_into<T, S>(
instance: &T,
info: &T::TypeInfo,
selection_set: &[Selection<S>],
executor: &Executor<T::Context, S>,
result: &mut Object<S>,
) -> bool
where
T: GraphQLValue<S> + ?Sized,
S: ScalarValue,
{
let meta_type = executor
.schema()
.concrete_type_by_name(
instance
.type_name(info)
.expect("Resolving named type's selection set")
.as_ref(),
)
.expect("Type not found in schema");
for selection in selection_set {
match *selection {
Selection::Field(Spanning {
item: ref f,
start: ref start_pos,
..
}) => {
if is_excluded(&f.directives, executor.variables()) {
continue;
}
let response_name = f.alias.as_ref().unwrap_or(&f.name).item;
if f.name.item == "__typename" {
result.add_field(
response_name,
Value::scalar(instance.concrete_type_name(executor.context(), info)),
);
continue;
}
let meta_field = meta_type.field_by_name(f.name.item).unwrap_or_else(|| {
panic!(
"Field {} not found on type {:?}",
f.name.item,
meta_type.name()
)
});
let exec_vars = executor.variables();
let sub_exec = executor.field_sub_executor(
response_name,
f.name.item,
*start_pos,
f.selection_set.as_ref().map(|v| &v[..]),
);
let field_result = instance.resolve_field(
info,
f.name.item,
&Arguments::new(
f.arguments.as_ref().map(|m| {
m.item
.iter()
.map(|&(ref k, ref v)| {
(k.item, v.item.clone().into_const(exec_vars))
})
.collect()
}),
&meta_field.arguments,
),
&sub_exec,
);
match field_result {
Ok(Value::Null) if meta_field.field_type.is_non_null() => return false,
Ok(v) => merge_key_into(result, response_name, v),
Err(e) => {
sub_exec.push_error_at(e, *start_pos);
if meta_field.field_type.is_non_null() {
return false;
}
result.add_field(response_name, Value::null());
}
}
}
Selection::FragmentSpread(Spanning {
item: ref spread,
start: ref start_pos,
..
}) => {
if is_excluded(&spread.directives, executor.variables()) {
continue;
}
let fragment = &executor
.fragment_by_name(spread.name.item)
.expect("Fragment could not be found");
let sub_exec = executor.type_sub_executor(
Some(fragment.type_condition.item),
Some(&fragment.selection_set[..]),
);
let concrete_type_name = instance.concrete_type_name(sub_exec.context(), info);
let type_name = instance.type_name(info);
if executor
.schema()
.is_named_subtype(&concrete_type_name, fragment.type_condition.item)
|| Some(fragment.type_condition.item) == type_name
{
let sub_result = instance.resolve_into_type(
info,
&concrete_type_name,
Some(&fragment.selection_set[..]),
&sub_exec,
);
if let Ok(Value::Object(object)) = sub_result {
for (k, v) in object {
merge_key_ | TypeKind | identifier_name |
|
error.rs | // Copyright 2016 evic Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use std::io;
use std::fmt;
use std::error;
/// Error types.
#[derive(Debug)]
pub enum Error {
/// An error originating from reading or writing to the underlying buffer.
Io(io::Error),
/// An error related to the provided firmware.
Firmware(String),
/// An error originating from the main application.
CliError(String)
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::Io(ref err) => write!(f, "IO error: {}", err),
Error::Firmware(ref err) => write!(f, "Firmware error: {}", err),
Error::CliError(ref err) => write!(f, "CLI error: {}", err),
}
}
}
impl error::Error for Error {
fn description(&self) -> &str {
match *self {
Error::Io(..) => "eVic IO error",
Error::Firmware(..) => "eVic firmware error",
Error::CliError(..) => "eVic CLI error",
}
}
fn cause(&self) -> Option<&error::Error> {
match *self {
Error::Io(ref err) => Some(err),
_ => None
}
}
}
impl From<io::Error> for Error {
fn | (error: io::Error) -> Error {
Error::Io(error)
}
}
| from | identifier_name |
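A note on how the `From<io::Error>` impl above is usually exercised: the `?` operator calls `From::from` on any error it propagates, so an `io::Error` raised inside a function returning `Result<_, Error>` is converted into `Error::Io` automatically. The sketch below assumes the `Error` enum and its `From` impl from this file are in scope; `read_firmware` and its path argument are illustrative names, not part of the crate.

use std::fs::File;
use std::io::Read;

// Hypothetical helper: any `io::Error` from `File::open` or `read_to_end` is
// converted into `Error::Io` by the `?` operator via the `From<io::Error>` impl.
fn read_firmware(path: &str) -> Result<Vec<u8>, Error> {
    let mut file = File::open(path)?;
    let mut buf = Vec::new();
    file.read_to_end(&mut buf)?;
    if buf.is_empty() {
        // Domain-specific failures use the explicit variants instead.
        return Err(Error::Firmware("empty firmware image".to_string()));
    }
    Ok(buf)
}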
error.rs | // Copyright 2016 evic Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use std::io;
use std::fmt;
use std::error;
/// Error types.
#[derive(Debug)]
pub enum Error {
/// An error originating from reading or writing to the underlying buffer.
Io(io::Error),
/// An error related to the provided firmware.
Firmware(String),
/// An error originating from the main application.
CliError(String)
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result |
}
impl error::Error for Error {
fn description(&self) -> &str {
match *self {
Error::Io(..) => "eVic IO error",
Error::Firmware(..) => "eVic firmware error",
Error::CliError(..) => "eVic CLI error",
}
}
fn cause(&self) -> Option<&error::Error> {
match *self {
Error::Io(ref err) => Some(err),
_ => None
}
}
}
impl From<io::Error> for Error {
fn from(error: io::Error) -> Error {
Error::Io(error)
}
}
| {
match *self {
Error::Io(ref err) => write!(f, "IO error: {}", err),
Error::Firmware(ref err) => write!(f, "Firmware error: {}", err),
Error::CliError(ref err) => write!(f, "CLI error: {}", err),
}
} | identifier_body |
error.rs | // Copyright 2016 evic Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use std::io;
use std::fmt;
use std::error;
/// Error types.
#[derive(Debug)]
pub enum Error {
/// An error originating from reading or writing to the underlying buffer.
Io(io::Error),
/// An error related to the provided firmware.
Firmware(String),
/// An error originating from the main application.
CliError(String)
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::Io(ref err) => write!(f, "IO error: {}", err),
Error::Firmware(ref err) => write!(f, "Firmware error: {}", err),
Error::CliError(ref err) => write!(f, "CLI error: {}", err),
}
}
}
impl error::Error for Error {
fn description(&self) -> &str {
match *self {
Error::Io(..) => "eVic IO error",
Error::Firmware(..) => "eVic firmware error",
Error::CliError(..) => "eVic CLI error",
}
}
fn cause(&self) -> Option<&error::Error> {
match *self {
Error::Io(ref err) => Some(err),
_ => None
}
}
}
impl From<io::Error> for Error {
fn from(error: io::Error) -> Error {
Error::Io(error) | } | } | random_line_split |
key_templates.rs | // Copyright 2020 The Tink-Rust Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and | //
////////////////////////////////////////////////////////////////////////////////
//! This module contains pre-generated [`KeyTemplate`] instances for deterministic AEAD.
use tink_proto::{prost::Message, KeyTemplate};
/// Return a [`KeyTemplate`](tink_proto::KeyTemplate) that generates an AES-SIV key.
pub fn aes_siv_key_template() -> KeyTemplate {
let format = tink_proto::AesSivKeyFormat {
key_size: 64,
version: crate::AES_SIV_KEY_VERSION,
};
let mut serialized_format = Vec::new();
format.encode(&mut serialized_format).unwrap(); // safe: proto-encode
KeyTemplate {
type_url: crate::AES_SIV_TYPE_URL.to_string(),
output_prefix_type: tink_proto::OutputPrefixType::Tink as i32,
value: serialized_format,
}
} | // limitations under the License. | random_line_split |
key_templates.rs | // Copyright 2020 The Tink-Rust Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////////
//! This module contains pre-generated [`KeyTemplate`] instances for deterministic AEAD.
use tink_proto::{prost::Message, KeyTemplate};
/// Return a [`KeyTemplate`](tink_proto::KeyTemplate) that generates an AES-SIV key.
pub fn | () -> KeyTemplate {
let format = tink_proto::AesSivKeyFormat {
key_size: 64,
version: crate::AES_SIV_KEY_VERSION,
};
let mut serialized_format = Vec::new();
format.encode(&mut serialized_format).unwrap(); // safe: proto-encode
KeyTemplate {
type_url: crate::AES_SIV_TYPE_URL.to_string(),
output_prefix_type: tink_proto::OutputPrefixType::Tink as i32,
value: serialized_format,
}
}
| aes_siv_key_template | identifier_name |
key_templates.rs | // Copyright 2020 The Tink-Rust Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////////
//! This module contains pre-generated [`KeyTemplate`] instances for deterministic AEAD.
use tink_proto::{prost::Message, KeyTemplate};
/// Return a [`KeyTemplate`](tink_proto::KeyTemplate) that generates an AES-SIV key.
pub fn aes_siv_key_template() -> KeyTemplate | {
let format = tink_proto::AesSivKeyFormat {
key_size: 64,
version: crate::AES_SIV_KEY_VERSION,
};
let mut serialized_format = Vec::new();
format.encode(&mut serialized_format).unwrap(); // safe: proto-encode
KeyTemplate {
type_url: crate::AES_SIV_TYPE_URL.to_string(),
output_prefix_type: tink_proto::OutputPrefixType::Tink as i32,
value: serialized_format,
}
} | identifier_body |
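As a quick illustration of what the template built above actually carries (a sketch, not part of the file): the `value` bytes are the proto-encoded `AesSivKeyFormat`, so they can be decoded back with prost and checked. Calling the constructor through `tink_daead::` assumes this module is re-exported from the `tink-daead` crate as in upstream tink-rust; everything else uses only types the file itself imports.

use tink_proto::{prost::Message, AesSivKeyFormat};

fn main() {
    // Assumed public path for the constructor defined above.
    let template = tink_daead::aes_siv_key_template();

    // `value` holds the serialized AesSivKeyFormat proto; decoding it recovers
    // the parameters chosen in `aes_siv_key_template()`.
    let format = AesSivKeyFormat::decode(template.value.as_slice())
        .expect("template value should be a valid AesSivKeyFormat proto");

    assert_eq!(format.key_size, 64);
    println!("type_url = {}", template.type_url);
}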
|
response.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Helpers for decoding and verifying responses for headers.
use std::fmt;
use ethcore::encoded;
use ethcore::header::Header;
use light::request::{HashOrNumber, CompleteHeadersRequest as HeadersRequest};
use rlp::DecoderError;
use bigint::hash::H256;
/// Errors found when decoding headers and verifying with basic constraints.
#[derive(Debug, PartialEq)]
pub enum BasicError {
/// Wrong skip value: expected, found (if any).
WrongSkip(u64, Option<u64>),
/// Wrong start number.
WrongStartNumber(u64, u64),
/// Wrong start hash.
WrongStartHash(H256, H256),
/// Too many headers.
TooManyHeaders(usize, usize),
/// Decoder error.
Decoder(DecoderError),
}
impl From<DecoderError> for BasicError {
fn from(err: DecoderError) -> Self {
BasicError::Decoder(err)
}
}
impl fmt::Display for BasicError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Header response verification error: ")?;
match *self {
BasicError::WrongSkip(ref exp, ref got)
=> write!(f, "wrong skip (expected {}, got {:?})", exp, got),
BasicError::WrongStartNumber(ref exp, ref got)
=> write!(f, "wrong start number (expected {}, got {})", exp, got),
BasicError::WrongStartHash(ref exp, ref got)
=> write!(f, "wrong start hash (expected {}, got {})", exp, got),
BasicError::TooManyHeaders(ref max, ref got)
=> write!(f, "too many headers (max {}, got {})", max, got),
BasicError::Decoder(ref err)
=> write!(f, "{}", err),
}
}
}
/// Request verification constraint.
pub trait Constraint {
type Error;
/// Verify headers against this.
fn verify(&self, headers: &[Header], reverse: bool) -> Result<(), Self::Error>;
}
/// Do basic verification of provided headers against a request.
pub fn verify(headers: &[encoded::Header], request: &HeadersRequest) -> Result<Vec<Header>, BasicError> {
let headers: Vec<_> = headers.iter().map(|h| h.decode()).collect();
let reverse = request.reverse;
Max(request.max as usize).verify(&headers, reverse)?;
match request.start {
HashOrNumber::Number(ref num) => StartsAtNumber(*num).verify(&headers, reverse)?,
HashOrNumber::Hash(ref hash) => StartsAtHash(*hash).verify(&headers, reverse)?,
}
SkipsBetween(request.skip).verify(&headers, reverse)?;
Ok(headers)
}
struct StartsAtNumber(u64);
struct StartsAtHash(H256);
struct SkipsBetween(u64);
struct Max(usize);
impl Constraint for StartsAtNumber {
type Error = BasicError;
fn verify(&self, headers: &[Header], _reverse: bool) -> Result<(), BasicError> {
headers.first().map_or(Ok(()), |h| {
if h.number() == self.0 {
Ok(())
} else {
Err(BasicError::WrongStartNumber(self.0, h.number()))
}
})
}
}
impl Constraint for StartsAtHash {
type Error = BasicError;
fn verify(&self, headers: &[Header], _reverse: bool) -> Result<(), BasicError> {
headers.first().map_or(Ok(()), |h| {
if h.hash() == self.0 {
Ok(())
} else {
Err(BasicError::WrongStartHash(self.0, h.hash()))
}
})
}
}
impl Constraint for SkipsBetween {
type Error = BasicError;
fn verify(&self, headers: &[Header], reverse: bool) -> Result<(), BasicError> {
for pair in headers.windows(2) {
let (low, high) = if reverse { (&pair[1], &pair[0]) } else { (&pair[0], &pair[1]) };
if low.number() >= high.number() { return Err(BasicError::WrongSkip(self.0, None)) }
let skip = (high.number() - low.number()) - 1;
if skip != self.0 { return Err(BasicError::WrongSkip(self.0, Some(skip))) }
}
Ok(())
}
}
impl Constraint for Max {
type Error = BasicError;
fn verify(&self, headers: &[Header], _reverse: bool) -> Result<(), BasicError> {
match headers.len() > self.0 {
true => Err(BasicError::TooManyHeaders(self.0, headers.len())),
false => Ok(())
}
}
}
#[cfg(test)]
mod tests {
use ethcore::encoded;
use ethcore::header::Header;
use light::request::CompleteHeadersRequest as HeadersRequest;
use super::*;
#[test]
fn sequential_forward() {
let request = HeadersRequest {
start: 10.into(),
max: 30,
skip: 0,
reverse: false,
};
let mut parent_hash = None;
let headers: Vec<_> = (0..25).map(|x| x + 10).map(|x| {
let mut header = Header::default();
header.set_number(x);
if let Some(parent_hash) = parent_hash {
header.set_parent_hash(parent_hash);
}
parent_hash = Some(header.hash());
encoded::Header::new(::rlp::encode(&header).into_vec())
}).collect();
assert!(verify(&headers, &request).is_ok());
}
#[test]
fn sequential_backward() {
let request = HeadersRequest {
start: 34.into(),
max: 30,
skip: 0,
reverse: true,
};
let mut parent_hash = None;
let headers: Vec<_> = (0..25).map(|x| x + 10).rev().map(|x| {
let mut header = Header::default();
header.set_number(x);
if let Some(parent_hash) = parent_hash {
header.set_parent_hash(parent_hash);
}
parent_hash = Some(header.hash());
encoded::Header::new(::rlp::encode(&header).into_vec())
}).collect();
assert!(verify(&headers, &request).is_ok());
}
#[test]
fn too_many() {
let request = HeadersRequest {
start: 10.into(),
max: 20,
skip: 0,
reverse: false,
};
let mut parent_hash = None;
let headers: Vec<_> = (0..25).map(|x| x + 10).map(|x| {
let mut header = Header::default();
header.set_number(x);
if let Some(parent_hash) = parent_hash {
header.set_parent_hash(parent_hash);
}
parent_hash = Some(header.hash());
encoded::Header::new(::rlp::encode(&header).into_vec())
}).collect();
assert_eq!(verify(&headers, &request), Err(BasicError::TooManyHeaders(20, 25)));
}
#[test]
fn wrong_skip() {
let request = HeadersRequest {
start: 10.into(),
max: 30,
skip: 5,
reverse: false,
};
let headers: Vec<_> = (0..25).map(|x| x * 3).map(|x| x + 10).map(|x| { |
assert_eq!(verify(&headers, &request), Err(BasicError::WrongSkip(5, Some(2))));
}
} | let mut header = Header::default();
header.set_number(x);
encoded::Header::new(::rlp::encode(&header).into_vec())
}).collect(); | random_line_split |
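The `Constraint` trait above is the extension point here: each rule is a small struct with a `verify` method, and `verify()` chains them with `?`. A hypothetical extra rule in the same style is sketched below; `Min` and `TooFewHeaders` are illustrative names, not part of Parity's code, and the sketch assumes it lives alongside the other constraints in this module so `Constraint` and `ethcore::header::Header` are in scope.

// Hypothetical constraint with its own associated error type:
// require at least `self.0` headers in the response.
struct Min(usize);

#[derive(Debug, PartialEq)]
struct TooFewHeaders { wanted: usize, got: usize }

impl Constraint for Min {
    type Error = TooFewHeaders;

    fn verify(&self, headers: &[Header], _reverse: bool) -> Result<(), TooFewHeaders> {
        if headers.len() < self.0 {
            Err(TooFewHeaders { wanted: self.0, got: headers.len() })
        } else {
            Ok(())
        }
    }
}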
response.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Helpers for decoding and verifying responses for headers.
use std::fmt;
use ethcore::encoded;
use ethcore::header::Header;
use light::request::{HashOrNumber, CompleteHeadersRequest as HeadersRequest};
use rlp::DecoderError;
use bigint::hash::H256;
/// Errors found when decoding headers and verifying with basic constraints.
#[derive(Debug, PartialEq)]
pub enum BasicError {
/// Wrong skip value: expected, found (if any).
WrongSkip(u64, Option<u64>),
/// Wrong start number.
WrongStartNumber(u64, u64),
/// Wrong start hash.
WrongStartHash(H256, H256),
/// Too many headers.
TooManyHeaders(usize, usize),
/// Decoder error.
Decoder(DecoderError),
}
impl From<DecoderError> for BasicError {
fn from(err: DecoderError) -> Self {
BasicError::Decoder(err)
}
}
impl fmt::Display for BasicError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Header response verification error: ")?;
match *self {
BasicError::WrongSkip(ref exp, ref got)
=> write!(f, "wrong skip (expected {}, got {:?})", exp, got),
BasicError::WrongStartNumber(ref exp, ref got)
=> write!(f, "wrong start number (expected {}, got {})", exp, got),
BasicError::WrongStartHash(ref exp, ref got)
=> write!(f, "wrong start hash (expected {}, got {})", exp, got),
BasicError::TooManyHeaders(ref max, ref got)
=> write!(f, "too many headers (max {}, got {})", max, got),
BasicError::Decoder(ref err)
=> write!(f, "{}", err),
}
}
}
/// Request verification constraint.
pub trait Constraint {
type Error;
/// Verify headers against this.
fn verify(&self, headers: &[Header], reverse: bool) -> Result<(), Self::Error>;
}
/// Do basic verification of provided headers against a request.
pub fn verify(headers: &[encoded::Header], request: &HeadersRequest) -> Result<Vec<Header>, BasicError> {
let headers: Vec<_> = headers.iter().map(|h| h.decode()).collect();
let reverse = request.reverse;
Max(request.max as usize).verify(&headers, reverse)?;
match request.start {
HashOrNumber::Number(ref num) => StartsAtNumber(*num).verify(&headers, reverse)?,
HashOrNumber::Hash(ref hash) => StartsAtHash(*hash).verify(&headers, reverse)?,
}
SkipsBetween(request.skip).verify(&headers, reverse)?;
Ok(headers)
}
struct StartsAtNumber(u64);
struct StartsAtHash(H256);
struct SkipsBetween(u64);
struct Max(usize);
impl Constraint for StartsAtNumber {
type Error = BasicError;
fn verify(&self, headers: &[Header], _reverse: bool) -> Result<(), BasicError> {
headers.first().map_or(Ok(()), |h| {
if h.number() == self.0 {
Ok(())
} else {
Err(BasicError::WrongStartNumber(self.0, h.number()))
}
})
}
}
impl Constraint for StartsAtHash {
type Error = BasicError;
fn verify(&self, headers: &[Header], _reverse: bool) -> Result<(), BasicError> {
headers.first().map_or(Ok(()), |h| {
if h.hash() == self.0 {
Ok(())
} else {
Err(BasicError::WrongStartHash(self.0, h.hash()))
}
})
}
}
impl Constraint for SkipsBetween {
type Error = BasicError;
fn verify(&self, headers: &[Header], reverse: bool) -> Result<(), BasicError> {
for pair in headers.windows(2) {
let (low, high) = if reverse { (&pair[1], &pair[0]) } else { (&pair[0], &pair[1]) };
if low.number() >= high.number() { return Err(BasicError::WrongSkip(self.0, None)) }
let skip = (high.number() - low.number()) - 1;
if skip != self.0 { return Err(BasicError::WrongSkip(self.0, Some(skip))) }
}
Ok(())
}
}
impl Constraint for Max {
type Error = BasicError;
fn | (&self, headers: &[Header], _reverse: bool) -> Result<(), BasicError> {
match headers.len() > self.0 {
true => Err(BasicError::TooManyHeaders(self.0, headers.len())),
false => Ok(())
}
}
}
#[cfg(test)]
mod tests {
use ethcore::encoded;
use ethcore::header::Header;
use light::request::CompleteHeadersRequest as HeadersRequest;
use super::*;
#[test]
fn sequential_forward() {
let request = HeadersRequest {
start: 10.into(),
max: 30,
skip: 0,
reverse: false,
};
let mut parent_hash = None;
let headers: Vec<_> = (0..25).map(|x| x + 10).map(|x| {
let mut header = Header::default();
header.set_number(x);
if let Some(parent_hash) = parent_hash {
header.set_parent_hash(parent_hash);
}
parent_hash = Some(header.hash());
encoded::Header::new(::rlp::encode(&header).into_vec())
}).collect();
assert!(verify(&headers, &request).is_ok());
}
#[test]
fn sequential_backward() {
let request = HeadersRequest {
start: 34.into(),
max: 30,
skip: 0,
reverse: true,
};
let mut parent_hash = None;
let headers: Vec<_> = (0..25).map(|x| x + 10).rev().map(|x| {
let mut header = Header::default();
header.set_number(x);
if let Some(parent_hash) = parent_hash {
header.set_parent_hash(parent_hash);
}
parent_hash = Some(header.hash());
encoded::Header::new(::rlp::encode(&header).into_vec())
}).collect();
assert!(verify(&headers, &request).is_ok());
}
#[test]
fn too_many() {
let request = HeadersRequest {
start: 10.into(),
max: 20,
skip: 0,
reverse: false,
};
let mut parent_hash = None;
let headers: Vec<_> = (0..25).map(|x| x + 10).map(|x| {
let mut header = Header::default();
header.set_number(x);
if let Some(parent_hash) = parent_hash {
header.set_parent_hash(parent_hash);
}
parent_hash = Some(header.hash());
encoded::Header::new(::rlp::encode(&header).into_vec())
}).collect();
assert_eq!(verify(&headers, &request), Err(BasicError::TooManyHeaders(20, 25)));
}
#[test]
fn wrong_skip() {
let request = HeadersRequest {
start: 10.into(),
max: 30,
skip: 5,
reverse: false,
};
let headers: Vec<_> = (0..25).map(|x| x * 3).map(|x| x + 10).map(|x| {
let mut header = Header::default();
header.set_number(x);
encoded::Header::new(::rlp::encode(&header).into_vec())
}).collect();
assert_eq!(verify(&headers, &request), Err(BasicError::WrongSkip(5, Some(2))));
}
}
| verify | identifier_name |
response.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Helpers for decoding and verifying responses for headers.
use std::fmt;
use ethcore::encoded;
use ethcore::header::Header;
use light::request::{HashOrNumber, CompleteHeadersRequest as HeadersRequest};
use rlp::DecoderError;
use bigint::hash::H256;
/// Errors found when decoding headers and verifying with basic constraints.
#[derive(Debug, PartialEq)]
pub enum BasicError {
/// Wrong skip value: expected, found (if any).
WrongSkip(u64, Option<u64>),
/// Wrong start number.
WrongStartNumber(u64, u64),
/// Wrong start hash.
WrongStartHash(H256, H256),
/// Too many headers.
TooManyHeaders(usize, usize),
/// Decoder error.
Decoder(DecoderError),
}
impl From<DecoderError> for BasicError {
fn from(err: DecoderError) -> Self {
BasicError::Decoder(err)
}
}
impl fmt::Display for BasicError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Header response verification error: ")?;
match *self {
BasicError::WrongSkip(ref exp, ref got)
=> write!(f, "wrong skip (expected {}, got {:?})", exp, got),
BasicError::WrongStartNumber(ref exp, ref got)
=> write!(f, "wrong start number (expected {}, got {})", exp, got),
BasicError::WrongStartHash(ref exp, ref got)
=> write!(f, "wrong start hash (expected {}, got {})", exp, got),
BasicError::TooManyHeaders(ref max, ref got)
=> write!(f, "too many headers (max {}, got {})", max, got),
BasicError::Decoder(ref err)
=> write!(f, "{}", err),
}
}
}
/// Request verification constraint.
pub trait Constraint {
type Error;
/// Verify headers against this.
fn verify(&self, headers: &[Header], reverse: bool) -> Result<(), Self::Error>;
}
/// Do basic verification of provided headers against a request.
pub fn verify(headers: &[encoded::Header], request: &HeadersRequest) -> Result<Vec<Header>, BasicError> {
let headers: Vec<_> = headers.iter().map(|h| h.decode()).collect();
let reverse = request.reverse;
Max(request.max as usize).verify(&headers, reverse)?;
match request.start {
HashOrNumber::Number(ref num) => StartsAtNumber(*num).verify(&headers, reverse)?,
HashOrNumber::Hash(ref hash) => StartsAtHash(*hash).verify(&headers, reverse)?,
}
SkipsBetween(request.skip).verify(&headers, reverse)?;
Ok(headers)
}
struct StartsAtNumber(u64);
struct StartsAtHash(H256);
struct SkipsBetween(u64);
struct Max(usize);
impl Constraint for StartsAtNumber {
type Error = BasicError;
fn verify(&self, headers: &[Header], _reverse: bool) -> Result<(), BasicError> {
headers.first().map_or(Ok(()), |h| {
if h.number() == self.0 {
Ok(())
} else {
Err(BasicError::WrongStartNumber(self.0, h.number()))
}
})
}
}
impl Constraint for StartsAtHash {
type Error = BasicError;
fn verify(&self, headers: &[Header], _reverse: bool) -> Result<(), BasicError> {
headers.first().map_or(Ok(()), |h| {
if h.hash() == self.0 {
Ok(())
} else {
Err(BasicError::WrongStartHash(self.0, h.hash()))
}
})
}
}
impl Constraint for SkipsBetween {
type Error = BasicError;
fn verify(&self, headers: &[Header], reverse: bool) -> Result<(), BasicError> {
for pair in headers.windows(2) {
let (low, high) = if reverse { (&pair[1], &pair[0]) } else { (&pair[0], &pair[1]) };
if low.number() >= high.number() { return Err(BasicError::WrongSkip(self.0, None)) }
let skip = (high.number() - low.number()) - 1;
if skip != self.0 { return Err(BasicError::WrongSkip(self.0, Some(skip))) }
}
Ok(())
}
}
impl Constraint for Max {
type Error = BasicError;
fn verify(&self, headers: &[Header], _reverse: bool) -> Result<(), BasicError> {
match headers.len() > self.0 {
true => Err(BasicError::TooManyHeaders(self.0, headers.len())),
false => Ok(())
}
}
}
#[cfg(test)]
mod tests {
use ethcore::encoded;
use ethcore::header::Header;
use light::request::CompleteHeadersRequest as HeadersRequest;
use super::*;
#[test]
fn sequential_forward() | }).collect();
assert!(verify(&headers, &request).is_ok());
}
#[test]
fn sequential_backward() {
let request = HeadersRequest {
start: 34.into(),
max: 30,
skip: 0,
reverse: true,
};
let mut parent_hash = None;
let headers: Vec<_> = (0..25).map(|x| x + 10).rev().map(|x| {
let mut header = Header::default();
header.set_number(x);
if let Some(parent_hash) = parent_hash {
header.set_parent_hash(parent_hash);
}
parent_hash = Some(header.hash());
encoded::Header::new(::rlp::encode(&header).into_vec())
}).collect();
assert!(verify(&headers, &request).is_ok());
}
#[test]
fn too_many() {
let request = HeadersRequest {
start: 10.into(),
max: 20,
skip: 0,
reverse: false,
};
let mut parent_hash = None;
let headers: Vec<_> = (0..25).map(|x| x + 10).map(|x| {
let mut header = Header::default();
header.set_number(x);
if let Some(parent_hash) = parent_hash {
header.set_parent_hash(parent_hash);
}
parent_hash = Some(header.hash());
encoded::Header::new(::rlp::encode(&header).into_vec())
}).collect();
assert_eq!(verify(&headers, &request), Err(BasicError::TooManyHeaders(20, 25)));
}
#[test]
fn wrong_skip() {
let request = HeadersRequest {
start: 10.into(),
max: 30,
skip: 5,
reverse: false,
};
let headers: Vec<_> = (0..25).map(|x| x * 3).map(|x| x + 10).map(|x| {
let mut header = Header::default();
header.set_number(x);
encoded::Header::new(::rlp::encode(&header).into_vec())
}).collect();
assert_eq!(verify(&headers, &request), Err(BasicError::WrongSkip(5, Some(2))));
}
}
| {
let request = HeadersRequest {
start: 10.into(),
max: 30,
skip: 0,
reverse: false,
};
let mut parent_hash = None;
let headers: Vec<_> = (0..25).map(|x| x + 10).map(|x| {
let mut header = Header::default();
header.set_number(x);
if let Some(parent_hash) = parent_hash {
header.set_parent_hash(parent_hash);
}
parent_hash = Some(header.hash());
encoded::Header::new(::rlp::encode(&header).into_vec()) | identifier_body |