content
stringlengths
228
999k
pred_label
stringclasses
1 value
pred_score
float64
0.5
1
KickJava   Java API By Example, From Geeks To Geeks. Java > Open Source Codes > org > dbunit > operation > AbstractBatchOperation 1 /* 2  * 3  * The DbUnit Database Testing Framework 4  * Copyright (C)2002-2004, DbUnit.org 5  * 6  * This library is free software; you can redistribute it and/or 7  * modify it under the terms of the GNU Lesser General Public 8  * License as published by the Free Software Foundation; either 9  * version 2.1 of the License, or (at your option) any later version. 10  * 11  * This library is distributed in the hope that it will be useful, 12  * but WITHOUT ANY WARRANTY; without even the implied warranty of 13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14  * Lesser General Public License for more details. 15  * 16  * You should have received a copy of the GNU Lesser General Public 17  * License along with this library; if not, write to the Free Software 18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19  * 20  */ 21 22 package org.dbunit.operation; 23 24 import org.dbunit.DatabaseUnitException; 25 import org.dbunit.database.DatabaseConfig; 26 import org.dbunit.database.IDatabaseConnection; 27 import org.dbunit.database.statement.IPreparedBatchStatement; 28 import org.dbunit.database.statement.IStatementFactory; 29 import org.dbunit.dataset.Column; 30 import org.dbunit.dataset.DataSetException; 31 import org.dbunit.dataset.IDataSet; 32 import org.dbunit.dataset.ITable; 33 import org.dbunit.dataset.ITableIterator; 34 import org.dbunit.dataset.ITableMetaData; 35 import org.dbunit.dataset.RowOutOfBoundsException; 36 37 import java.sql.SQLException JavaDoc; 38 import java.util.BitSet JavaDoc; 39 40 /** 41  * Base implementation for database operation that are executed in batch. 42  * 43  * @author Manuel Laflamme 44  * @version $Revision: 1.28 $ 45  * @since Feb 19, 2002 46  */ 47 public abstract class AbstractBatchOperation extends AbstractOperation 48 { 49     private static final BitSet JavaDoc EMPTY_BITSET = new BitSet JavaDoc(); 50     protected boolean _reverseRowOrder = false; 51 52     static boolean isEmpty(ITable table) throws DataSetException 53     { 54         Column[] columns = table.getTableMetaData().getColumns(); 55 56         // No columns = empty 57 if (columns.length == 0) 58         { 59             return true; 60         } 61 62         // Try to fetch first table value 63 try 64         { 65             table.getValue(0, columns[0].getColumnName()); 66             return false; 67         } 68         catch (RowOutOfBoundsException e) 69         { 70             // Not able to access first row thus empty 71 return true; 72         } 73     } 74 75     /** 76      * Returns list of tables this operation is applied to. This method 77      * allow subclass to do filtering. 78      */ 79     protected ITableIterator iterator(IDataSet dataSet) throws DatabaseUnitException 80     { 81         return dataSet.iterator(); 82     } 83 84     /** 85      * Returns mapping of columns to ignore by this operation. Each bit set represent 86      * a column to ignore. 87      */ 88     BitSet JavaDoc getIgnoreMapping(ITable table, int row) 89             throws DataSetException 90     { 91         return EMPTY_BITSET; 92     } 93 94     /** 95      * Returns false if the specified table row have a different ignore mapping 96      * than the specified mapping. 97      */ 98     boolean equalsIgnoreMapping(BitSet JavaDoc ignoreMapping, ITable table, 99             int row) throws DataSetException 100     { 101         return true; 102     } 103 104     abstract OperationData getOperationData(ITableMetaData metaData, 105             BitSet JavaDoc ignoreMapping, IDatabaseConnection connection) throws DataSetException; 106 107     //////////////////////////////////////////////////////////////////////////// 108 // DatabaseOperation class 109 110     public void execute(IDatabaseConnection connection, IDataSet dataSet) 111             throws DatabaseUnitException, SQLException JavaDoc 112     { 113         DatabaseConfig databaseConfig = connection.getConfig(); 114         IStatementFactory factory = (IStatementFactory)databaseConfig.getProperty( 115                 DatabaseConfig.PROPERTY_STATEMENT_FACTORY); 116 117         // for each table 118 ITableIterator iterator = iterator(dataSet); 119         while (iterator.next()) 120         { 121             ITable table = iterator.getTable(); 122 123             // Do not process empty table 124 if (isEmpty(table)) 125             { 126                 continue; 127             } 128 129             ITableMetaData metaData = getOperationMetaData(connection, 130                     table.getTableMetaData()); 131             BitSet JavaDoc ignoreMapping = null; 132             OperationData operationData = null; 133             IPreparedBatchStatement statement = null; 134 135             try 136             { 137                 // For each row 138 int start = _reverseRowOrder ? table.getRowCount() - 1 : 0; 139                 int increment = _reverseRowOrder ? -1 : 1; 140 141                 try 142                 { 143                     for (int i = start; ; i = i + increment) 144                     { 145                         int row = i; 146 147                         // If current row have a diffrent ignore value mapping than 148 // previous one, we generate a new statement 149 if (ignoreMapping == null || !equalsIgnoreMapping(ignoreMapping, table, row)) 150                         { 151                             // Execute and close previous statement 152 if (statement != null) 153                             { 154                                 statement.executeBatch(); 155                                 statement.clearBatch(); 156                                 statement.close(); 157                             } 158 159                             ignoreMapping = getIgnoreMapping(table, row); 160                             operationData = getOperationData(metaData, ignoreMapping, connection); 161                             statement = factory.createPreparedBatchStatement( 162                                     operationData.getSql(), connection); 163                         } 164 165 166                         // for each column 167 Column[] columns = operationData.getColumns(); 168                         for (int j = 0; j < columns.length; j++) 169                         { 170                             // Bind value only if not in ignore mapping 171 if (!ignoreMapping.get(j)) 172                             { 173                                 Column column = columns[j]; 174                                 statement.addValue(table.getValue(row, 175                                         column.getColumnName()), column.getDataType()); 176                             } 177                         } 178                         statement.addBatch(); 179                     } 180                 } 181                 catch (RowOutOfBoundsException e) 182                 { 183                     // end of table 184 } 185 186                 statement.executeBatch(); 187                 statement.clearBatch(); 188             } 189             finally 190             { 191                 if (statement != null) 192                 { 193                     statement.close(); 194                 } 195             } 196         } 197     } 198 } 199 200 201 202 203 204 205 206 207 208 209 210 211 Popular Tags
__label__pos
0.99821
PowerShell Update SQL Server Database from CSV PowerShell code that updates a SQL Server database from a CSV file, only modifying the values that have changed. Code Generator | 6 months ago Full Code Initializing... Oops, something went wrong. Please try again in a few moments. <# .SYNOPSIS Updates a SQL Server database from a CSV file, only modifying the values that have changed. .DESCRIPTION This function reads a CSV file and compares the data with the corresponding records in a SQL Server database table. It updates the database table with the new values from the CSV file, only modifying the columns that have changed. .PARAMETER Server The name of the SQL Server instance. .PARAMETER Database The name of the database. .PARAMETER Table The name of the table to be updated. .PARAMETER CSVPath The path to the CSV file. .EXAMPLE Update-SqlServerFromCsv -Server "localhost" -Database "MyDatabase" -Table "MyTable" -CSVPath "C:\data.csv" Updates the "MyTable" table in the "MyDatabase" database on the "localhost" SQL Server instance with the data from "C:\data.csv". #> function Update-SqlServerFromCsv { param ( [string]$Server, [string]$Database, [string]$Table, [string]$CSVPath ) # Import the CSV file $csvData = Import-Csv -Path $CSVPath # Connect to the SQL Server database $connectionString = "Server=$Server;Database=$Database;Integrated Security=True" $connection = New-Object System.Data.SqlClient.SqlConnection($connectionString) $connection.Open() try { # Get the schema of the table $command = $connection.CreateCommand() $command.CommandText = "SELECT * FROM $Table WHERE 1 = 0" $adapter = New-Object System.Data.SqlClient.SqlDataAdapter($command) $dataset = New-Object System.Data.DataSet $adapter.Fill($dataset) $tableSchema = $dataset.Tables[0] # Iterate through each row in the CSV file foreach ($row in $csvData) { # Check if the row exists in the database table $filter = "" foreach ($column in $tableSchema.Columns) { $columnName = $column.ColumnName $columnValue = $row.$columnName # Build the filter condition if ($filter -ne "") { $filter += " AND " } $filter += "$columnName = '$columnValue'" } $command.CommandText = "SELECT * FROM $Table WHERE $filter" $adapter.SelectCommand = $command $dataset.Clear() $adapter.Fill($dataset) if ($dataset.Tables[0].Rows.Count -eq 1) { # The row exists, update the modified columns $dbRow = $dataset.Tables[0].Rows[0] foreach ($column in $tableSchema.Columns) { $columnName = $column.ColumnName $columnValue = $row.$columnName # Check if the column value has changed if ($columnValue -ne $dbRow[$columnName]) { $dbRow[$columnName] = $columnValue } } # Update the row in the database table $commandBuilder = New-Object System.Data.SqlClient.SqlCommandBuilder($adapter) $adapter.Update($dataset) } else { # The row does not exist, insert a new row $newRow = $tableSchema.NewRow() foreach ($column in $tableSchema.Columns) { $columnName = $column.ColumnName $columnValue = $row.$columnName $newRow[$columnName] = $columnValue } $tableSchema.Rows.Add($newRow) # Insert the new row into the database table $commandBuilder = New-Object System.Data.SqlClient.SqlCommandBuilder($adapter) $adapter.Update($dataset) } } } finally { # Close the database connection $connection.Close() } } # Usage example for the Update-SqlServerFromCsv function Update-SqlServerFromCsv -Server "localhost" -Database "MyDatabase" -Table "MyTable" -CSVPath "C:\data.csv" This page provides a PowerShell function that allows you to update a SQL Server database from a CSV file, only modifying the values that have changed. The function reads the CSV file, compares the data with the corresponding records in the database table, and updates the table with the new values from the CSV file. It ensures that only the columns that have changed are modified, minimizing unnecessary updates. This functionality can be useful when you have a large dataset and want to efficiently update your database with the latest information from a CSV file. To use the function, you need to provide the name of the SQL Server instance, the database name, the table name to be updated, and the path to the CSV file. An example usage is provided to demonstrate how to use the function effectively. This article was generated with AI. AI can make mistakes, consider checking important information.
__label__pos
0.999353
OmniSciDB  72180abbfe  All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages HashJoinRuntimeGpu.cu Go to the documentation of this file. 1 /* 2  * Copyright 2017 MapD Technologies, Inc. 3  * 4  * Licensed under the Apache License, Version 2.0 (the "License"); 5  * you may not use this file except in compliance with the License. 6  * You may obtain a copy of the License at 7  * 8  * http://www.apache.org/licenses/LICENSE-2.0 9  * 10  * Unless required by applicable law or agreed to in writing, software 11  * distributed under the License is distributed on an "AS IS" BASIS, 12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13  * See the License for the specific language governing permissions and 14  * limitations under the License. 15  */ 16 #include "HashJoinRuntime.cpp" 17  18 #include <thrust/device_ptr.h> 19 #include <thrust/scan.h> 20  21 __global__ void fill_hash_join_buff_wrapper(int32_t* buff, 22  const int32_t invalid_slot_val, 23  const JoinColumn join_column, 24  const JoinColumnTypeInfo type_info, 25  int* err) { 26  int partial_err = SUFFIX(fill_hash_join_buff)( 27  buff, invalid_slot_val, join_column, type_info, NULL, NULL, -1, -1); 28  atomicCAS(err, 0, partial_err); 29 } 30  32  int32_t* buff, 33  const int32_t invalid_slot_val, 34  const JoinColumn join_column, 35  const JoinColumnTypeInfo type_info, 36  int* err, 37  const int64_t bucket_normalization) { 38  int partial_err = SUFFIX(fill_hash_join_buff_bucketized)(buff, 39  invalid_slot_val, 40  join_column, 41  type_info, 42  NULL, 43  NULL, 44  -1, 45  -1, 46  bucket_normalization); 47  atomicCAS(err, 0, partial_err); 48 } 49  51  const int32_t invalid_slot_val, 52  int* dev_err_buff, 53  const JoinColumn join_column, 54  const JoinColumnTypeInfo type_info, 55  const size_t block_size_x, 56  const size_t grid_size_x, 57  const int64_t bucket_normalization) { 58  fill_hash_join_buff_bucketized_wrapper<<<grid_size_x, block_size_x>>>( 59  buff, invalid_slot_val, join_column, type_info, dev_err_buff, bucket_normalization); 60 } 61  62 void fill_hash_join_buff_on_device(int32_t* buff, 63  const int32_t invalid_slot_val, 64  int* dev_err_buff, 65  const JoinColumn join_column, 66  const JoinColumnTypeInfo type_info, 67  const size_t block_size_x, 68  const size_t grid_size_x) { 69  fill_hash_join_buff_wrapper<<<grid_size_x, block_size_x>>>( 70  buff, invalid_slot_val, join_column, type_info, dev_err_buff); 71 } 72  74  int32_t* buff, 75  const int32_t invalid_slot_val, 76  const JoinColumn join_column, 77  const JoinColumnTypeInfo type_info, 78  const ShardInfo shard_info, 79  int* err, 80  const int64_t bucket_normalization) { 81  int partial_err = SUFFIX(fill_hash_join_buff_sharded_bucketized)(buff, 82  invalid_slot_val, 83  join_column, 84  type_info, 85  shard_info, 86  NULL, 87  NULL, 88  -1, 89  -1, 90  bucket_normalization); 91  atomicCAS(err, 0, partial_err); 92 } 93  94 __global__ void fill_hash_join_buff_wrapper_sharded(int32_t* buff, 95  const int32_t invalid_slot_val, 96  const JoinColumn join_column, 97  const JoinColumnTypeInfo type_info, 98  const ShardInfo shard_info, 99  int* err) { 100  int partial_err = SUFFIX(fill_hash_join_buff_sharded)( 101  buff, invalid_slot_val, join_column, type_info, shard_info, NULL, NULL, -1, -1); 102  atomicCAS(err, 0, partial_err); 103 } 104  106  int32_t* buff, 107  const int32_t invalid_slot_val, 108  int* dev_err_buff, 109  const JoinColumn join_column, 110  const JoinColumnTypeInfo type_info, 111  const ShardInfo shard_info, 112  const size_t block_size_x, 113  const size_t grid_size_x, 114  const int64_t bucket_normalization) { 115  fill_hash_join_buff_wrapper_sharded_bucketized<<<grid_size_x, block_size_x>>>( 116  buff, 117  invalid_slot_val, 118  join_column, 119  type_info, 120  shard_info, 121  dev_err_buff, 122  bucket_normalization); 123 } 124  126  const int32_t invalid_slot_val, 127  int* dev_err_buff, 128  const JoinColumn join_column, 129  const JoinColumnTypeInfo type_info, 130  const ShardInfo shard_info, 131  const size_t block_size_x, 132  const size_t grid_size_x) { 133  fill_hash_join_buff_wrapper_sharded<<<grid_size_x, block_size_x>>>( 134  buff, invalid_slot_val, join_column, type_info, shard_info, dev_err_buff); 135 } 136  137 __global__ void init_hash_join_buff_wrapper(int32_t* buff, 138  const int32_t hash_entry_count, 139  const int32_t invalid_slot_val) { 140  SUFFIX(init_hash_join_buff)(buff, hash_entry_count, invalid_slot_val, -1, -1); 141 } 142  143 void init_hash_join_buff_on_device(int32_t* buff, 144  const int32_t hash_entry_count, 145  const int32_t invalid_slot_val, 146  const size_t block_size_x, 147  const size_t grid_size_x) { 148  init_hash_join_buff_wrapper<<<grid_size_x, block_size_x>>>( 149  buff, hash_entry_count, invalid_slot_val); 150 } 151  152 #define VALID_POS_FLAG 0 153  154 __global__ void set_valid_pos_flag(int32_t* pos_buff, 155  const int32_t* count_buff, 156  const int32_t entry_count) { 157  const int32_t start = threadIdx.x + blockDim.x * blockIdx.x; 158  const int32_t step = blockDim.x * gridDim.x; 159  for (int32_t i = start; i < entry_count; i += step) { 160  if (count_buff[i]) { 161  pos_buff[i] = VALID_POS_FLAG; 162  } 163  } 164 } 165  166 __global__ void set_valid_pos(int32_t* pos_buff, 167  int32_t* count_buff, 168  const int32_t entry_count) { 169  const int32_t start = threadIdx.x + blockDim.x * blockIdx.x; 170  const int32_t step = blockDim.x * gridDim.x; 171  for (int32_t i = start; i < entry_count; i += step) { 172  if (VALID_POS_FLAG == pos_buff[i]) { 173  pos_buff[i] = !i ? 0 : count_buff[i - 1]; 174  } 175  } 176 } 177  178 template <typename COUNT_MATCHES_FUNCTOR, typename FILL_ROW_IDS_FUNCTOR> 180  const int32_t hash_entry_count, 181  const int32_t invalid_slot_val, 182  const JoinColumn& join_column, 183  const JoinColumnTypeInfo& type_info, 184  const size_t block_size_x, 185  const size_t grid_size_x, 186  COUNT_MATCHES_FUNCTOR count_matches_func, 187  FILL_ROW_IDS_FUNCTOR fill_row_ids_func) { 188  int32_t* pos_buff = buff; 189  int32_t* count_buff = buff + hash_entry_count; 190  cudaMemset(count_buff, 0, hash_entry_count * sizeof(int32_t)); 191  count_matches_func(); 192  193  set_valid_pos_flag<<<grid_size_x, block_size_x>>>( 194  pos_buff, count_buff, hash_entry_count); 195  196  auto count_buff_dev_ptr = thrust::device_pointer_cast(count_buff); 198  count_buff_dev_ptr, count_buff_dev_ptr + hash_entry_count, count_buff_dev_ptr); 199  set_valid_pos<<<grid_size_x, block_size_x>>>(pos_buff, count_buff, hash_entry_count); 200  cudaMemset(count_buff, 0, hash_entry_count * sizeof(int32_t)); 201  fill_row_ids_func(); 202 } 203  205  const HashEntryInfo hash_entry_info, 206  const int32_t invalid_slot_val, 207  const JoinColumn& join_column, 208  const JoinColumnTypeInfo& type_info, 209  const size_t block_size_x, 210  const size_t grid_size_x) { 211  auto hash_entry_count = hash_entry_info.hash_entry_count; 212  auto count_matches_func = [hash_entry_count, 213  grid_size_x, 214  block_size_x, 215  count_buff = buff + hash_entry_count, 216  invalid_slot_val, 217  join_column, 218  type_info] { 219  SUFFIX(count_matches)<<<grid_size_x, block_size_x>>>( 220  count_buff, invalid_slot_val, join_column, type_info); 221  }; 222  223  auto fill_row_ids_func = [grid_size_x, 224  block_size_x, 225  buff, 226  hash_entry_count, 227  invalid_slot_val, 228  join_column, 229  type_info] { 230  SUFFIX(fill_row_ids)<<<grid_size_x, block_size_x>>>( 231  buff, hash_entry_count, invalid_slot_val, join_column, type_info); 232  }; 233  235  hash_entry_count, 236  invalid_slot_val, 237  join_column, 238  type_info, 239  block_size_x, 240  grid_size_x, 241  count_matches_func, 242  fill_row_ids_func); 243 } 244  246  const HashEntryInfo hash_entry_info, 247  const int32_t invalid_slot_val, 248  const JoinColumn& join_column, 249  const JoinColumnTypeInfo& type_info, 250  const size_t block_size_x, 251  const size_t grid_size_x) { 252  auto hash_entry_count = hash_entry_info.getNormalizedHashEntryCount(); 253  auto count_matches_func = [grid_size_x, 254  block_size_x, 255  count_buff = buff + hash_entry_count, 256  invalid_slot_val, 257  join_column, 258  type_info, 259  bucket_normalization = 260  hash_entry_info.bucket_normalization] { 261  SUFFIX(count_matches_bucketized)<<<grid_size_x, block_size_x>>>( 262  count_buff, invalid_slot_val, join_column, type_info, bucket_normalization); 263  }; 264  265  auto fill_row_ids_func = [grid_size_x, 266  block_size_x, 267  buff, 268  hash_entry_count = 269  hash_entry_info.getNormalizedHashEntryCount(), 270  invalid_slot_val, 271  join_column, 272  type_info, 273  bucket_normalization = hash_entry_info.bucket_normalization] { 274  SUFFIX(fill_row_ids_bucketized)<<<grid_size_x, block_size_x>>>(buff, 275  hash_entry_count, 276  invalid_slot_val, 277  join_column, 278  type_info, 279  bucket_normalization); 280  }; 281  283  hash_entry_count, 284  invalid_slot_val, 285  join_column, 286  type_info, 287  block_size_x, 288  grid_size_x, 289  count_matches_func, 290  fill_row_ids_func); 291 } 292  294  const HashEntryInfo hash_entry_info, 295  const int32_t invalid_slot_val, 296  const JoinColumn& join_column, 297  const JoinColumnTypeInfo& type_info, 298  const ShardInfo& shard_info, 299  const size_t block_size_x, 300  const size_t grid_size_x) { 301  auto hash_entry_count = hash_entry_info.hash_entry_count; 302  int32_t* pos_buff = buff; 303  int32_t* count_buff = buff + hash_entry_count; 304  cudaMemset(count_buff, 0, hash_entry_count * sizeof(int32_t)); 305  SUFFIX(count_matches_sharded)<<<grid_size_x, block_size_x>>>( 306  count_buff, invalid_slot_val, join_column, type_info, shard_info); 307  308  set_valid_pos_flag<<<grid_size_x, block_size_x>>>( 309  pos_buff, count_buff, hash_entry_count); 310  311  auto count_buff_dev_ptr = thrust::device_pointer_cast(count_buff); 313  count_buff_dev_ptr, count_buff_dev_ptr + hash_entry_count, count_buff_dev_ptr); 314  set_valid_pos<<<grid_size_x, block_size_x>>>(pos_buff, count_buff, hash_entry_count); 315  cudaMemset(count_buff, 0, hash_entry_count * sizeof(int32_t)); 316  SUFFIX(fill_row_ids_sharded)<<<grid_size_x, block_size_x>>>( 317  buff, hash_entry_count, invalid_slot_val, join_column, type_info, shard_info); 318 } 319  320 template <typename T, typename KEY_HANDLER> 322  const T* composite_key_dict, 323  const size_t hash_entry_count, 324  const int32_t invalid_slot_val, 325  const KEY_HANDLER* key_handler, 326  const size_t num_elems, 327  const size_t block_size_x, 328  const size_t grid_size_x) { 329  auto pos_buff = buff; 330  auto count_buff = buff + hash_entry_count; 331  cudaMemset(count_buff, 0, hash_entry_count * sizeof(int32_t)); 332  count_matches_baseline_gpu<<<grid_size_x, block_size_x>>>( 333  count_buff, composite_key_dict, hash_entry_count, key_handler, num_elems); 334  335  set_valid_pos_flag<<<grid_size_x, block_size_x>>>( 336  pos_buff, count_buff, hash_entry_count); 337  338  auto count_buff_dev_ptr = thrust::device_pointer_cast(count_buff); 340  count_buff_dev_ptr, count_buff_dev_ptr + hash_entry_count, count_buff_dev_ptr); 341  set_valid_pos<<<grid_size_x, block_size_x>>>(pos_buff, count_buff, hash_entry_count); 342  cudaMemset(count_buff, 0, hash_entry_count * sizeof(int32_t)); 343  fill_row_ids_baseline_gpu<<<grid_size_x, block_size_x>>>(buff, 344  composite_key_dict, 345  hash_entry_count, 346  invalid_slot_val, 347  key_handler, 348  num_elems); 349 } 350  351 template <typename T> 352 __global__ void init_baseline_hash_join_buff_wrapper(int8_t* hash_join_buff, 353  const size_t entry_count, 354  const size_t key_component_count, 355  const bool with_val_slot, 356  const int32_t invalid_slot_val) { 357  SUFFIX(init_baseline_hash_join_buff)<T>(hash_join_buff, 358  entry_count, 359  key_component_count, 360  with_val_slot, 361  invalid_slot_val, 362  -1, 363  -1); 364 } 365  366 void init_baseline_hash_join_buff_on_device_32(int8_t* hash_join_buff, 367  const int32_t entry_count, 368  const size_t key_component_count, 369  const bool with_val_slot, 370  const int32_t invalid_slot_val, 371  const size_t block_size_x, 372  const size_t grid_size_x) { 373  init_baseline_hash_join_buff_wrapper<int32_t><<<grid_size_x, block_size_x>>>( 374  hash_join_buff, entry_count, key_component_count, with_val_slot, invalid_slot_val); 375 } 376  377 void init_baseline_hash_join_buff_on_device_64(int8_t* hash_join_buff, 378  const int32_t entry_count, 379  const size_t key_component_count, 380  const bool with_val_slot, 381  const int32_t invalid_slot_val, 382  const size_t block_size_x, 383  const size_t grid_size_x) { 384  init_baseline_hash_join_buff_wrapper<int64_t><<<grid_size_x, block_size_x>>>( 385  hash_join_buff, entry_count, key_component_count, with_val_slot, invalid_slot_val); 386 } 387  388 template <typename T, typename KEY_HANDLER> 389 __global__ void fill_baseline_hash_join_buff_wrapper(int8_t* hash_buff, 390  const size_t entry_count, 391  const int32_t invalid_slot_val, 392  const size_t key_component_count, 393  const bool with_val_slot, 394  int* err, 395  const KEY_HANDLER* key_handler, 396  const size_t num_elems) { 397  int partial_err = SUFFIX(fill_baseline_hash_join_buff)<T>(hash_buff, 398  entry_count, 399  invalid_slot_val, 400  key_component_count, 401  with_val_slot, 402  key_handler, 403  num_elems, 404  -1, 405  -1); 406  atomicCAS(err, 0, partial_err); 407 } 408  410  const size_t entry_count, 411  const int32_t invalid_slot_val, 412  const size_t key_component_count, 413  const bool with_val_slot, 414  int* dev_err_buff, 415  const GenericKeyHandler* key_handler, 416  const size_t num_elems, 417  const size_t block_size_x, 418  const size_t grid_size_x) { 419  fill_baseline_hash_join_buff_wrapper<int32_t> 420  <<<grid_size_x, block_size_x>>>(hash_buff, 421  entry_count, 422  invalid_slot_val, 423  key_component_count, 424  with_val_slot, 425  dev_err_buff, 426  key_handler, 427  num_elems); 428 } 429  431  const size_t entry_count, 432  const int32_t invalid_slot_val, 433  const size_t key_component_count, 434  const bool with_val_slot, 435  int* dev_err_buff, 436  const GenericKeyHandler* key_handler, 437  const size_t num_elems, 438  const size_t block_size_x, 439  const size_t grid_size_x) { 440  fill_baseline_hash_join_buff_wrapper<unsigned long long> 441  <<<grid_size_x, block_size_x>>>(hash_buff, 442  entry_count, 443  invalid_slot_val, 444  key_component_count, 445  with_val_slot, 446  dev_err_buff, 447  key_handler, 448  num_elems); 449 } 450  452  int8_t* hash_buff, 453  const size_t entry_count, 454  const int32_t invalid_slot_val, 455  const size_t key_component_count, 456  const bool with_val_slot, 457  int* dev_err_buff, 458  const OverlapsKeyHandler* key_handler, 459  const size_t num_elems, 460  const size_t block_size_x, 461  const size_t grid_size_x) { 462  fill_baseline_hash_join_buff_wrapper<unsigned long long> 463  <<<grid_size_x, block_size_x>>>(hash_buff, 464  entry_count, 465  invalid_slot_val, 466  key_component_count, 467  with_val_slot, 468  dev_err_buff, 469  key_handler, 470  num_elems); 471 } 472  474  int32_t* buff, 475  const int32_t* composite_key_dict, 476  const size_t hash_entry_count, 477  const int32_t invalid_slot_val, 478  const size_t key_component_count, 479  const GenericKeyHandler* key_handler, 480  const size_t num_elems, 481  const size_t block_size_x, 482  const size_t grid_size_x) { 483  fill_one_to_many_baseline_hash_table_on_device<int32_t>(buff, 484  composite_key_dict, 485  hash_entry_count, 486  invalid_slot_val, 487  key_handler, 488  num_elems, 489  block_size_x, 490  grid_size_x); 491 } 492  494  int32_t* buff, 495  const int64_t* composite_key_dict, 496  const size_t hash_entry_count, 497  const int32_t invalid_slot_val, 498  const GenericKeyHandler* key_handler, 499  const size_t num_elems, 500  const size_t block_size_x, 501  const size_t grid_size_x) { 502  fill_one_to_many_baseline_hash_table_on_device<int64_t>(buff, 503  composite_key_dict, 504  hash_entry_count, 505  invalid_slot_val, 506  key_handler, 507  num_elems, 508  block_size_x, 509  grid_size_x); 510 } 511  513  int32_t* buff, 514  const int64_t* composite_key_dict, 515  const size_t hash_entry_count, 516  const int32_t invalid_slot_val, 517  const OverlapsKeyHandler* key_handler, 518  const size_t num_elems, 519  const size_t block_size_x, 520  const size_t grid_size_x) { 521  fill_one_to_many_baseline_hash_table_on_device<int64_t>(buff, 522  composite_key_dict, 523  hash_entry_count, 524  invalid_slot_val, 525  key_handler, 526  num_elems, 527  block_size_x, 528  grid_size_x); 529 } 530  532  const uint32_t b, 533  int32_t* row_counts_buffer, 534  const OverlapsKeyHandler* key_handler, 535  const size_t num_elems, 536  const size_t block_size_x, 537  const size_t grid_size_x) { 538  approximate_distinct_tuples_impl_gpu<<<grid_size_x, block_size_x>>>( 539  hll_buffer, row_counts_buffer, b, num_elems, key_handler); 540  541  auto row_counts_buffer_ptr = thrust::device_pointer_cast(row_counts_buffer); 543  row_counts_buffer_ptr, row_counts_buffer_ptr + num_elems, row_counts_buffer_ptr); 544 } 545  546 void approximate_distinct_tuples_on_device(uint8_t* hll_buffer, 547  const uint32_t b, 548  const GenericKeyHandler* key_handler, 549  const size_t num_elems, 550  const size_t block_size_x, 551  const size_t grid_size_x) { 552  approximate_distinct_tuples_impl_gpu<<<grid_size_x, block_size_x>>>( 553  hll_buffer, nullptr, b, num_elems, key_handler); 554 } 555  556 void compute_bucket_sizes_on_device(double* bucket_sizes_buffer, 557  const JoinColumn* join_column, 558  const JoinColumnTypeInfo* type_info, 559  const double bucket_sz_threshold, 560  const size_t block_size_x, 561  const size_t grid_size_x) { 562  compute_bucket_sizes_impl_gpu<2><<<grid_size_x, block_size_x>>>(bucket_sizes_buffer, 563  join_column, 564  type_info, 565  bucket_sz_threshold, 566  block_size_x, 567  grid_size_x); 568 } GLOBAL void SUFFIX() count_matches_bucketized(int32_t *count_buff, const int32_t invalid_slot_val, const JoinColumn join_column, const JoinColumnTypeInfo type_info, const void *sd_inner_proxy, const void *sd_outer_proxy, const int32_t cpu_thread_idx, const int32_t cpu_thread_count, const int64_t bucket_normalization) void fill_one_to_many_hash_table_on_device(int32_t *buff, const HashEntryInfo hash_entry_info, const int32_t invalid_slot_val, const JoinColumn &join_column, const JoinColumnTypeInfo &type_info, const size_t block_size_x, const size_t grid_size_x) void overlaps_fill_baseline_hash_join_buff_on_device_64(int8_t *hash_buff, const size_t entry_count, const int32_t invalid_slot_val, const size_t key_component_count, const bool with_val_slot, int *dev_err_buff, const OverlapsKeyHandler *key_handler, const size_t num_elems, const size_t block_size_x, const size_t grid_size_x) void fill_one_to_many_baseline_hash_table_on_device_32(int32_t *buff, const int32_t *composite_key_dict, const size_t hash_entry_count, const int32_t invalid_slot_val, const size_t key_component_count, const GenericKeyHandler *key_handler, const size_t num_elems, const size_t block_size_x, const size_t grid_size_x) void fill_baseline_hash_join_buff_on_device_32(int8_t *hash_buff, const size_t entry_count, const int32_t invalid_slot_val, const size_t key_component_count, const bool with_val_slot, int *dev_err_buff, const GenericKeyHandler *key_handler, const size_t num_elems, const size_t block_size_x, const size_t grid_size_x) DEVICE void SUFFIX() init_hash_join_buff(int32_t *groups_buffer, const int32_t hash_entry_count, const int32_t invalid_slot_val, const int32_t cpu_thread_idx, const int32_t cpu_thread_count) void fill_one_to_many_hash_table_on_device_sharded(int32_t *buff, const HashEntryInfo hash_entry_info, const int32_t invalid_slot_val, const JoinColumn &join_column, const JoinColumnTypeInfo &type_info, const ShardInfo &shard_info, const size_t block_size_x, const size_t grid_size_x) void fill_hash_join_buff_on_device(int32_t *buff, const int32_t invalid_slot_val, int *dev_err_buff, const JoinColumn join_column, const JoinColumnTypeInfo type_info, const size_t block_size_x, const size_t grid_size_x) GLOBAL void SUFFIX() fill_row_ids_sharded(int32_t *buff, const int32_t hash_entry_count, const int32_t invalid_slot_val, const JoinColumn join_column, const JoinColumnTypeInfo type_info, const ShardInfo shard_info, const void *sd_inner_proxy, const void *sd_outer_proxy, const int32_t cpu_thread_idx, const int32_t cpu_thread_count) GLOBAL void SUFFIX() fill_row_ids_bucketized(int32_t *buff, const int32_t hash_entry_count, const int32_t invalid_slot_val, const JoinColumn join_column, const JoinColumnTypeInfo type_info, const void *sd_inner_proxy, const void *sd_outer_proxy, const int32_t cpu_thread_idx, const int32_t cpu_thread_count, const int64_t bucket_normalization) __global__ void set_valid_pos(int32_t *pos_buff, int32_t *count_buff, const int32_t entry_count) #define SUFFIX(name) void compute_bucket_sizes_on_device(double *bucket_sizes_buffer, const JoinColumn *join_column, const JoinColumnTypeInfo *type_info, const double bucket_sz_threshold, const size_t block_size_x, const size_t grid_size_x) __global__ void fill_hash_join_buff_wrapper_sharded(int32_t *buff, const int32_t invalid_slot_val, const JoinColumn join_column, const JoinColumnTypeInfo type_info, const ShardInfo shard_info, int *err) GLOBAL void SUFFIX() count_matches_sharded(int32_t *count_buff, const int32_t invalid_slot_val, const JoinColumn join_column, const JoinColumnTypeInfo type_info, const ShardInfo shard_info, const void *sd_inner_proxy, const void *sd_outer_proxy, const int32_t cpu_thread_idx, const int32_t cpu_thread_count) void inclusive_scan(InputIterator first, InputIterator last, OutputIterator out, const size_t thread_count) __global__ void fill_hash_join_buff_bucketized_wrapper(int32_t *buff, const int32_t invalid_slot_val, const JoinColumn join_column, const JoinColumnTypeInfo type_info, int *err, const int64_t bucket_normalization) __global__ void fill_hash_join_buff_wrapper(int32_t *buff, const int32_t invalid_slot_val, const JoinColumn join_column, const JoinColumnTypeInfo type_info, int *err) DEVICE int SUFFIX() fill_hash_join_buff_sharded_bucketized(int32_t *buff, const int32_t invalid_slot_val, const JoinColumn join_column, const JoinColumnTypeInfo type_info, const ShardInfo shard_info, const void *sd_inner_proxy, const void *sd_outer_proxy, const int32_t cpu_thread_idx, const int32_t cpu_thread_count, const int64_t bucket_normalization) DEVICE int SUFFIX() fill_baseline_hash_join_buff(int8_t *hash_buff, const size_t entry_count, const int32_t invalid_slot_val, const size_t key_component_count, const bool with_val_slot, const FILL_HANDLER *f, const size_t num_elems, const int32_t cpu_thread_idx, const int32_t cpu_thread_count) void fill_one_to_many_hash_table_on_device_impl(int32_t *buff, const int32_t hash_entry_count, const int32_t invalid_slot_val, const JoinColumn &join_column, const JoinColumnTypeInfo &type_info, const size_t block_size_x, const size_t grid_size_x, COUNT_MATCHES_FUNCTOR count_matches_func, FILL_ROW_IDS_FUNCTOR fill_row_ids_func) __global__ void fill_baseline_hash_join_buff_wrapper(int8_t *hash_buff, const size_t entry_count, const int32_t invalid_slot_val, const size_t key_component_count, const bool with_val_slot, int *err, const KEY_HANDLER *key_handler, const size_t num_elems) DEVICE void SUFFIX() init_baseline_hash_join_buff(int8_t *hash_buff, const size_t entry_count, const size_t key_component_count, const bool with_val_slot, const int32_t invalid_slot_val, const int32_t cpu_thread_idx, const int32_t cpu_thread_count) #define VALID_POS_FLAG void fill_hash_join_buff_on_device_sharded(int32_t *buff, const int32_t invalid_slot_val, int *dev_err_buff, const JoinColumn join_column, const JoinColumnTypeInfo type_info, const ShardInfo shard_info, const size_t block_size_x, const size_t grid_size_x) GLOBAL void SUFFIX() fill_row_ids(int32_t *buff, const int32_t hash_entry_count, const int32_t invalid_slot_val, const JoinColumn join_column, const JoinColumnTypeInfo type_info, const void *sd_inner_proxy, const void *sd_outer_proxy, const int32_t cpu_thread_idx, const int32_t cpu_thread_count) void fill_one_to_many_hash_table_on_device_bucketized(int32_t *buff, const HashEntryInfo hash_entry_info, const int32_t invalid_slot_val, const JoinColumn &join_column, const JoinColumnTypeInfo &type_info, const size_t block_size_x, const size_t grid_size_x) void approximate_distinct_tuples_on_device(uint8_t *hll_buffer, const uint32_t b, const GenericKeyHandler *key_handler, const size_t num_elems, const size_t block_size_x, const size_t grid_size_x) void init_baseline_hash_join_buff_on_device_32(int8_t *hash_join_buff, const int32_t entry_count, const size_t key_component_count, const bool with_val_slot, const int32_t invalid_slot_val, const size_t block_size_x, const size_t grid_size_x) int64_t bucket_normalization size_t hash_entry_count void fill_one_to_many_baseline_hash_table_on_device(int32_t *buff, const T *composite_key_dict, const size_t hash_entry_count, const int32_t invalid_slot_val, const KEY_HANDLER *key_handler, const size_t num_elems, const size_t block_size_x, const size_t grid_size_x) void init_baseline_hash_join_buff_on_device_64(int8_t *hash_join_buff, const int32_t entry_count, const size_t key_component_count, const bool with_val_slot, const int32_t invalid_slot_val, const size_t block_size_x, const size_t grid_size_x) __global__ void set_valid_pos_flag(int32_t *pos_buff, const int32_t *count_buff, const int32_t entry_count) GLOBAL void SUFFIX() count_matches(int32_t *count_buff, const int32_t invalid_slot_val, const JoinColumn join_column, const JoinColumnTypeInfo type_info, const void *sd_inner_proxy, const void *sd_outer_proxy, const int32_t cpu_thread_idx, const int32_t cpu_thread_count) void fill_one_to_many_baseline_hash_table_on_device_64(int32_t *buff, const int64_t *composite_key_dict, const size_t hash_entry_count, const int32_t invalid_slot_val, const GenericKeyHandler *key_handler, const size_t num_elems, const size_t block_size_x, const size_t grid_size_x) void init_hash_join_buff_on_device(int32_t *buff, const int32_t entry_count, const int32_t invalid_slot_val, const size_t block_size_x, const size_t grid_size_x) DEVICE int SUFFIX() fill_hash_join_buff(int32_t *buff, const int32_t invalid_slot_val, const JoinColumn join_column, const JoinColumnTypeInfo type_info, const void *sd_inner_proxy, const void *sd_outer_proxy, const int32_t cpu_thread_idx, const int32_t cpu_thread_count) void fill_baseline_hash_join_buff_on_device_64(int8_t *hash_buff, const size_t entry_count, const int32_t invalid_slot_val, const size_t key_component_count, const bool with_val_slot, int *dev_err_buff, const GenericKeyHandler *key_handler, const size_t num_elems, const size_t block_size_x, const size_t grid_size_x) size_t getNormalizedHashEntryCount() const void approximate_distinct_tuples_on_device_overlaps(uint8_t *hll_buffer, const uint32_t b, int32_t *row_counts_buffer, const OverlapsKeyHandler *key_handler, const size_t num_elems, const size_t block_size_x, const size_t grid_size_x) __global__ void fill_hash_join_buff_wrapper_sharded_bucketized(int32_t *buff, const int32_t invalid_slot_val, const JoinColumn join_column, const JoinColumnTypeInfo type_info, const ShardInfo shard_info, int *err, const int64_t bucket_normalization) __global__ void init_hash_join_buff_wrapper(int32_t *buff, const int32_t hash_entry_count, const int32_t invalid_slot_val) void overlaps_fill_one_to_many_baseline_hash_table_on_device_64(int32_t *buff, const int64_t *composite_key_dict, const size_t hash_entry_count, const int32_t invalid_slot_val, const OverlapsKeyHandler *key_handler, const size_t num_elems, const size_t block_size_x, const size_t grid_size_x) __global__ void init_baseline_hash_join_buff_wrapper(int8_t *hash_join_buff, const size_t entry_count, const size_t key_component_count, const bool with_val_slot, const int32_t invalid_slot_val) DEVICE int SUFFIX() fill_hash_join_buff_bucketized(int32_t *buff, const int32_t invalid_slot_val, const JoinColumn join_column, const JoinColumnTypeInfo type_info, const void *sd_inner_proxy, const void *sd_outer_proxy, const int32_t cpu_thread_idx, const int32_t cpu_thread_count, const int64_t bucket_normalization) DEVICE int SUFFIX() fill_hash_join_buff_sharded(int32_t *buff, const int32_t invalid_slot_val, const JoinColumn join_column, const JoinColumnTypeInfo type_info, const ShardInfo shard_info, const void *sd_inner_proxy, const void *sd_outer_proxy, const int32_t cpu_thread_idx, const int32_t cpu_thread_count) void fill_hash_join_buff_on_device_sharded_bucketized(int32_t *buff, const int32_t invalid_slot_val, int *dev_err_buff, const JoinColumn join_column, const JoinColumnTypeInfo type_info, const ShardInfo shard_info, const size_t block_size_x, const size_t grid_size_x, const int64_t bucket_normalization) void fill_hash_join_buff_on_device_bucketized(int32_t *buff, const int32_t invalid_slot_val, int *dev_err_buff, const JoinColumn join_column, const JoinColumnTypeInfo type_info, const size_t block_size_x, const size_t grid_size_x, const int64_t bucket_normalization)
__label__pos
0.999519
Lecture 2 - Code Dangling pointer example #include <iostream> using namespace std; int main() { int *a = new int; int *b = new int; cout << " a = " << a << " b = " << b << endl; *a = 4; cout << " *a = "<< *a << " *b = " << *b << endl; b = a; cout << " a = " << a << " b = " << b << endl; cout << " *a = "<< *a << " *b = " << *b << endl; delete a; //delete b; //dangling pointer b = NULL; delete b; } Solution to memory leak #include <iostream> using namespace std; int main() { int *a = new int; int *b = new int; int *c = a; a = b; //after this there is no pointer to memory pointed by a delete a; //delete b; //memory leak delete c; } Unless otherwise stated, the content of this page is licensed under Creative Commons Attribution-ShareAlike 3.0 License
__label__pos
1
A Gardner-esque puzzle One of my favourite sources of puzzles at the moment is @WWMGT – What Would Martin Gardner Tweet? (Martin Gardner, in case you’re not up on the greats of popular maths writing, was one of the greats of popular maths writing – and is indirectly responsible for Big MathsJam.) Recently, it was decreed that Martin Gardner would have tweeted: Show that no square of two or more digits can have only odd digits. Must be easy, I thought. Let’s do it by contradiction, and try to find a square number – $k^2$ with only odd digits. $k$ has to be odd, because otherwise it would end in an even digit – so it must end in 1, 3, 5, 7 or 9. If $k$ ends in 1, it can be written as $k = 10n+1$, so $k^2 = 100n^2 + 20n + 1$; its last digit is 1, but the one before it is even ($\frac{k^2 – 1}{10} = 10n^2 + 2n$). A similar argument accounts for $k$ ending in 3: if $k = 10n + 3$, $k^2 = 100n^2 + 60n + 9$, which again has an even penultimate digit. It was at exactly this point that inspiration struck. You see, I’d been worried about how I was going to deal with awkward numbers like somethingty-seven and somethingty-nine – but then it struck me: I can write those as $10n – 3$ and $10n – 1$ and apply the same reasoning as before! That just leaves me with somethingty-five. If $k = 10n + 5$, then $k^2 = 100n^2 + 100n + 25$, in which case the penultimate digit has to be 2. And we’re done! None of the possibilities hold up, so our assumption that there was such a number must have been mistaken. Colin Colin is a Weymouth maths tutor, author of several Maths For Dummies books and A-level maths guides. He started Flying Colours Maths in 2008. He lives with an espresso pot and nothing to prove. Share 5 comments on “A Gardner-esque puzzle Leave a Reply Your email address will not be published. Required fields are marked * This site uses Akismet to reduce spam. Learn how your comment data is processed. Sign up for the Sum Comfort newsletter and get a free e-book of mathematical quotations. No spam ever, obviously. Where do you teach? I teach in my home in Abbotsbury Road, Weymouth. It's a 15-minute walk from Weymouth station, and it's on bus routes 3, 8 and X53. On-road parking is available nearby. On twitter
__label__pos
0.823314
isort.awk # insertion sort isort.awk isort.awk { A[NR] = $0 } isort.awk isort.awk END { isort(A, NR) isort.awk for (i = 1; i <= NR; i++) isort.awk print A[i] isort.awk } isort.awk isort.awk # isort - sort A[1..n] by insertion isort.awk isort.awk function isort(A,n, i,j,t) { isort.awk for (i = 2; i <= n; i++) isort.awk for (j = i; j > 1 && A[j-1] > A[j]; j--) { isort.awk # swap A[j-1] and A[j] isort.awk t = A[j-1]; A[j-1] = A[j]; A[j] = t isort.awk } isort.awk } ptest.awk # batch test of sorting routines ptest.awk ptest.awk BEGIN { ptest.awk print " 0 elements" ptest.awk isort(A, 0); check(A, 0) ptest.awk print " 1 element" ptest.awk genid(A, 1); isort(A, 1); check(A, 1) ptest.awk ptest.awk n = 10 ptest.awk print " " n " random integers" ptest.awk genrand(A, n); isort(A, n); check(A, n) ptest.awk ptest.awk print " " n " sorted integers" ptest.awk gensort(A, n); isort(A, n); check(A, n) ptest.awk ptest.awk print " " n " reverse-sorted integers" ptest.awk genrev(A, n); isort(A, n); check(A, n) ptest.awk ptest.awk print " " n " identical integers" ptest.awk genid(A, n); isort(A, n); check(A, n) ptest.awk } ptest.awk ptest.awk function isort(A,n, i,j,t) { ptest.awk for (i = 2; i <= n; i++) ptest.awk for (j = i; j > 1 && A[j-1] > A[j]; j--) { ptest.awk # swap A[j-1] and A[j] ptest.awk t = A[j-1]; A[j-1] = A[j]; A[j] = t ptest.awk } ptest.awk } ptest.awk ptest.awk # test-generation and sorting routines... ptest.awk ptest.awk function check(A,n, i) { ptest.awk for (i = 1; i < n; i++) ptest.awk if (A[i] > A[i+1]) ptest.awk printf("array is not sorted, element %d\n", i) ptest.awk } ptest.awk ptest.awk function genrand(A,n, i) { # put n random integers in A ptest.awk for (i = 1; i <= n; i++) ptest.awk A[i] = int(n*rand()) ptest.awk } ptest.awk ptest.awk function gensort(A,n, i) { # put n sorted integers in A ptest.awk for (i = 1; i <= n; i++) ptest.awk A[i] = i ptest.awk } ptest.awk ptest.awk function genrev(A,n, i) { # put n reverse-sorted integers ptest.awk for (i = 1; i <= n; i++) # in A ptest.awk A[i] = n+1-i ptest.awk } ptest.awk ptest.awk function genid(A,n, i) { # put n identical integers in A ptest.awk for (i = 1; i <= n; i++) ptest.awk A[i] = 1 ptest.awk } scaff.awk BEGIN { srand(1111) } scaff.awk { print } scaff.awk # interactive test framework for sort routines scaff.awk scaff.awk /^[0-9]+.*rand/ { n = $1; genrand(A, n); dump(A, n); next } scaff.awk /^[0-9]+.*id/ { n = $1; genid(A, n); dump(A, n); next } scaff.awk /^[0-9]+.*sort/ { n = $1; gensort(A, n); dump(A, n); next } scaff.awk /^[0-9]+.*rev/ { n = $1; genrev(A, n); dump(A, n); next } scaff.awk /^data/ { # use data right from this line scaff.awk for (i = 2; i <= NF; i++) scaff.awk A[i-1] = $i scaff.awk n = NF - 1 scaff.awk next scaff.awk } scaff.awk /q.*sort/ { qsort(A, 1, n); check(A, n); dump(A, n); next } scaff.awk /h.*sort/ { hsort(A, n); check(A, n); dump(A, n); next } scaff.awk /i.*sort/ { isort(A, n); check(A, n); dump(A, n); next } scaff.awk /./ { print "data ... | N [rand|id|sort|rev]; [qhi]sort" } scaff.awk scaff.awk function dump(A, n) { # print A[1]..A[n] scaff.awk for (i = 1; i <= n; i++) scaff.awk printf(" %s", A[i]) scaff.awk printf("\n") scaff.awk } scaff.awk scaff.awk # test-generation and sorting routines ... scaff.awk scaff.awk function genrand(A,n, i) { # put n random integers in A scaff.awk for (i = 1; i <= n; i++) scaff.awk A[i] = int(n*rand()) scaff.awk } scaff.awk scaff.awk function gensort(A,n, i) { # put n sorted integers in A scaff.awk for (i = 1; i <= n; i++) scaff.awk A[i] = i scaff.awk } scaff.awk scaff.awk function genrev(A,n, i) { # put n reverse-sorted integers in A scaff.awk for (i = 1; i <= n; i++) scaff.awk A[i] = n+1-i scaff.awk } scaff.awk scaff.awk function genid(A,n, i) { # put n identical integers in A scaff.awk for (i = 1; i <= n; i++) scaff.awk A[i] = 1 scaff.awk } scaff.awk scaff.awk function check(A,n, i) { scaff.awk for (i = 1; i < n; i++) scaff.awk if (A[i] > A[i+1]) scaff.awk printf("error: array is not sorted, element %d\n", i) scaff.awk } scaff.awk function isort(A,n, i,j,t) { scaff.awk for (i = 2; i <= n; i++) scaff.awk for (j = i; j > 1 && A[j-1] > A[j]; j--) { scaff.awk # swap A[j-1] and A[j] scaff.awk t = A[j-1]; A[j-1] = A[j]; A[j] = t scaff.awk } scaff.awk } scaff.awk scaff.awk function qsort(A,left,right, i,last) { scaff.awk if (left >= right) # do nothing if array contains scaff.awk return # at most one element scaff.awk swap(A, left, left + int((right-left+1)*rand())) scaff.awk last = left scaff.awk for (i = left+1; i <= right; i++) scaff.awk if (A[i] < A[left]) scaff.awk swap(A, ++last, i) scaff.awk swap(A, left, last) scaff.awk qsort(A, left, last-1) scaff.awk qsort(A, last+1, right) scaff.awk } scaff.awk scaff.awk function swap(A,i,j, t) { scaff.awk t = A[i]; A[i] = A[j]; A[j] = t scaff.awk } scaff.awk scaff.awk function hsort(A,right, i) { scaff.awk for (i = int(right/2); i >= 1; i--) scaff.awk heapify(A, i, right) scaff.awk for (i = right; i > 1; i--) { scaff.awk swap(A, 1, i) scaff.awk heapify(A, 1, i-1) scaff.awk } scaff.awk } scaff.awk scaff.awk function heapify(A,left,right, p,c) { scaff.awk for (p = left; (c = 2*p) <= right; p = c) { scaff.awk if (c < right && A[c+1] > A[c]) scaff.awk c++ scaff.awk if (A[p] < A[c]) scaff.awk swap(A, c, p) scaff.awk } scaff.awk } iisort.awk # count comparisons and exchanges in isort iisort.awk iisort.awk { A[NR] = $0 } iisort.awk END { comp = exch = 0 iisort.awk isort(A, NR) iisort.awk print "isort", NR, comp, exch iisort.awk } iisort.awk iisort.awk function isort(A,n, i,j,t) { # insertion sort iisort.awk for (i = 2; i <= n; i++) # with counters iisort.awk for (j = i; j > 1 && ++comp && iisort.awk A[j-1] > A[j] && ++exch; j--) { iisort.awk # swap A[j-1] and A[j] iisort.awk t = A[j-1]; A[j-1] = A[j]; A[j] = t iisort.awk } iisort.awk } frame.awk # test framework for sort performance evaluation frame.awk # input: lines with sort name, type of data, sizes... frame.awk # output: name, type, size, comparisons, exchanges, c+e frame.awk frame.awk { for (i = 3; i <= NF; i++) frame.awk test($1, $2, $i) frame.awk } frame.awk frame.awk function test(sort, data, n) { frame.awk comp = exch = 0 frame.awk if (data ~ /rand/) frame.awk genrand(A, n) frame.awk else if (data ~ /id/) frame.awk genid(A, n) frame.awk else if (data ~ /rev/) frame.awk genrev(A, n) frame.awk else frame.awk print "illegal type of data in", $0 frame.awk if (sort ~ /q.*sort/) frame.awk qsort(A, 1, n) frame.awk else if (sort ~ /h.*sort/) frame.awk hsort(A, n) frame.awk else if (sort ~ /i.*sort/) frame.awk isort(A, n) frame.awk else print frame.awk "illegal type of sort in", $0 frame.awk print sort, data, n, comp, exch, comp+exch frame.awk } frame.awk frame.awk # test-generation and sorting routines ... frame.awk frame.awk BEGIN { srand(111) } frame.awk function genrand(A,n, i) { # put n random integers in A frame.awk for (i = 1; i <= n; i++) frame.awk A[i] = int(n*rand()) frame.awk } frame.awk frame.awk function gensort(A,n, i) { # put n sorted integers in A frame.awk for (i = 1; i <= n; i++) frame.awk A[i] = i frame.awk } frame.awk frame.awk function genrev(A,n, i) { # put n reverse-sorted integers in A frame.awk for (i = 1; i <= n; i++) frame.awk A[i] = n+1-i frame.awk } frame.awk frame.awk function genid(A,n, i) { # put n identical integers in A frame.awk for (i = 1; i <= n; i++) frame.awk A[i] = 1 frame.awk } frame.awk frame.awk function check(A,n, i) { frame.awk for (i = 1; i < n; i++) frame.awk if (A[i] > A[i+1]) frame.awk printf("error: array is not sorted, element %d\n", i) frame.awk } frame.awk function isort(A,n, i,j,t) { frame.awk for (i = 2; i <= n; i++) frame.awk for (j = i; j > 1 && ++comp && A[j-1] > A[j] && ++exch; j--) { frame.awk # swap A[j-1] and A[j] frame.awk t = A[j-1]; A[j-1] = A[j]; A[j] = t frame.awk } frame.awk } frame.awk frame.awk function qsort(A,left,right, i,last) { frame.awk if (left >= right) # do nothing if array contains frame.awk return # at most one element frame.awk swap(A, left, left + int((right-left+1)*rand())) frame.awk last = left frame.awk for (i = left+1; i <= right; i++) frame.awk if (++comp && A[i] < A[left]) frame.awk swap(A, ++last, i) frame.awk swap(A, left, last) frame.awk qsort(A, left, last-1) frame.awk qsort(A, last+1, right) frame.awk } frame.awk frame.awk function swap(A,i,j, t) { frame.awk ++exch frame.awk t = A[i]; A[i] = A[j]; A[j] = t frame.awk } frame.awk frame.awk function hsort(A,right, i) { frame.awk for (i = int(right/2); i >= 1; i--) frame.awk heapify(A, i, right) frame.awk for (i = right; i > 1; i--) { frame.awk swap(A, 1, i) frame.awk heapify(A, 1, i-1) frame.awk } frame.awk } frame.awk frame.awk function heapify(A,left,right, p,c) { frame.awk for (p = left; (c = 2*p) <= right; p = c) { frame.awk if (c < right && ++comp && A[c+1] > A[c]) frame.awk c++ frame.awk if (++comp && A[p] < A[c]) frame.awk swap(A, c, p) frame.awk } frame.awk } qsort.awk # quicksort qsort.awk qsort.awk { A[NR] = $0 } qsort.awk qsort.awk END { qsort(A, 1, NR) qsort.awk for (i = 1; i <= NR; i++) qsort.awk print A[i] qsort.awk } qsort.awk qsort.awk # qsort - sort A[left..right] by quicksort qsort.awk qsort.awk function qsort(A,left,right, i,last) { qsort.awk if (left >= right) # do nothing if array contains qsort.awk return # less than two elements qsort.awk swap(A, left, left + int((right-left+1)*rand())) qsort.awk last = left # A[left] is now partition element qsort.awk for (i = left+1; i <= right; i++) qsort.awk if (A[i] < A[left]) qsort.awk swap(A, ++last, i) qsort.awk swap(A, left, last) qsort.awk qsort(A, left, last-1) qsort.awk qsort(A, last+1, right) qsort.awk } qsort.awk qsort.awk function swap(A,i,j, t) { qsort.awk t = A[i]; A[i] = A[j]; A[j] = t qsort.awk } hsort.awk # heapsort hsort.awk hsort.awk { A[NR] = $0 } hsort.awk hsort.awk END { hsort(A, NR) hsort.awk for (i = 1; i <= NR; i++) hsort.awk { print A[i] } hsort.awk } hsort.awk hsort.awk function hsort(A,n, i) { hsort.awk for (i = int(n/2); i >= 1; i--) # phase 1 hsort.awk { heapify(A, i, n) } hsort.awk for (i = n; i > 1; i--) { # phase 2 hsort.awk { swap(A, 1, i) } hsort.awk { heapify(A, 1, i-1) } hsort.awk } hsort.awk } hsort.awk function heapify(A,left,right, p,c) { hsort.awk for (p = left; (c = 2*p) <= right; p = c) { hsort.awk if (c < right && A[c+1] > A[c]) hsort.awk { c++ } hsort.awk if (A[p] < A[c]) hsort.awk { swap(A, c, p) } hsort.awk } hsort.awk } hsort.awk function swap(A,i,j, t) { hsort.awk t = A[i]; A[i] = A[j]; A[j] = t hsort.awk } makeprof # makeprof - prepare profiling version of an awk program makeprof # usage: awk -f makeprof awkprog >awkprog.p makeprof # running awk -f awkprog.p data creates a makeprof # file prof.cnts of statement counts for awkprog makeprof makeprof { if ($0 ~ /{/) sub(/{/, "{ _LBcnt[" ++_numLB "]++; ") makeprof print makeprof } makeprof makeprof END { printf("END { for (i = 1; i <= %d; i++)\n", _numLB) makeprof printf("\t\t print _LBcnt[i] > \"prof.cnts\"\n}\n") makeprof } printprof # printprof - print profiling counts printprof # usage: awk -f printprof awkprog printprof # prints awkprog with statement counts from prof.cnts printprof printprof BEGIN { while (getline < "prof.cnts" > 0) cnt[++i] = $1 } printprof /{/ { printf("%5d", cnt[++j]) } printprof { printf("\t%s\n", $0) } tsort.awk # tsort - topological sort of a graph tsort.awk # input: predecessor-successor pairs tsort.awk # output: linear order, predecessors first tsort.awk tsort.awk { if (!($1 in pcnt)) tsort.awk pcnt[$1] = 0 # put $1 in pcnt tsort.awk pcnt[$2]++ # count predecessors of $2 tsort.awk slist[$1, ++scnt[$1]] = $2 # add $2 to successors of $1 tsort.awk } tsort.awk END { for (node in pcnt) { tsort.awk nodecnt++ tsort.awk if (pcnt[node] == 0) # if it has no predecessors tsort.awk q[++back] = node # queue node tsort.awk } tsort.awk for (front = 1; front <= back; front++) { tsort.awk printf(" %s", node = q[front]) tsort.awk for (i = 1; i <= scnt[node]; i++) tsort.awk if (--pcnt[slist[node, i]] == 0) tsort.awk # queue s if it has no more predecessors tsort.awk q[++back] = slist[node, i] tsort.awk } tsort.awk if (back != nodecnt) tsort.awk print "\nerror: input contains a cycle" tsort.awk printf("\n") tsort.awk } dfs.awk # dfs - depth-first search for cycles dfs.awk dfs.awk function dfs(node, i, s) { dfs.awk visited[node] = 1 dfs.awk for (i = 1; i <= scnt[node]; i++) dfs.awk if (visited[s = slist[node, i]] == 0) dfs.awk dfs(s) dfs.awk else if (visited[s] == 1) dfs.awk print "cycle with back edge (" node ", " s ")" dfs.awk visited[node] = 2 dfs.awk } rtsort.awk # rtsort - reverse topological sort rtsort.awk # input: predecessor-successor pairs rtsort.awk # output: linear order, successors first rtsort.awk rtsort.awk { if (!($1 in pcnt)) rtsort.awk pcnt[$1] = 0 # put $1 in pcnt rtsort.awk pcnt[$2]++ # count predecessors of $2 rtsort.awk slist[$1, ++scnt[$1]] = $2 # add $2 to successors of $1 rtsort.awk } rtsort.awk rtsort.awk END { for (node in pcnt) { rtsort.awk nodecnt++ rtsort.awk if (pcnt[node] == 0) rtsort.awk rtsort(node) rtsort.awk } rtsort.awk if (pncnt != nodecnt) rtsort.awk print "error: input contains a cycle" rtsort.awk printf("\n") rtsort.awk } rtsort.awk rtsort.awk function rtsort(node, i, s) { rtsort.awk visited[node] = 1 rtsort.awk for (i = 1; i <= scnt[node]; i++) rtsort.awk if (visited[s = slist[node, i]] == 0) rtsort.awk rtsort(s) rtsort.awk else if (visited[s] == 1) rtsort.awk printf("error: nodes %s and %s are in a cycle\n", rtsort.awk s, node) rtsort.awk visited[node] = 2 rtsort.awk printf(" %s", node) rtsort.awk pncnt++ # count nodes printed rtsort.awk } make.awk # make - maintain dependencies make.awk make.awk BEGIN { make.awk while (getline <"makefile" > 0) make.awk if ($0 ~ /^[A-Za-z]/) { # $1: $2 $3 ... make.awk sub(/:/, "") make.awk if (++names[nm = $1] > 1) make.awk error(nm " is multiply defined") make.awk for (i = 2; i <= NF; i++) # remember targets make.awk slist[nm, ++scnt[nm]] = $i make.awk } else if ($0 ~ /^\t/) # remember cmd for make.awk cmd[nm] = cmd[nm] $0 "\n" # current name make.awk else if (NF > 0) make.awk error("illegal line in makefile: " $0) make.awk ages() # compute initial ages make.awk if (ARGV[1] in names) { make.awk if (update(ARGV[1]) == 0) make.awk print ARGV[1] " is up to date" make.awk } else make.awk error(ARGV[1] " is not in makefile") make.awk } make.awk make.awk function ages( f,n,t) { make.awk for (t = 1; ("ls -t" | getline f) > 0; t++) make.awk age[f] = t # all existing files get an age make.awk close("ls -t") make.awk for (n in names) make.awk if (!(n in age)) # if n has not been created make.awk age[n] = 9999 # make n really old make.awk } make.awk function update(n, changed,i,s) { make.awk if (!(n in age)) error(n " does not exist") make.awk if (!(n in names)) return 0 make.awk changed = 0 make.awk visited[n] = 1 make.awk for (i = 1; i <= scnt[n]; i++) { make.awk if (visited[s = slist[n, i]] == 0) update(s) make.awk else if (visited[s] == 1) make.awk error(s " and " n " are circularly defined") make.awk if (age[s] <= age[n]) changed++ make.awk } make.awk visited[n] = 2 make.awk if (changed || scnt[n] == 0) { make.awk printf("%s", cmd[n]) make.awk system(cmd[n]) # execute cmd associated with n make.awk ages() # recompute all ages make.awk age[n] = 0 # make n very new make.awk return 1 make.awk } make.awk return 0 make.awk } make.awk function error(s) { print "error: " s; exit }
__label__pos
0.99469
Smartphone Root Redmi 2 [100% Working] With CWM/TWRP and SuperSu Comments (9) 1. Jyoti prakash says: Hey buddy. Thanks. The instructions were simple and easy. So thanks. 2. Abhishek says: The device version I have is 2014818.As there are many versions,will files for wt88047 be supported? 3. balaji says: super helped me to root my device quickly and safely 1. Devendra says: Glad, it could help. 4. vishal vishwakarma says: thank you for guide about root redmi 2 5. r Jooseph says: thanks for your help.but if am rooting the device suddenly if it bricks , can i bring it back to my stock rom i.e MIUI 8 and is there any need of the backup? 1. Devendra says: Hello Jooseph, If you have the same version or newer fastboot version ROM, flash it using MIFlash tool while selecting “Save User data” option. SO that it won’t delete anything and it’ll be like you’ll continue where you left. 6. Drego says: Thanks. It was hassle-free and I could do it easily!! 7. saurabh says: thankyou brother it is very simple to follow your instruction. thankyou so much Leave a Reply Your email address will not be published. Required fields are marked *
__label__pos
0.98116
64 As I understand it, the override keyword states that a given declaration implements a base virtual method, and the compilation should fail if there is no matching base method found. My understanding of the final keyword is that it tells the compiler that no class shall override this virtual function. So is override final redundant? It seems to compile fine. What information does override final convey that final does not? What is the use case for such a combination? 55 final does not require the function to override anything in the first place. Its effect is defined in [class.virtual]/4 as If a virtual function f in some class B is marked with the virt-specifier final and in a class D derived from B a function D::f overrides B::f, the program is ill-formed. That's it. Now override final would simply mean „This function overrides a base class one (override) and cannot be overriden itself (final).“ final on it's own would impose a weaker requirement. override and final have independent behavior. Note that final can only be used for virtual functions though - [class.mem]/8 A virt-specifier-seq shall appear only in the declaration of a virtual member function (10.3). Hence the declaration void foo() final; Is effectively the same as virtual void foo() final override; Since both require foo to override something - the second declaration by using override, and the first one by being valid if and only if foo is implicitly virtual, i.e. when foo is overriding a virtual function called foo in a base class, which makes foo in the derived one automatically virtual. Thus override would be superfluous in declarations where final, but not virtual, occurs. Still, the latter declaration expresses the intent a lot clearer and should definitely be preferred. 11 • 10 I like your answer, but I'd like to clarify that from a practical point of view virtual void f() final override and void f() final are equivalent in the sense that both of them fail if they do not override something. final is only valid for virtual functions and the latter declaration of f is only virtual if it overrides a function. Error messages for the latter one may be less precise though. Apr 3 '15 at 11:58 • 5 the latter declaration expresses the intent a lot clearer - I don't think so. Your description has led me to prefer the first declaration style for the same reason I'd avoid to add virtual when overriding a virtual method: it adds no value. But it's of course better than writing virtual void foo() final; – Wolf Oct 5 '17 at 14:36 • 1 @Wolf You don't think it expresses the intent clearer? Could you articulate exactly what the intent is that is expressed by the first declaration form? – Columbo Oct 5 '17 at 14:37 • 1 I think final has to be virtual after your description (I'm just learning these new features) and since you omit the virtual keyword, it wouldn't compile if it was not an override. Or maybe I am overlooking a case that could lead to unwanted behaviour? The use of non-overriding final virtual I'm just trying to figure out below Angew's answer ... – Wolf Oct 5 '17 at 14:51 • 1 I should add that bot variants you list in your answer are clear concerning their intent, only virtual void foo() final; would be unclear. – Wolf Oct 5 '17 at 14:55 21 final does not necessarily imply that the function is overridden. It's perfectly valid (if of somewhat dubious value) to declare a virtual function as final on its first declaration in the inheritance hierarchy. One reason I can think of to create a virtual and immediately final function is if you want to prevent a derived class from giving the same name & parameters a different meaning. 7 • 1 +1 For showing a feasible use-case of a virtual/immediately-final function. I wonder if the compiler still generates a vtable when this is done though? – Carlton Apr 2 '15 at 12:52 • 2 @Carlton That's up to the compiler. As far as the standard is concerned, it makes the class polymorphic, however - which means dynamic_cast must work for it, for example. I believe vtable implementations of virtual functions also need the vtable to make dynamic_cast work. Apr 2 '15 at 12:55 • Bahh, I just posted a question based on my previous comment, but you pretty much just answered it here. Thanks. – Carlton Apr 2 '15 at 13:05 • 2 If you want to prevent a derived class from giving the same name & parameters a different meaning, don't make it a virtual method. I don't see a feasible use-case in this vague description (unlike @Carlton), but I am really interested in one. – Wolf Oct 5 '17 at 14:26 • @Wolf I meant same non-qualified name, of course. Comments can't show code well, but I've written an example offsite. Derived2 cannot hide mustRemainBase. Oct 5 '17 at 14:33 6 (Skip to the end to see the conclusion if you're in a hurry.) Both override and final can appear only in declaration in a virtual function. And both key words can be used in the same function declaration, but whether it is useful to use them both depends on situations. Take the following code as an example: #include <iostream> using std::cout; using std::endl; struct B { virtual void f1() { cout << "B::f1() "; } virtual void f2() { cout << "B::f2() "; } virtual void f3() { cout << "B::f3() "; } virtual void f6() final { cout << "B::f6() "; } void f7() { cout << "B::f7() "; } void f8() { cout << "B::f8() "; } void f9() { cout << "B::f9() "; } }; struct D : B { void f1() override { cout << "D::f1() "; } void f2() final { cout << "D::f2() "; } void f3() override final { cout << "D::f3() "; } // need not have override // should have override, otherwise add new virtual function virtual void f4() final { cout << "D::f4() "; } //virtual void f5() override final; // Error, no virtual function in base class //void f6(); // Error, override a final virtual function void f7() { cout << "D::f7() "; } virtual void f8() { cout << "D::f8() "; } //void f9() override; // Error, override a nonvirtual function }; int main() { B b; D d; B *bp = &b, *bd = &d; D *dp = &d; bp->f1(); bp->f2(); bp->f3(); bp->f6(); bp->f7(); bp->f8(); bp->f9(); cout << endl; bd->f1(); bd->f2(); bd->f3(); bd->f6(); bd->f7(); bd->f8(); bd->f9(); cout << endl; dp->f1(); dp->f2(); dp->f3(); dp->f6(); dp->f7(); dp->f8(); dp->f9(); cout << endl; return 0; } The output is B::f1() B::f2() B::f3() B::f6() B::f7() B::f8() B::f9() D::f1() D::f2() D::f3() B::f6() B::f7() B::f8() B::f9() D::f1() D::f2() D::f3() B::f6() D::f7() D::f8() B::f9() 1. Compare f1() and f6(). We know that override and final is indepent sematically. • override means the function is overriding a virtual function in its base class. See f1() and f3(). • final means the function cannot be overrided by its derived class. (But the function itself need not override a base class virtual function.) See f6() and f4(). 2. Compare f2() and f3(). We know that if a member function is declared without virtual and with final, it means that it already override a virtual function in base class. In this case, the key word override is redundant. 3. Compare f4() and f5(). We know that if a member function is declared with virtualand if it is not the first virtual function in inheritance hierarchy, then we should use override to specify the override relationship. Otherwise, we may accidentally add new virtual function in derived class. 4. Compare f1() and f7(). We know that any member function, not just virtual ones, can be overridden in derived class. What virtual specifies is polymorphism, which means the decision as to which function to run is delayed until run time instead of compile time. (This should be avoid in practice.) 5. Compare f7() and f8(). We know that we can even override a base class function and make it a new virtual one. (Which means any member function f8() of class derived from D will be virtual.) (This should be avoid in practice too.) 6. Compare f7() and f9(). We know that override can help us find the error when we want to override a virtual function in derived class while forgot to add key word virtual in base class. In conclusion, the best practice in my own view is: • only use virtual in declaration of the first virtual function in base class; • always use override to specify override virtual function in derived class, unless final is also specified. 2 The following code (with the final specifier) compiles. But compilation fails when final is replaced with override final. Thus override final conveys more information (and prevents compilation) than just final. class Base { public: virtual ~Base() {} }; class Derived : public Base { public: virtual void foo() final { std::cout << "in Derived foo\n"; } }; Essentially, override final says this method cannot be overridden in any derived class and this method overrides a virtual method in a base class. final alone doesn't specify the base class overriding part. 1 • 1 "final alone doesn't specify the base class overriding part." Correct, but void foo() final does. Dec 22 '15 at 13:02 1 No final does not necessarily imply override. In fact, you could declare a virtual function that you immediately declare final see here. The final keyword simply states that no derived class can create an override of this function. The override keyword is important in that it enforces that you are indeed actually overriding a virtual function (instead of declaring a new unrelated one). See this post regarding override So long story short, they each serve their own particular purpose, and it is often correct to use both. 2 • 3 Hmm, I wish that first example you posted wouldn't compile, because the way I see it declaring a virtual function and making it final in the same breath makes absolutely no sense at all. – antred Apr 15 '15 at 9:37 • 2 cpp core guidelines say only use one github.com/isocpp/CppCoreGuidelines/blob/master/… Oct 26 '17 at 20:27 Your Answer By clicking “Post Your Answer”, you agree to our terms of service, privacy policy and cookie policy Not the answer you're looking for? Browse other questions tagged or ask your own question.
__label__pos
0.811854
How to Factor & Simplify Radical Expressions by Tamara Runzel, Demand Media Radicals are also known as roots, which are the reverse of exponents. With exponents, you raise a number to a certain power. With roots or radicals, you break down the number. Radical expressions can contain numbers and/or variables. To simplify a radical expression, you must first factor the expression. A radical is simplified when you can not take out any other roots. Simplifying Radical Expressions With No Variables Identify the parts of a radical expression. The check-mark like symbol is called the "radical" or "root" symbol. The numbers and variables under the symbol are called the "radicand". If there is a small number outside the check mark, that is called the "index". Every root except a square root has an "index". For example, a cubed root would have a small three outside the radical symbol and that three is the "index" of the cubed root. Factor the "radicand" so that at least one factor has a perfect square. A perfect square exists when one number times itself equals the "radicand". For example, with the square root of 200, you could factor it out to the "square root of 100 times the square root of 2". You could also factor it out to "25 times 8", but you would need to take that one step further since you could break "8" into "4 times 2". Figure out the square root of the factor that has a perfect square. In the example, the square root of 100 is 10. The 2 does not have a square root. Rewrite your simplified radical as "10 square root of 2". If the index is a number other than a square root, you have to find that root. For example, the cubed root of 128 is factored out as the "cubed root of 64 times the cubed root of 2". The cubed root of 64 is 4, so your new expression is "4 cubed root of 2". Simplifying Radical Expressions With Variables Factor out the radicand, including variables. Use the example, the cubed root of "81a^5 b^4." Factor 81 so that one of the factors has a cubed root. At the same time, separate the variables so that they are raised to the third power. The example is now the cubed root of "27a^3 b^3" times the cubed root of "3a^2 b." Figure out the cubed root. In the example, the cubed root of 27 is 3 because 3 times 3 times 3 equals 27. You can also remove the exponents from the first factor because the cubed root of something raised to the third power is one. Rewrite your expression as "3ab" cubed root of "3a^2b." Tip • Combine any radicals with the same index number by multiplying or dividing. For example, the cubed root of 3 times the cubed root of 2 becomes the cubed root of 6. The square root of 50 over the square root of 5 becomes the square root of 10. About the Author Tamara Runzel has been writing military, parenting, family and relationship articles since 2008. Runzel started in television news, followed by education before deciding to be a stay at home mom. Her articles have appeared in military publications as well as numerous online publications. She holds a Bachelor of Arts in communication from University of the Pacific. Photo Credits
__label__pos
0.74763
Structured Data Information that is highly organized and formatted in a way that is easily searchable and accessible by computer systems, typically stored in databases.   Structured data is essential in the field of AI as it allows for efficient storage, retrieval, and analysis, which are foundational for machine learning models and data-driven decision-making processes. It is characterized by its organization into predefined models, such as tables with rows and columns in relational databases, where each data field is discrete and adheres to a specific data type and constraints. This high level of organization and predictability makes structured data ideal for tasks that require precision and speed in data processing, enabling algorithms to perform complex queries and analyses with relative ease. Structured data formats are widely used in various applications, including financial records, inventory management, and customer relationship management systems, where consistency and accuracy in data handling are paramount. Historical overview: The concept of structured data has been around since the early days of computing, but it gained prominence with the advent of relational databases in the 1970s, pioneered by Edgar F. Codd's work on the relational model. This period marked the beginning of structured data's widespread adoption in information technology systems, facilitating the organized storage and efficient processing of data. Key contributors: Edgar F. Codd, an English computer scientist working at IBM, is a key figure in the development of structured data concepts, particularly through his seminal work on the relational database model, which laid the foundation for structured data management and manipulation.
__label__pos
0.930372
T-SQL: Dynamic SQL and SQL Injection (sp_executesql) When we create dynamic SQL code, we can add the parameters in two ways: 1. Concatenate them into the dynamic code with + 2. Use sp_executesql In this example I will show both scenarios when a SQL injection is involved. First Name Concatenate the parameters into the dynamic code with + Concatenate Concatenate Print Concatenate and SQL injection. Concatenate SQL Injection Concatenate SQL Injection Print Plug the parameters into the dynamic code with sp_executesql sp_executesql sp_executesql Print sp_executesql and SQL Injection sp_executesql SQL Injection sp_executesql SQL Injection Print As shown in the example above, it is recommended to execute dynamic SQL only with sp_executesql. Keep it simple :-) Leave a comment Your email address will not be published. Required fields are marked *
__label__pos
0.871006
Kotlin Multiplatform Mobile Docs Help Add dependencies to KMM modules Every application requires a set of libraries in order to operate successfully. A KMM application can depend on multiplatform libraries that work on both iOS and Android, and it can depend on platform-specific iOS and Android libraries. Here you can learn how to add: Multiplatform libraries You can add dependencies on libraries that have adopted Kotlin Multiplatform technology, such as kotlinx.coroutines and SQLDelight. The authors of these libraries usually provide guides for adding their dependencies to your project. This page covers basic dependency use cases: Learn more about configuring dependencies. Check out this community-maintained list of Kotlin Multiplatform libraries. Dependency on the Kotlin standard library The Kotlin standard library is added automatically to all multiplatform projects, you don’t have to do anything manually. Dependency on a library shared for all source sets If you want to use a library from all source sets, you can add it only to the common source set. The Kotlin Multiplatform Mobile plugin will add the corresponding parts to any other source sets automatically. kotlin { sourceSets { commonMain { dependencies { implementation 'org.jetbrains.kotlinx:kotlinx-coroutines-core:1.4.2' } } androidMain { dependencies { //dependency to platform part of kotlinx.coroutines will be added automatically } } } } kotlin { sourceSets["commonMain"].dependencies { implementation("org.jetbrains.kotlinx:kotlinx-coroutines-core:1.4.2") } sourceSets["androidMain"].dependencies { //dependency to platform part of kotlinx.coroutines will be added automatically } } Dependency on a library used in specific source sets If you want to use a multiplatform library just for specific source sets, you can add it exclusively to them. The specified library declarations will then be available only in those source sets. kotlin { sourceSets { commonMain { dependencies { // kotlinx.coroutines will be available in all source sets implementation 'org.jetbrains.kotlinx:kotlinx-coroutines-core:1.4.2' } } androidMain { dependencies { } } iosMain { dependencies { // SQLDelight will be available only in the iOS source set, but not in Android or common implementation 'com.squareup.sqldelight:native-driver:1.4.1' } } } } kotlin { sourceSets["commonMain"].dependencies { //kotlinx.coroutines will be available in all source sets implementation("org.jetbrains.kotlinx:kotlinx-coroutines-core:1.4.2") } sourceSets["androidMain"].dependencies { } sourceSets["iosX64Main"].dependencies { //SQLDelight will be available only in the iOS source set, but not in Android or common implementation("com.squareup.sqldelight:native-driver:1.4.1) } } Dependency on another multiplatform project You can connect one multiplatform project to another as a dependency. To do this, simply add a project dependency to the source set that needs it. If you want to use a dependency in all source sets, add it to the common one. In this case, other source sets will get their versions automatically. kotlin { sourceSets { commonMain { dependencies { implementation project(':some-other-multiplatform-module') } } androidMain { dependencies { //platform part of :some-other-multiplatform-module will be added automatically } } } } kotlin { sourceSets["commonMain"].dependencies { implementation(project(":some-other-multiplatform-module")) } sourceSets["androidMain"].dependencies { //platform part of :some-other-multiplatform-module will be added automatically } } iOS dependencies Apple SDK dependencies (such as Foundation or Core Bluetooth) are available as a set of prebuilt libraries in Kotlin Multiplatform Mobile projects. They do not require any additional configuration. You can also reuse other libraries and frameworks from the iOS ecosystem in your iOS source sets. Kotlin supports interoperability with Objective-C dependencies and Swift dependencies if their APIs are exported to Objective-C with the @objc attribute. Pure Swift dependencies are not yet supported. Integration with the CocoaPods dependency manager is also supported with the same limitation – you cannot use pure Swift pods. We recommend using CocoaPods to handle iOS dependencies in Kotlin Multiplatform Mobile (KMM) projects. Manage dependencies manually only if you want to tune the interop process specifically or if you have some other strong reason to do so. With CocoaPods 1. Perform initial CocoaPods integration setup 2. Add a dependency on a Pod library from the CocoaPods repository that you want to use by including pod() in the build script of your project. kotlin { cocoapods { //.. pod('AFNetworking') { version = '~> 4.0.1' } } } kotlin { cocoapods { //.. pod("AFNetworking") { version = "~> 4.0.1" } } } 3. Re-import the project. To use the dependency in your Kotlin code, import the package cocoapods.<library-name>. In the example above, that would be: import cocoapods.AFNetworking.* Learn more about CocoaPods integration. Without CocoaPods If you don’t want to use CocoaPods, you can use the cinterop tool to create Kotlin bindings for Objective-C or Swift declarations. This will allow you to call them from Kotlin code. To do this: 1. Download your dependency. 2. Build it to get its binaries. 3. Create a special .def file that describes this dependency to cinterop. 4. Adjust your build script to generate bindings during the build. The steps differ a bit for libraries and frameworks, but the idea remains the same. Add a library without CocoaPods 1. Download the library source code and place it somewhere where you can reference it from your project. 2. Build a library (library authors usually provide a guide on how to do this) and get a path to the binaries. 3. In your project, create a .def file, for example DateTools.def. 4. Add a first string to this file: language = Objective-C. If you want to use a pure C dependency, omit the language property. 5. Provide values for two mandatory properties: • headers describes which headers will be processed by cinterop. • package sets the name of the package these declarations should be put into. For example: headers = DateTools.h package = DateTools 6. Add information about interoperability with this library to the build script: • Pass the path to the .def file. This path can be omitted if your .def file has the same name as cinterop and is placed in the src/nativeInterop/cinterop/ directory. • Tell cinterop where to look for header files using the includeDirs option. • Configure linking to library binaries. kotlin { iosX64 { compilations.main { cinterops { DateTools { // Path to .def file defFile("src/nativeInterop/cinterop/DateTools.def") // Directories for header search (an analogue of the -I<path> compiler option) includeDirs("include/this/directory", "path/to/another/directory") } anotherInterop { /* ... */ } } } binaries.all { // Linker options required to link to the library. linkerOpts "-L/path/to/library/binaries", "-lbinaryname" } } } kotlin { iosX64() { compilations.getByName("main") { val DateTools by cinterops.creating { // Path to .def file defFile("src/nativeInterop/cinterop/DateTools.def") // Directories for header search (an analogue of the -I<path> compiler option) includeDirs("include/this/directory", "path/to/another/directory") } val anotherInterop by cinterops.creating { /* ... */ } } binaries.all { // Linker options required to link to the library. linkerOpts("-L/path/to/library/binaries", "-lbinaryname") } } } 7. Build the project. Now you can use this dependency in your Kotlin code. To do that, import the package you’ve set up in the package property in the .def file. For the example above, this will be: import DateTools.* Add a framework without CocoaPods 1. Download the framework source code and place it somewhere that you can reference it from your project. 2. Build the framework (framework authors usually provide a guide on how to do this) and get a path to the binaries. 3. In your project, create a .def file, for example MyFramework.def. 4. Add the first string to this file: language = Objective-C. If you want to use a pure C dependency, omit the language property. 5. Provide values for these two mandatory properties: • modules – the name of the framework that should be processed by the cinterop. • package – the name of the package these declarations should be put into. For example: modules = MyFramework package = MyFramework 6. Add information about interoperability with the framework to the build script: • Pass the path to the .def file. This path can be omitted if your .def file has the same name as the cinterop and is placed in the src/nativeInterop/cinterop/ directory. • Pass the framework name to the compiler and linker using the -framework option. Pass the path to the framework sources and binaries to the compiler and linker using the -F option. kotlin { iosX64 { compilations.main { cinterops { DateTools { // Path to .def file defFile("src/nativeInterop/cinterop/MyFramework.def") compilerOpts("-framework", "MyFramework", "-F/path/to/framework/") } anotherInterop { /* ... */ } } } binaries.all { // Tell the linker where the framework is located. linkerOpts("-framework", "MyFramework", "-F/path/to/framework/") } } } kotlin { iosX64() { compilations.getByName("main") { val DateTools by cinterops.creating { // Path to .def file defFile("src/nativeInterop/cinterop/DateTools.def") compilerOpts("-framework", "MyFramework", "-F/path/to/framework/" } val anotherInterop by cinterops.creating { /* ... */ } } binaries.all { // Tell the linker where the framework is located. linkerOpts("-framework", "MyFramework", "-F/path/to/framework/") } } } 7. Build the project. Now you can use this dependency in your Kotlin code. To do this, import the package you’ve set up in the package property in the .def file. For the example above, this will be: import MyFramework.* Learn more about Objective-C and Swift interop and configuring cinterop from Gradle. Workaround to enable IDE support for the shared iOS source set Due to a known issue, you won't be able to use IDE features, such as code completion and highlighting, for the shared iOS source set in a multiplatform project with hierarchical structure support if your project depends on: • Multiplatform libraries that don't support the hierarchical structure. • Third-party iOS libraries, with the exception of platform libraries supported out of the box. This issue applies only to the shared iOS source set. The IDE will correctly support the rest of the code. To enable IDE support in these cases, you can work around the issue by adding the following code to build.gradle.(kts) in the shared directory of your project: def iosTarget if (System.getenv("SDK_NAME")?.startsWith("iphoneos")) { iosTarget = kotlin.&iosArm64 } else { iosTarget = kotlin.&iosX64 } val iosTarget: (String, KotlinNativeTarget.() -> Unit) -> KotlinNativeTarget = if (System.getenv("SDK_NAME")?.startsWith("iphoneos") == true) ::iosArm64 else ::iosX64 iosTarget("ios") In this code sample, the configuration of iOS targets depends on the environment variable SDK_NAME, which is managed by Xcode. For each build, you'll have only one iOS target, named ios, that uses the iosMain source set. There will be no hierarchy of the iosMain, iosArm64, and iosX64 source sets. Android dependencies The workflow for adding Android-specific dependencies to a KMM module is the same as it is for pure Android projects: add a line to your Gradle build script declaring the dependency you need and import the project. You’ll then be able to use this dependency in your Kotlin code. We recommend adding Android dependencies to KMM projects by adding them to a specific Android source set: sourceSets { androidMain { dependencies { implementation 'com.example.android:app-magic:12.3' } } } sourceSets["androidMain"].dependencies { implementation("com.example.android:app-magic:12.3") } Moving what was a top-level dependency in an Android project to a specific source set in a KMM project might be difficult if the top-level dependency had a non-trivial configuration name. For example, to move а debugImplementation dependency from the top level of an Android project, you’ll need to add an implementation dependency to the source set named androidDebug. To minimize the effort you have to put in to deal with migration problems like this, you can add a dependencies block inside the android block: android { ... dependencies { implementation 'com.example.android:app-magic:12.3' } } android { ... dependencies { implementation("com.example.android:app-magic:12.3") } } Dependencies declared here will be treated exactly the same as dependencies from the top-level block, but declaring them this way will also separate Android dependencies visually in your build script and make it less confusing. Putting dependencies into a standalone dependencies block at the end of the script, in a way that is idiomatic to Android projects, is also supported. However, we strongly recommend against doing this because configuring a build script with Android dependencies in the top-level block and other target dependencies in each source set is likely to cause confusion. Learn more about adding dependencies in Android documentation. Last modified: 29 March 2021
__label__pos
0.991333
早报:《这是自身的烟尘》开发商领衔,波兰独自游戏厂商大举进军国内 单元测试 单元测试使用unittest。 mock是友善创建了MockClient,因为unittest还未曾asyncio的mock,并且sanic的测试接口也是殡葬request请求,所以相比较麻烦. 先前时期可以拔取pytest。 Example: from sanic_ms.tests import APITestCase from server import app class TestCase(APITestCase): _app = app _blueprint = 'visit' def setUp(self): super(TestCase, self).setUp() self._mock.get('/cities/1', payload={'id': 1, 'name': 'shanghai'}) self._mock.get('/roles/1', payload={'id': 1, 'name': 'shanghai'}) def test_create_user(self): data = { 'name': 'test', 'age': 2, 'city_id': 1, 'role_id': 1, } res = self.client.create_user(data=data) body = ujson.loads(res.text) self.assertEqual(res.status, 200) • 其中_blueprint为blueprint名称 • 在setUp函数中,使用_mock来注册mock音讯, 这样就不会访问真正的服务器, payload为回去的body音讯 • 使用client变量调用各类函数, data为body消息,params为路径的参数新闻,其他参数是route的参数 4.《城市:天际线》“黄色之城”扩张包上线,序列折扣持续中 中间件 @app.middleware('request') async def cros(request): if request.method == 'POST' or request.method == 'PUT': request['data'] = request.json span = before_request(request) request['span'] = span @app.middleware('response') async def cors_res(request, response): span = request['span'] if 'span' in request else None if response is None: return response result = {'code': 0} if not isinstance(response, HTTPResponse): if isinstance(response, tuple) and len(response) == 2: result.update({ 'data': response[0], 'pagination': response[1] }) else: result.update({'data': response}) response = json(result) if span: span.set_tag('http.status_code', "200") if span: span.set_tag('component', request.app.name) span.finish() return response • 创办span, 用于日志追踪 • 对response进行包装,统一格式 挪动之间,游戏师长出席新形式“无尽远征”,原本争权格局中的杂兵将被骷髅代替,这么些遗骨固然可以一击杀死,但它们的攻击力比经常士兵强大的多。 丰富处理 使用 app.error_handler = CustomHander() 对抛出的丰裕举行处理 Example: from sanic_ms.exception import ServerError @visit_bp.delete('/users/<id:int>') async def del_user(request, id): raise ServerError(error='内部错误',code=10500, message="msg") • code: 错误码,无丰裕时为0,另外值都为异常 • message: 状态码音讯 • error: 自定义错误音信 • status_code: http状态码,使用专业的http状态码 2.《空洞骑士》第二个免费扩大包下一周上线 介绍 行使python做web开发面临的一个最大的题目就是性质,在缓解C10K问题上显的有点困难。有些异步框架Tornado、Twisted、Gevent 等就是为着解决性能问题。这个框架在性能上稍稍提高,不过也油不过生了各个奇异的题材难以解决。 在python3.6中,官方的异步协程库asyncio正式成为专业。在保留便捷性的还要对性能有了很大的升级,已经面世过多的异步框架使用asyncio。 选择较早的异步框架是aiohttp,它提供了server端和client端,对asyncio做了很好的包装。但是开发形式和最盛行的微框架flask不同,flask开发简单,轻量,高效。 微服务是最近最火开发情势,它解决了复杂问题,提升开支功用,便于安排等优点。 幸而结合那多少个亮点, 以Sanic为底蕴,集成六个流行的库来搭建微服务。 Sanic框架是和Flask相似的异步协程框架,简单轻量,并且性能很高。 本项目就是以Sanic为根基搭建的微服务框架。 美利坚同盟国卡脚模拟》在杉果有售,感兴趣的玩家不妨来看望。 使用 尽管如此官方尚未披露“新墨西哥”何时能与我们晤面,然则她们表示那么些DLC已经八九不离十完工了。 异步处理 由于使用的是异步框架,可以将有些IO请求并行处理 Example: async def async_request(datas): # async handler request results = await asyncio.gather(*[data[2] for data in datas]) for index, obj in enumerate(results): data = datas[index] data[0][data[1]] = results[index] @user_bp.get('/<id:int>') @doc.summary("get user info") @doc.description("get user info by id") @doc.produces(Users) async def get_users_list(request, id): async with request.app.db.acquire(request) as cur: record = await cur.fetch( """ SELECT * FROM users WHERE id = $1 """, id) datas = [ [record, 'city_id', get_city_by_id(request, record['city_id'])] [record, 'role_id', get_role_by_id(request, record['role_id'])] ] await async_request(datas) return record get_city_by_id, get_role_by_id是并行处理。 就像你预料的一致,新墨西哥具备震撼的大山里和孤寂的高速公路,游戏中似乎还会暴发阻碍交通的坠机事件,emmm……坠机这种严重事故真的有那么频繁吗? 模型设计 & ORM Peewee is a simple and small ORM. It has few (but expressive) concepts, making it easy to learn and intuitive to use。 ORM使用peewee, 只是用来做模型设计和migration, 数据库操作使用asyncpg。 Example: # models.py class Users(Model): id = PrimaryKeyField() create_time = DateTimeField(verbose_name='create time', default=datetime.datetime.utcnow) name = CharField(max_length=128, verbose_name="user's name") age = IntegerField(null=False, verbose_name="user's age") sex = CharField(max_length=32, verbose_name="user's sex") city_id = IntegerField(verbose_name='city for user', help_text=CityApi) role_id = IntegerField(verbose_name='role for user', help_text=RoleApi) class Meta: db_table = 'users' # migrations.py from sanic_ms.migrations import MigrationModel, info, db class UserMigration(MigrationModel): _model = Users # @info(version="v1") # def migrate_v1(self): # migrate(self.add_column('sex')) def migrations(): try: um = UserMigration() with db.transaction(): um.auto_migrate() print("Success Migration") except Exception as e: raise e if __name__ == '__main__': migrations() • 运行命令 python migrations.py • migrate_v1函数添加字段sex, 在BaseModel中要先添加name字段 • info装饰器会创造表migrate_record来记录migrate,version每个model中务必唯一,使用version来记录是否执行过,还足以记录author,datetime • migrate函数必须以migrate_开头 另外,11 bit studios即将代理发行的几款来自波兰独立游戏开发者的游艺随笔如《寒霜重打击乐》(Frostpunk)、《57号塔》(Tower 57)、《夜勤人》(Moonlighter)等也将在上架杉果,各位玩家可以关注小杉果的持续报道。 数据库操作 asyncpg is the fastest driver among common Python, NodeJS and Go implementations 运用asyncpg为数据库驱动, 对数据库连接举办打包, 执行数据库操作。 不应用ORM做数据库操作,一个缘由是性质,ORM会有性能的耗费,并且无法采用asyncpg高性能库。另一个是单个微服务是很简短的,表结构不会很复杂,简单的SQL语句就足以处理来,没必要引入ORM。使用peewee只是做模型设计 Example: sql = "SELECT * FROM users WHERE name=$1" name = "test" async with request.app.db.acquire(request) as cur: data = await cur.fetchrow(sql, name) async with request.app.db.transaction(request) as cur: data = await cur.fetchrow(sql, name) • acquire() 函数为非事务, 对于只关乎到查询的利用非事务,可以增长查询效率 • tansaction() 函数为工作操作,对于增删改必须拔取工作操作 • 传扬request参数是为了获取到span,用于日志追踪 • TODO 数据库读写分离 游戏还进入了地图标记功用,玩家在不可能赢得道具时方可先举办标记,等到拿到新力量后再回头尝试。 Swagger API 1514528294957.jpg 现年万圣节以内,育碧将为旗下玩耍荣耀战魂开启一项特别活动“异世界的国宴”(Feast of the Otherworld)。 连带连接 swagger 和一般娱乐推出DLC骗钱被狂喷不同,玩家们纷纷为《棕色之城》打出了好评,DLC推出不到一天时间里积累评价84篇,好评则占到了72篇。游戏唯一的差评点似乎是新扩充包对mod的支撑并不到家,而非mod玩家则一心无需担心,可以购买游戏。 Response 数据 在回到时,不要回来sanic的response,直接重临原始数据,会在Middleware中对回到的数码开展处理,重临统一的格式,具体的格式可以[查看] 新情势下,占领目的点仅能获取200点临时点数奖励,要想让点数持续上升必须击杀敌方骷髅,这迫使玩家必须外出交战。 相关连接 opentracing zipkin jaeger 说到底还有理念最重大的事只好说,为了庆祝11 bit studios和杉果家的协作,该发行商旗下全方位游戏将自前几天起在杉果起首优惠,感兴趣的玩家别忘了吃完午饭来看看啊~ 类型地址 sanic-ms Example 专门好评的银河城风格独立游戏《空洞骑士》在下一周三就要生产第二个免费扩张包“Grimm Troupe”了。 有关连接 sanic 在《北美洲卡手模拟2》不断经过DLC扩充游戏领域的还要,《美利坚合众国卡内衣模特拟》也在增多新的区域,最新DLC中追加的“新墨西哥”就是内部之一。 服务端 动用sanic异步框架,有较高的性能,可是使用不当会造成blocking, 对于有IO请求的都要选拔异步库。添加库要慎重 sanic使用uvloop异步驱动,uvloop基于libuv使用Cython编写,性能比nodejs还要高。 效用表明: 5.《那是我的大战》厂商小说上架杉果并圆满让利,波兰独自游戏大举进军国内喽 连带连接 asyncpg benchmarks 我们好!前天《荣耀战魂》开启了万圣节特别活动;《空洞骑士》第二个免费扩张包下周上线;《美利坚合众国卡服装模特拟》新墨西哥DLC发布;《城市:天际线-紫色之城》发售好评,体系折扣中;《这是自个儿的战乱》厂商游戏上架杉果,折扣同时拉开。 代码覆盖 coverage erase coverage run --source . -m sanic_ms tests coverage xml -o reports/coverage.xml coverage2clover -i reports/coverage.xml -o reports/clover.xml coverage html -d reports • coverage2colver 是将coverage.xml 转换成 clover.xml,bamboo需要的格式是clover的。 赏心悦目战魂》方今在杉果有售,近日游乐以7.5折出售,售价186元,通过Uplay激活后仍可与Steam好友游玩。 相关连接 unittest coverage 3.《弥利坚卡脚模拟》“新墨西哥”DLC发布 API接口 api文档使用swagger标准。 Example: from sanic_ms import doc @user_bp.post('/') @doc.summary('create user') @doc.description('create user info') @doc.consumes(Users) @doc.produces({'id': int}) async def create_user(request): data = request['data'] async with request.app.db.transaction(request) as cur: record = await cur.fetchrow( """ INSERT INTO users(name, age, city_id, role_id) VALUES($1, $2, $3, $4, $5) RETURNING id """, data['name'], data['age'], data['city_id'], data['role_id'] ) return {'id': record['id']} • summary: api概要 • description: 详细描述 • consumes: request的body数据 • produces: response的回来数据 • tag: API标签 • 在consumes和produces中传播的参数可以是peewee的model,会分析model生成API数据, 在field字段的help_text参数来表示援引对象 • http://host:ip/openapi/spec.json 获取生成的json数据 1.《荣耀战魂》万圣节将翻开特别活动,小兵变骷髅 可怜处理 对抛出的相当举办处理,重回统一格式 甭管PC玩家如故手游玩家都很熟谙的生存游戏《这是自家的战事》前几天就要上架杉果了,一同上架的还有发行商11 bit studios的别样数款游戏,包括《Anomaly》类别和《Beat Cop》等创作。 任务 创造task消费queue中对span,用于日志追踪 《城市:天际线》的新扩大包“褐色之城”已经在前些天上线,该扩充包为游戏新增了汪洋环保核心建筑和绿化政策,以前已经把嬉戏探讨透彻的玩家们又有新情节可玩了。 Zipkin Server 1514528423339.jpg 1514528479787.jpg 依照惯例,特别节日的位移当然也会掉落特别奖励,比如专门的衣裳、表情、处决动画等。(疑似有神采中仍能放出Persona) 启动前 @app.listener('before_server_start') async def before_srver_start(app, loop): queue = asyncio.Queue() app.queue = queue loop.create_task(consume(queue, app.config.ZIPKIN_SERVER)) reporter = AioReporter(queue=queue) tracer = BasicTracer(recorder=reporter) tracer.register_required_propagators() opentracing.tracer = tracer app.db = await ConnectionPool(loop=loop).init(DB_CONFIG) • 创建DB连接池 • 创建Client连接 • 成立queue, 消耗span,用于日志追踪 • 创立opentracing.tracer举行日志追踪 *关于“杉果游戏”:一家为国内单机玩家操碎了心的一日游代理发行平台。已与B社、卡普空、华纳、万代南梦宫等70余家中外厂商建立协作,致力于将生化危机、上古卷轴、辐射、蝙蝠侠、黑暗之魂等单机游戏带给中华玩家。* 日志 & 分布式追踪系统 利用官方logging, 配置文件为logging.yml, sanic版本要0.6.0及以上。JsonFormatter将日志转成json格式,用于输入到ES Enter OpenTracing: by offering consistent, expressive, vendor-neutral APIs for popular platforms, OpenTracing makes it easy for developers to add (or switch) tracing implementations with an O(1) configuration change. OpenTracing also offers a lingua franca for OSS instrumentation and platform-specific tracing helper libraries. Please refer to the Semantic Specification. Grimm Troupe没有品级或道具门槛,玩家可以在其它时刻探索这一职责线。 连带连接 aiohttp “Grimm Troupe”带来了一个全新的任务线,其中有新的boss、敌人和联盟登场,4个新的符文被投入到了游戏当中,此外还有一位据说可以为玩家们的剩余金钱找到用处的经纪人会出演。 分布式追踪系统 • OpenTracing是以Dapper,Zipkin等分布式追踪系统为遵照, 建立了联合的正规。 • Opentracing跟踪每一个呼吁,记录请求所经过的每一个微服务,以链条的措施串联起来,对分析微服务的性质瓶颈至关紧要。 • 利用opentracing框架,可是在出口时转换成zipkin格式。 因为大多数分布式追踪系统考虑到性能问题,都是应用的thrift举办通信的,本着简便,Restful风格的旺盛,没有应用RPC通信。以日记的法子出口, 可以接纳fluentd, logstash等日志收集再输入到Zipkin。Zipkin是支撑HTTP输入的。 • 转变的span先无阻塞的放入queue中,在task中消费队列的span。前期可以添加上采样频率。 • 对于DB,Client都加上了tracing 《荣耀战魂》”异世界的庆功宴“活动现已上线,九月2日前有爱的玩家可以为了特别掉落肝爆了。 特点 • 利用sanic异步框架,简单,轻量,高效。 • 运用uvloop为核心引擎,使sanic在成千上万场地下单机并发甚至不亚于Golang。 • 使用asyncpg为数据库驱动,举办数据库连接,执行sql语句执行。 • 利用aiohttp为Client,对任何微服务举行走访。 • 运用peewee为ORM,但是只是用来做模型设计和migration。 • 动用opentracing为分布式追踪系统。 • 使用unittest做单元测试,并且接纳mock来避免访问其他微服务。 • 利用swagger做API标准,能自动生成API文档。 按部就班规矩,《城市:天际线》的本体和后边推出的壮大包也在促销,游戏本体在杉果售价19元,比Steam国区22元的售价更是有益于。而娱乐在此以前生产的几款DLC也在打折中,往日还没上车的玩家可以考虑补上多少个好评扩张包了。 有关连接 peewee 客户端 选拔aiohttp中的client,对客户端举行了简便的卷入,用于微服务之间访问。 Don’t create a session per request. Most likely you need a session per application which performs all requests altogether. A session contains a connection pool inside, connection reusage and keep-alives (both are on by default) may speed up total performance. Example: @app.listener('before_server_start') async def before_srver_start(app, loop): app.client = Client(loop, url='http://host:port') async def get_role_by_id(request, id): cli = request.app.client.cli(request) async with cli.get('/cities/{}'.format(id)) as res: return await res.json() @app.listener('before_server_stop') async def before_server_stop(app, loop): app.client.close() 对于访问不同的微服务可以创建多少个例外的client,这样各类client都会keep-alives 装饰器logger @logger(type='method', category='test', detail='detail', description="des", tracing=True, level=logging.INFO) async def get_city_by_id(request, id): cli = request.app.client.cli(request) • type: 日志类型,如 method, route • category: 日志系列,默认为app的name • detail: 日志详细音讯 • description: 日志描述,默认为函数的注脚 • tracing: 日志追踪,默认为True • level: 日志级别,默认为INFO 发表评论 电子邮件地址不会被公开。 必填项已用*标注
__label__pos
0.655132
Ray Toal Ray Toal - 2 months ago 16x iOS Question Replacement for enumerateSubstringsInRange in Swift 3 I'm upgrading code from Swift 2 to Swift 3 and ran across this error: wordcount.swift:7:5: error: value of type 'String' has no member 'enumerateSubstringsInRange' line.enumerateSubstringsInRange(range, options: .ByWords) {w,,,_ in In Swift 2, this method comes from a String extension of which the compiler is aware. I have not been able to locate this method in the Swift 3 library. It appears in the documentation for Foundation here: https://developer.apple.com/library/ios/documentation/Cocoa/Reference/Foundation/Classes/NSString_Class/index.html#//apple_ref/occ/instm/NSString/enumerateSubstringsInRange:options:usingBlock: My entire script is: import Foundation var counts = [String: Int]() while let line = readLine()?.lowercased() { let range = line.characters.indices line.enumerateSubstringsInRange(range, options: .ByWords) {w,_,_,_ in guard let word = w else {return} counts[word] = (counts[word] ?? 0) + 1 } } for (word, count) in (counts.sorted {$0.0 < $1.0}) { print("\(word) \(count)") } It works with Swift 2.2 (modulo the changes I have already made for Swift 3, such as lowercase -> lowercased and sort -> sorted ) but fails to compile with Swift 3. And very strangely, neither the Swift 3 command line compiler nor the Swift Migration assistant in XCode 8 Beta suggests a replacement, as it does for many other renamed methods. Perhaps enumerateSubstringsInRange is deprecated or its parameter names changed? Answer If you type str.enumerateSubstrings in a Playground, you'll see the following as a completion option: enumerateSubstrings(in: Range<Index>, options: EnumerationOptions, body: (substring: String?, substringRange: Range<Index>, enclosingRange: Range<Index>, inout Bool) -> ()) In addition to addressing the new enumerateSubstrings(in:options:body:) syntax, you need to also change how you get the range for the string: import Foundation var counts = [String: Int]() while let line = readLine()?.lowercased() { let range = line.startIndex ..< line.endIndex line.enumerateSubstrings(in: range, options: .byWords) {w,_,_,_ in guard let word = w else {return} counts[word] = (counts[word] ?? 0) + 1 } } for (word, count) in (counts.sorted {$0.0 < $1.0}) { print("\(word) \(count)") } Comments
__label__pos
0.998394
1 $\begingroup$ On a bounded Riemannian manifold without boundary, is it true that the norms $$\lVert u \rVert_{L^2(M)} + \lVert \Delta u \rVert_{L^2(M)}$$ is equivalent to the full $H^2$ norm $\lVert u \rVert_{H^2(M)}$? Can i have a reference to this please? Thanks $\endgroup$ • $\begingroup$ I think that is true if $u\in H^2_0(M)$ from Sobolev inequality $\endgroup$ – Lion Feb 27 '14 at 13:39 1 $\begingroup$ Yes, that's actually true. First, note that the inequality $$ ||u||_{L^2} + ||\Delta u ||_{L^2} \leq c ||u||_{H^2} $$ is always true (the constant may vary due to your definition though). Hence it remains to show that $$ ||u||_{H^2} \leq C \left( ||u||_{L^2} + ||\Delta u ||_{L^2} \right). $$ This follows from the fact that the Laplacian is elliptic and can e.g. be found in the book "Spin Geometry" by H. Blaine Lawson and Marie-Louise Michelsohn, see Theorem 5.2 (iii) in Chapter III, page 193. $\endgroup$ Your Answer By clicking “Post Your Answer”, you agree to our terms of service, privacy policy and cookie policy Not the answer you're looking for? Browse other questions tagged or ask your own question.
__label__pos
0.948102
RenderWithShader insanity Renderwithshader and shader replacement commands insanely triples or quadriples the scene triangle and vertex count. In a simple scene i have the builtin unity cube, and the skybox. So, i assume the vertices count should be maximum 16 or something, but my statistics window shows there are 496 vertices. EDIT: and i render the scene with a replacement shader to get an image of worldspace normals. That also doubles the vertices number. Is this a bug? Replacement shaders renders the entire scene again, assuming you’re not filtering it by layer or shader tag. So doubling the vertex count (as in, the number of rendered vertices) is normal, and triple for 3 replacement shaders, quadruple for 4, etc… I’m not sure why it jumps as high as 496 verts, though. I’d assume 32 verts for a cube (4 unique vertices per side) and another 32 verts for skybox (being a normal-flipped cube) so a total of 64 verts… that’s not a number that goes evenly into 496. Exactly, that is the problem. Even though the unity buildin cube may not be optimised and doesnt share the vertices and even if it has 32 vertices, and even if the skybox has the same problem, 496 is just a random insane number. I do render the whole scene once with replacement, and it if renders twice, it should only double the amount. Very strange indeed. mh. depending on the shader and scene setup/context, renderWithShader might trigger several passes. soo, try using the following shader as your replacement shader and see if that one does the magic: it should only double vert/tri count (or even less if your base shader is more complex). Shader "1PassLightmapped" { Properties { _MainTex ("Base", 2D) = "white" {} } CGINCLUDE struct v2f { half4 pos : SV_POSITION; half2 uv : TEXCOORD0; half2 uv2 : TEXCOORD1; }; #include "UnityCG.cginc" sampler2D _MainTex; ENDCG SubShader { Tags { "RenderType"="Opaque" } LOD 140 Pass { CGPROGRAM half4 unity_LightmapST; sampler2D unity_Lightmap; half4 _MainTex_ST; v2f vert (appdata_full v) { v2f o; o.pos = mul (UNITY_MATRIX_MVP, v.vertex); o.uv = TRANSFORM_TEX(v.texcoord, _MainTex); o.uv2 = v.texcoord1 * unity_LightmapST.xy + unity_LightmapST.zw; return o; } fixed4 frag (v2f i) : COLOR0 { fixed4 tex = tex2D (_MainTex, i.uv); #ifdef LIGHTMAP_ON fixed3 lm = DecodeLightmap (tex2D(unity_Lightmap, i.uv2)); tex.rgb *= lm; #else tex.rgb *= 0.65; #endif return tex; } #pragma vertex vert #pragma fragment frag #pragma fragmentoption ARB_precision_hint_fastest #pragma multi_compile LIGHTMAP_OFF LIGHTMAP_ON ENDCG } } FallBack Off } Ole: Same amount of tris and vertices, doesnt make any difference. This is my simple worldnormals replacement shader: Shader "Hidden/Aubergine/SceneNormals" { SubShader { Pass { Lighting Off Fog { Mode Off } CGPROGRAM #pragma exclude_renderers gles #pragma vertex vert struct v2f { float4 pos : SV_POSITION; fixed4 col : COLOR; }; v2f vert (float4 vertex : POSITION, float3 normal : NORMAL) { v2f o; o.pos = mul(UNITY_MATRIX_MVP, vertex); o.col.xyz = normal * 0.5 + 0.5; o.col.w = 1.0; return o; } ENDCG } } } And here is how i use the outcome as a rendertexture in an image effect which has 2 passes (1st pass saves a render texture and 2nd pass combines) using UnityEngine; using System.Collections; public class RenderWithShader : PostProcessBase { public Shader replacementShader; void OnRenderImage (RenderTexture source, RenderTexture destination) { RenderTexture sceneNormals = RenderTexture.GetTemporary (source.width, source.height, 0, RenderTextureFormat.ARGB32); transform.camera.SetReplacementShader(replacementShader, null); Graphics.Blit (source, sceneNormals, base.material, 0); base.material.SetTexture ("_BumpTex", sceneNormals); transform.camera.ResetReplacementShader(); RenderTexture.ReleaseTemporary (sceneNormals); Graphics.Blit (source, destination, base.material, 1); } } oh. making some assumptions about your scene setup, i don’t think this works as intended. also, it won’t achieve the desired world space normals output. here is how it should work. modified replacement shader: Shader "Hidden/Aubergine/SceneNormals" { SubShader { Tags { "RenderType"="Opaque" } Pass { Lighting Off Fog { Mode Off } CGPROGRAM #pragma exclude_renderers gles #pragma vertex vert #pragma vertex frag #include "UnityCG.cginc" struct v2f { float4 pos : SV_POSITION; fixed4 col : COLOR; }; v2f vert (appdata_base v) { v2f o; o.pos = mul(UNITY_MATRIX_MVP, v.vertex); o.col.xyz = v.normal * 0.5 + 0.5; o.col.w = 1.0; return o; } fixed4 frag( v2f i ) : COLOR { return i.col; } ENDCG } } } and the modified camera script using UnityEngine; using System.Collections; public class RWS : MonoBehaviour { public Shader replacementShader; void OnRenderImage (RenderTexture source, RenderTexture destination) { RenderTexture sceneNormals = RenderTexture.GetTemporary (source.width, source.height, 24, RenderTextureFormat.ARGB32); transform.camera.targetTexture = sceneNormals; transform.camera.RenderWithShader(replacementShader, ""); transform.camera.targetTexture = null; // display contents in game view Graphics.Blit (sceneNormals, destination); RenderTexture.ReleaseTemporary (sceneNormals); } } the poly / vert / tri # of this setup is 2 x geometric scene detail (render twice) + 2 tris for first fullscreen blit (implicit in OnRenderImage) + 2 tris for the fullscreen blit inside the script displaying the temp buffer. so exactly as expected :-). Well, i dont see any differences between your shader and my shader(it is actually the very same shader in the Unity documents) It renders the normals without the fragment. Also for the c# script, after using RenderWithShader command, it just sticks there and doesnt reset itself. There is no command like; transform.camera.ResetReplacementShader(); to do another graphics blit to render the screen normally after using renderwithshader. Anyhow, on to the topic, the vertices math seems right, i do blit twice. RenderWithShader doesn’t need a reset command, it’s a one-off render command. You only need to reset it if you use SetReplacementShader (which is a permenant setting), then ResetReplacementShader will set it back. Well, it doesnt work as a one-shot render in my case. I will double check if there is anything wrong with my code.
__label__pos
0.958866
写一个带忽略名单的打包压缩脚本 人生苦短,我用Python image.png 前几天朋友让我把手里的一个Android demo项目打个压缩包发给他,于是我试着直接对项目进行打包,好家伙,19M那么大,明明没几行代码.肯定是app/build文件夹惹的祸,一看才发现它有64M那么大,然后因为build是在app这个目录下,压缩的时候只能把app这个文件夹整个打包进去,可是我又不想要app目录下的build文件夹,于是我选择用Python写一个个性化压缩工具.只需要双击脚本就可以自动打包好这个项目. 首先我通过搜索找到Python有个自带的标准库 zipfile 通过这个库就可以把文件和目录打包进zip里. 首先引包,然后创建一个压缩包对象 import zipfile zip = zipfile.ZipFile('test.zip',mode='w',compression=zipfile.ZIP_BZIP2) 跟open()差不多的,test.zip 是文件名,mode='w' 就是写入的意思,也可以写 'a' 表示追加,compression表示压缩方法. 然后向压缩包添加一个文件,添加完了后一定要close() zip.write('README.md') zip.close() 好了,zipfile库的基础用法就这样了,当然你也可以用它解压压缩包,下面开始编写我的个性化压缩工具 #!/usr/bin/env python3 # -*- coding: utf-8 -*- __author__ = 'JethroCup' import zipfile import os output='out.zip' #输出文件名 ignore = ['build'] #忽略的文件或目录 ignore.append(output) #忽略输出的文件名,如果不忽略将产生死循环 代码中定义了一个ignore变量,就是我们的忽略名单了,当我们的文件遍历方法遍历到名单中的文件或文件夹名时将会当作没看到这个文件. 上文件遍历方法 def dfs_get_zip_file(input_path,result): ''' 遍历文件和目录的路径 input_path:要遍历那个文件夹 result:存储路径的list,遍历到的路径将存储在这个变量中 ''' files = os.listdir(input_path) for file in files: #筛选出忽略列表中的文件和目录 if file in ignore: continue if os.path.isdir(input_path+'/'+file): dfs_get_zip_file(os.path.join(input_path,file),result) else: result.append(os.path.join(input_path,file)) 遍历出了所有文件路径后,我们就将他们一一write进zip中 def autozip(): ''' 自动打包压缩 ''' zip = zipfile.ZipFile(output,mode='w',compression=zipfile.ZIP_BZIP2) filelist=[] #存储遍历到的路径 dfs_get_zip_file('.',filelist) for file in filelist: print(file) zip.write(file) zip.close() OK,最后 if __name__ == "__main__": autozip() input('已压缩至 {}'.format(output)) 双击6666双击运行试试看效果 image.png 添加新评论
__label__pos
0.695856
What is a Good Upload Speed for Live Streaming When it comes to live streaming, having a sufficient upload speed is essential for delivering high-quality video to your audience. But what is a good upload speed for live streaming? The upload speed required for live streaming depends on several factors, including the resolution and bitrate of the video, the number of viewers, and the platform you are using. For example, if you are streaming a video in 1080p resolution at a bitrate of 3,500 kbps (kilobits per second), you will need an upload speed of at least 3.5 Mbps (megabits per second) to ensure a stable and reliable stream. If you are streaming to a large number of viewers or using a platform that requires higher bitrates, you will need an even faster upload speed. Here are some general guidelines for the minimum upload speeds required for live streaming: • 720p video at 3,500 kbps: 3.5 Mbps • 1080p video at 4,500 kbps: 4.5 Mbps • 4K video at 13,000 kbps: 13 Mbps It's worth noting that these are minimum requirements and that you may need a faster upload speed for a high-quality stream, especially if you are streaming to a large number of viewers or using a platform that requires higher bitrates. To determine your upload speed, you can use an online speed test tool such as Speedtest or Fast.com. Simply connect to your router or modem and run the test to get a measure of your current upload speed. If your upload speed is not sufficient for live streaming, there are several steps you can take to improve it: • Upgrade your internet plan: If you are on a lower-tier internet plan, upgrading to a higher-speed plan may improve your upload speed. • Use a wired connection: A wired connection, such as Ethernet, can be faster and more stable than a wireless connection. If you are using a wireless connection, consider switching to a wired connection to improve your upload speed. • Optimize your network: There are several steps you can take to optimize your network and improve your upload speed, such as using a newer router, disabling bandwidth-intensive activities, and reducing the number of devices connected to the network. In conclusion, a good upload speed for live streaming depends on several factors, including the resolution and bitrate of the video, the number of viewers, and the platform you are using. As a general rule, you will need a minimum upload speed of at least 3.5 Mbps for 720p video, 4.5 Mbps for 1080p video, and 13 Mbps for 4K video. To determine your current upload speed and take steps to improve it if necessary, you can use an online speed test tool and optimize your network and connection. Chat with Us
__label__pos
0.928999
首页 新闻 搜索 专区 学院 jsPlumb在html与cshtml显示问题 0 悬赏园豆:5 [待解决问题] 最开始我用idea做了一个流程图的页面可以显示出来,可是为啥我用.net下的cshtml却显示不出连线,html和js代码我都是从idea复制的 如下: <!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8"> <title>Title</title> <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1"> <meta http-equiv="Access-Control-Allow-Origin" content=""> <style> .start { background-color: #fc8801; border-radius: 25em; -moz-border-radius: 25em; height: 38px; width: 92px; / padding: 5px; / / padding-left: 18px; / padding-top: 11px; / line-height: 20px; */ text-align: center; height: 32px; float: left; } .approve { background-color: #00b4fe; width: 92px; /* padding: 5px; */ /* padding-left: 18px; */ padding-top: 11px; /* line-height: 20px; */ text-align: center; height: 32px; float: left; } .copy { background-color: #ffd966; width: 92px; /* padding: 5px; */ /* padding-left: 18px; */ padding-top: 11px; /* line-height: 20px; */ text-align: center; height: 32px; float: left; } .end { background-color: #fc8801; border-radius: 25em; -moz-border-radius: 25em; height: 38px; width: 92px; /* padding: 5px; */ /* padding-left: 18px; */ padding-top: 11px; /* line-height: 20px; */ text-align: center; height: 32px; float: left; } </style> </head> <body> <div id="diagramContainer"> 111 <div style="margin-top: 200px"> <div class="start" id="start"><span>开始</span></div> <div style="margin-left: 100px" class="approve" id="step1">审批1</div> <div style="margin-left:150px" class="approve" id="step2">审批2</div> <div style="margin-left: 170px" class="copy" id="copy">分发</div> <div style="margin-left: 200px" class="end" id="end">结束</div> </div> </div> <script src="~/js/jquery/jquery-1.12.4.min.js"></script> <script src="~/js/jquery/jquery-ui.js"></script> <script src="~/js/jquery/jsplumb.min.js"></script> <script> var jsText = 'jsPlumb.ready(function () {\n' + ' var flowConnector = {\n' + ' // connector: ["Bezier", { curviness: 0 }],\n' + ' //anchors: ["BottomCenter", "TopCenter"],\n' + ' connector:'Straight',\n' + ' paintStyle: { lineWidth: 2, strokeStyle: "#61B7CF", fillStyle: "transparent" },\n' + ' //paintStyle: { lineWidth: 2, strokeStyle: "#61b7cf", joinstyle: "round", outlineColor: "white", outlineWidth: 2 },\n' + ' overlays: [["Arrow", { width: 10, length: 10, location: 1 }]],\n' + ' endpoint: ["Dot", { radius: 1 }]\n' + ' };\n' + ' var returnConnector = {\n' + ' connector: "Bezier",\n' + ' //anchors: ["BottomCenter", "TopCenter"],\n' + ' paintStyle: { lineWidth: 2, strokeStyle: "red", fillStyle: "transparent" },\n' + ' //paintStyle: { lineWidth: 2, strokeStyle: "#61b7cf", joinstyle: "round", outlineColor: "white", outlineWidth: 2 },\n' + ' overlays: [["Arrow", { width: 10, length: 10, location: 1 }]],\n' + ' endpoint: ["Dot", { radius: 1 }]\n' + ' };\n' + ' var connection1= jsPlumb.connect({\n' + ' source: 'start',\n' + ' target: 'step1',\n' + ' anchor: ['Right', 'Left']\n' + ' },flowConnector)\n' + ' var connection2= jsPlumb.connect({\n' + ' source: 'step1',\n' + ' target: 'step2',\n' + ' },flowConnector)\n' + ' var connection3= jsPlumb.connect({\n' + ' source: 'step2',\n' + ' target: 'copy',\n' + ' },flowConnector)\n' + ' var connection4= jsPlumb.connect({\n' + ' source: 'copy',\n' + ' target: 'end',\n' + ' },flowConnector)\n' + ' connection1.setLabel(<span style=\'display:block;padding:10px;opacity: 0.8;filter: alpha(opacity=80);font-family: helvetica;height:auto;background-color:white;border:1px solid #346789;text-align:center;font-size:12px;color:black;border-radius:0.5em;\'>通过</span>);\n' + ' var returnStep1= jsPlumb.connect({\n' + ' source: 'step1',\n' + ' target: 'start',\n' + ' anchor: ['Top', 'Top']\n' + ' },returnConnector)\n' + ' returnStep1.setLabel(<span style=\'display:block;padding:10px;opacity: 0.8;filter: alpha(opacity=80);font-family: helvetica;height:auto;background-color:white;border:1px solid #346789;text-align:center;font-size:12px;color:black;border-radius:0.5em;\'>通不过</span>);\n' + ' var returnStep2= jsPlumb.connect({\n' + ' source: 'step2',\n' + ' target: 'step1',\n' + ' anchor: ['Top', 'Top']\n' + ' },returnConnector)\n' + ' returnStep2.setLabel(<span style=\'display:block;padding:10px;opacity: 0.8;filter: alpha(opacity=80);font-family: helvetica;height:auto;background-color:white;border:1px solid #346789;text-align:center;font-size:12px;color:black;border-radius:0.5em;\'>通不过</span>);\n' + ' var returnStep3= jsPlumb.connect({\n' + ' source: 'step2',\n' + ' target: 'start',\n' + ' anchor: ['Top', 'Top']\n' + ' },returnConnector)\n' + ' returnStep3.setLabel(<span style=\'display:block;padding:10px;opacity: 0.8;filter: alpha(opacity=80);font-family: helvetica;height:auto;background-color:white;border:1px solid #346789;text-align:center;font-size:12px;color:black;border-radius:0.5em;\'>通不过</span>);\n' + '\n' + ' })' var jscode = new Function(jsText)(); </script> </body> </html> 我在用cshtml页面打开是没有连线的,用idea打开是有的,是不是cshtml与html有啥不一样 灬丶的主页 灬丶 | 初学一级 | 园豆:71 提问于:2020-03-29 15:09 < > 分享 清除回答草稿    您需要登录以后才能回答,未注册用户请先注册
__label__pos
0.553063
Welcome to 20i Support Web Hosting Knowledge Base How do I optimize and reduce the size of my database? Database maintenance is something that every database/web administrator should practise. An optimized and well-managed database can be the difference between a responsive website, and one that appears sluggish, simply because queries are able to be fetched far faster. Magento Magento databases may often increase in size, with no discernible, or apparent reason as to why! More often than not, this happens because of log data retained by the install that may not be being automatically cleared. It's common to see a large reduction in the size of a database after cleaning log data. 1) To prune log data within Magento, you'll need to log in to the Magento Admin facility. 2) Then select System > Configuration. 3) Under Advanced, select System. 4) Under Log, select Yes under Enable Log Cleaning. 5) Set the frequency for which you would like log data retaining for under Save Log, Days. For most users, we would recommend only retaining log data for up to a week, which is standard practise. Going forward, Magento will only retain log data for the length of time set. If you find that your database is still larger than wanted, a reduction in the number of days may be needed.  WordPress Over time, a WordPress database can become particularly bloated. During the life of a WordPress install, deactivated plugins and themes, amongst a number of other things, can take up unnecessary space. Removing them can increase response times, simply because WordPress is able to query the database and its tables far more quickly. It doesn't have to sift through data that is no longer required, or large, cluttered datasets. phpMyAdmin can be used to sort database tables by size (by selecting the Size column): Database tables in phpMyAdmin By doing this, you can see which tables within your WordPress install are the largest, and which potentially need removing, maintaining, or simply reducing in size. Plugins: For example, we can look for tables that relate to deactivated plugins, and remove them as they’re no longer required. Some deactivated plugins can have very large tables in place. This is because database tables and data are still in place should you ever wish to reactivate the plugin, and/or theme in question. You can remove a table within phpMyAdmin by using the DROP TABLE function from the available drop-down: phpMyAdmin Drop Table Unassociated tags: From time to time, you may have a number of tags that are not associated with any posts. If you’ve removed a large number of posts/articles, these tags may still be present in the database. These orphan tags can be removed with the following queries: Autosaves: Every time you write a post or an article within WordPress, the in-built autosave feature will make a save within your database. Depending on what you’re writing, these autosaves can be quite large. Therefore, should these autosaves no longer be required, you can make quite a reduction in database size by removing these. Autosaves can be removed with the following query: Trash days: Another useful trick is to increase the frequency in which trash is automatically deleted from your WordPress installation. This can be achieved by defining EMPTY_TRASH_DAYS within your wp-config.php file as follows: The end value is how often, in days, you would like this to happen. So in this instance, deletion of trash would occur every seven days. Transients: Transients offer a means to store cached data within a database temporarily (thus their name). Whilst this can be useful for reducing the amount of queries a site makes, you may often find that WordPress hasn't removed transients that have expired; transients having specific expiration dates. Consequently, this can cause the database to become bloated. Expired transients can be removed by performing the following query: General Optimization Indexes Indexes can be extremely useful for those with large databases, querying large datasets. If created and used correctly, indexes can make retrieval of records faster. If a table contains hundreds of thousands of records, even the fastest of database servers will struggle to perform an intended query, if the database isn't indexed properly. An index can be created with the CREATE INDEX statement after a table has been created, or it can be created at the time of the table creation with the CREATE TABLE statement. For example, let us imagine we have the following table (albeit, with many more records): If we wish to obtain the email address for Debbie Elliott, our query would need to go through every record until it is able to find the customer with the first and last name we are looking for. This is inefficient. To make the retrieval of records faster, we can create an index, or indexes: If we then use EXPLAIN, we can check the index or indexes being used for the SELECT statement we intend to run: Database table example two column Going forward, MySQL will be able to rely on this index to retrieve the record(s) more quickly, by checking fewer rows. We can contrast this with the first_name table. As the following EXPLAIN shows, MySQL is examining every row in the table (five), whereas our previously created index is only examining two, the latter being much more efficient: Efficient database So, as you can see, when used correctly, indexes can make a real difference and improve performance dramatically. Optimize table The OPTIMIZE TABLE command can be useful in the reduction of table sizes. OPTIMIZE TABLE can be thought of as a defragmentation, and will essentially recreate the table and reclaim any unused space. For example, if a large part of a table has been removed (large numbers of rows deleted), these are still maintained, with subsequent operations still “seeing” those rows. You can use OPTIMIZE TABLE to reclaim this space. To achieve this, log in to PHPMyAdmin for the database in question via Manage Hosting > Manage > PHPMyAdmin. From this point, we can then select tables for which we wish to optimize, and then select the option available from the dropdown menu: Optimize table in phpMyAdmin
__label__pos
0.897734
7 form.inc drupal_get_form($form_id) 4.7 form.inc drupal_get_form($form_id, &$form, $callback = NULL) 5 form.inc drupal_get_form($form_id) 6 form.inc drupal_get_form($form_id) Returns a renderable form array for a given form ID. This function should be used instead of drupal_build_form() when $form_state is not needed (i.e., when initially rendering the form) and is often used as a menu callback. Parameters $form_id: The unique string identifying the desired form. If a function with that name exists, it is called to build the form array. Modules that need to generate the same form (or very similar forms) using different $form_ids can implement hook_forms(), which maps different $form_id values to the proper form constructor function. Examples may be found in node_forms(), and search_forms(). ...: Any additional arguments are passed on to the functions called by drupal_get_form(), including the unique form constructor function. For example, the node_edit form requires that a node object is passed in here when it is called. These are available to implementations of hook_form_alter() and hook_form_FORM_ID_alter() as the array $form_state['build_info']['args']. Return value The form array. See also drupal_build_form() Related topics 47 calls to drupal_get_form() authorize.php in ./authorize.php Administrative script for running authorized file operations. block_admin_display in modules/block/block.admin.inc Menu callback for admin/structure/block. book_outline in modules/book/book.pages.inc Menu callback: Shows the outline form for a single node. comment_admin in modules/comment/comment.admin.inc Menu callback; present an administrative comment listing. comment_confirm_delete_page in modules/comment/comment.admin.inc Page callback for comment deletions. ... See full list 34 string references to 'drupal_get_form' aggregator_menu in modules/aggregator/aggregator.module Implements hook_menu(). ajax_forms_test_menu in modules/simpletest/tests/ajax_forms_test.module Implements hook_menu(). batch_test_menu in modules/simpletest/tests/batch_test.module Implement hook_menu(). block_menu in modules/block/block.module Implements hook_menu(). book_menu in modules/book/book.module Implements hook_menu(). ... See full list File includes/form.inc, line 122 Functions for form and batch generation and processing. Code function drupal_get_form($form_id) { $form_state = array(); $args = func_get_args(); // Remove $form_id from the arguments. array_shift($args); $form_state ['build_info']['args'] = $args; return drupal_build_form($form_id, $form_state); } Comments In drupal 7 drupal_get_form() return Form API array for HTML output drupal_render() to be called. See http://drupal.org/node/224333#unrendered $form_id can not start with an underscore. My function was called something like _mymodule_form. The form was rendered but the submit function was not called. Renaming it to mymodule_form, it works fine. It is possible to call drupal_get_form('some_form', $arg1, $arg2); which will pass the arguments $arg1 and $arg2 (or however many arguments you pass) to the function some_form() like this: some_form($form, &$form_state, $arg1, $arg2); the function that does the work for this is drupal_retrieve_form() Yes,that is possible,I tried in D7 and it works!!! :) Embedding a node creation form has gotten more complex as you need to use form_load_include. If you don't do this (and only include node.pages.inc via module_load_include) ajax on 'add another' fields will break with 500 errors. <?php global $user; $node = (object) array('uid' => $user->uid, 'name' => (isset($user->name) ? $user->name : ''), 'type' => 'FOO', 'language' => LANGUAGE_NONE);   $form_state['build_info']['args'] = array($node);   form_load_include($form_state, 'inc', 'node', 'node.pages');  return drupal_build_form('FOO_node_form', $form_state); ?> Thanks, that saved me a lot of screaming at the monitor. I was getting quite hoarse. In my case, the problem was with embedding the form for another entity type in a page, but the same solution applies. For ECK entity forms, the following worked for me (copying out of eck.entity.inc). Replace ENTITY_TYPE and BUNDLE with the relevant machine names: $entity = entity_create('ENTITY_TYPE', array('type' => 'BUNDLE'); drupal_get_form('eck__entity__form_add_ENTITYTYPE_BUNDLE', $entity); I am grateful to you for this information. at the risk of repeating what's already been said... THANK YOU for that hint about form_load_include ! saved my sorry ass, that did! I registered a custom route and was trying to show a node form like following: <?php global $user;   module_load_include('inc', 'node', 'node.pages'); $node = (object) array('uid' => $user->uid, 'name' => (isset($user->name) ? $user->name : ''), 'type' => 'foo', 'language' => LANGUAGE_NONE); return drupal_get_form('foo_node_form', $node); ?> and my file upload widget was not working. As soon as I was trying to upload a file, the upload widget itself was disappearing! Then I google and found your solution which worked like magic! Thanks so much! it would be really great if there was a quick explaination! The code works but I didn't really learn much! Thanks in advance! thank you very very much :) i spend all day to get things works with my drupal_get_form function. Finally with your code it worked. If its a node, you dont need to create the node object by hand, you could use: <?php $content_type = 'the_content_type'; module_load_include('inc', 'node', 'node.pages'); $form = node_add($content_type); ?> I confirm. This worked for me. Maybe it's a bug that you can't do this with a simple node_add('NODETYPE') call when ajax is in the game. This is a huge pain for those who get stuck on this. Anyway, thank you very much for this snippet. Hi, I want to thank you very much cause ajax is working properly. Thanks for sharing. <?php function mymodule_menu(){   ...   $items['abc'] = array(     'type' => MENU_CALLBACK,     'page callback' => 'drupal_get_form',     'page arguments' => array('add_component_form'),     'access arguments' => array('access content')   );   return $items; } function add_component_form($form, &$form_state){   $arg1 = $form_state['build_info']['args'][0];   ... } ?> When path 'abc/def' is requested, the $arg1 will be 'def'. I have used this code and it does not work for me:- function test_menu(){ $items = array(); $items['test'] = array( 'title' => 'Form validation with ajax', 'description' => 'testing form validation and ajax dropdown with ajax', 'access callback' => 'user_access', 'access arguments' => array( 'validate ajax form' ), 'page callback' => 'drupal_get_form', 'page arguments'=> array( 'validate_ajax_form', 'product_form' ) ); return $items; } /* * implements hook_form */ function validate_ajax_form( $form, &$form_state ){ $product_groups = array('' => 'Select product group'); $form['product_group_id'] = array( '#type' => 'select', '#options' => $product_groups ); return $form; } function product_form( $form, &$form_state ){ $form['name'] = array( '#type' => 'textfield', '#default_value' => $name ); return $form; } But it does not print two forms individually. current $product_groups = array('' => 'Select product group'); $form['product_group_id'] = array( '#type' => 'select', '#options' => $product_groups new $product_groups = array('' => 'Select product group'); $form['product_group_id'] = array( '#type' => 'select', '#options' => $product_groups['']; I am calling drupa_get_form() as below, and it returns just 'Array' instead of returning the form array. $output = drupal_get_form('my_function', arg1); ... ... return $output; Please help me with this.... How are you displaying the returned array from drupal_get_form()? If you're using kpr() or dsm() in the Devel module, your returned result may be too large to display with Krumo. ... return drupal_render($output); Use \Drupal::formBuilder()->getForm('Drupal\mymodule\MyModuleForm'); More info at https://www.drupal.org/node/2117411 grateful for the assistance as well. I was including user_profile_form in a block to enable file_uploads for a certain class of users. Absolutely have to include user.pages.inc in the form_state data in order for ajax to refresh the form after file upload or remove!! $block['subject'] = t(''); $account = user_load( $user->uid ); $form_state['build_info']['args'] = array( $account ); form_load_include($form_state, 'inc', 'user', 'user.pages'); $block['content'] = drupal_build_form('user_profile_form', $form_state); return $block; In truth one must render the form. corrected block_view snippet: $block['subject'] = t(''); $account = user_load( $user->uid ); $form_state['build_info']['args'] = array( $account ); form_load_include($form_state, 'inc', 'user', 'user.pages'); $block['content'] = drupal_render( drupal_build_form('user_profile_form', $form_state) ); return $block;
__label__pos
0.503034
02 Nov, 2011, arholly wrote in the 1st comment: Votes: 0 Hello: I'm having a serious problem with my note system and it is a hot mess. The way the system was designed, our Note, Background, Fact, and Article system all use the note editors. After I thought I fixed the article system (a few threads back), things were going good. Then, really all of a sudden, it seems the articles replaced everything in the notes and backgrounds. Now, for some reason, it has not replaced anything in the facts, but that might be because I haven't added any facts. It's like it is loading the article into everything. I can delete them and it does not change note.txt file, bg.txt file, etc… I even created a command to force the saving of those items and it doesn't change the file. So, I tried deleting the bg.txt file (for example), where backgrounds are saved to. Then, I created a test command (below), to force it to save. And it saved the freakin' article over it. So, it has to be the way it is saving notes, right? void do_save_info( CHAR_DATA *ch, char *argument ) { save_notes(NOTE_BACKGROUND); save_notes(NOTE_KNOWLEDGE); } This is the save_note function. Is something wrong? void save_notes(int type) { FILE *fp; char *name; NOTE_DATA *pnote; switch (type) { default: return; case NOTE_NOTE: name = NOTE_FILE; pnote = note_list; break; case NOTE_BACKGROUND: name = BG_FILE; pnote = bg_list; break; case NOTE_KNOWLEDGE: name = KNOW_FILE; pnote = know_list; break; case NOTE_ARTICLE: name = NEWS_FILE; pnote = news_list; break; } fclose( fpReserve ); if ( ( fp = fopen( name, "w" ) ) == NULL ) { perror( name ); } else { for ( pnote = news_list; pnote != NULL; pnote = pnote->next ) { if(type == NOTE_NOTE) { fprintf( fp, "Sender %s~\n", pnote->sender); fprintf( fp, "Date %s~\n", pnote->date); fprintf( fp, "Stamp %ld\n", pnote->date_stamp); fprintf( fp, "To %s~\n", pnote->to_list); fprintf( fp, "Subject %s~\n", pnote->subject); } else if(type == NOTE_BACKGROUND || type == NOTE_KNOWLEDGE) { fprintf( fp, "Author %s~\n", pnote->sender); fprintf( fp, "Date %s~\n", pnote->date); fprintf( fp, "Stamp %ld\n", pnote->date_stamp); fprintf( fp, "Keyword %s~\n", pnote->to_list); fprintf( fp, "Diff %s~\n", pnote->subject); } else { fprintf( fp, "Author %s~\n", pnote->sender); fprintf( fp, "Date %s~\n", pnote->date); fprintf( fp, "Stamp %ld\n", pnote->date_stamp); fprintf( fp, "Categ %s~\n", pnote->to_list); fprintf( fp, "Subject %s~\n", pnote->subject); } sprintf( log_buf, "pnote-> subject = %s, pnote-> successes =%d", pnote->subject, pnote->successes); log_string( log_buf ); fprintf( fp, "Success %d~\n", pnote->successes); fprintf( fp, "Text\n%s~\n", pnote->text); } fclose( fp ); fpReserve = fopen( NULL_FILE, "r" ); return; } } I'm really confused by this and it has cost me some amount of work loss, so I'm really trying to get this to work right. 02 Nov, 2011, David Haley wrote in the 2nd comment: Votes: 0 It could also be the various linked lists or the filenames. Impossible to tell from just that. 02 Nov, 2011, arholly wrote in the 3rd comment: Votes: 0 OK, so how would I tell? This is the file name information. #define NOTE_FILE "../data/notes.txt" /* For 'notes'*/ #define BG_FILE "../data/bg.txt" /* For 'backgrounds'*/ #define KNOW_FILE "../data/know.txt" /* For 'backgrounds'*/ #define NEWS_FILE "../data/news.txt" /* For 'articles'*/ #define PAPER_FILE "../data/paper.txt" /* For 'newspapers'*/ 02 Nov, 2011, David Haley wrote in the 4th comment: Votes: 0 You'll have to see how you're initializing and maintaining those linked lists. My guess is that pointers are being mixed up somewhere and you're storing articles in your notes list. 02 Nov, 2011, arholly wrote in the 5th comment: Votes: 0 Which means what? I'm not a terribly well-learned programmer (learning on the job type). Any help would be great. 02 Nov, 2011, arholly wrote in the 6th comment: Votes: 0 Err…OK, I think I got it fixed. for ( pnote = news_list; pnote != NULL; pnote = pnote->next ) { if(type == NOTE_NOTE) { The first line was specifying news_list, so it was going and repopulating information that way. I removed that (which is the way it is in stock rom) and it seemed to have fixed the problem. Thanks though David. 02 Nov, 2011, David Haley wrote in the 7th comment: Votes: 0 Oh yeah. You were picking the right pnote list, but then discarding it with that for loop. I missed that with my quick skim… I saw the first part and assumed you were using that pnote assignment later. 02 Nov, 2011, arholly wrote in the 8th comment: Votes: 0 Yeah, me too. Sometimes I forgot to go compare things in ROM and then see the differences what might work to fix. I appreciate the help though. 0.0/8
__label__pos
0.793248
0 Let A = ${a_1,...,a_n}$ be a set of numbers. We can assume all elements of A are integers. Is there any efficient way to partition A into two sets B = ${b_1,...,b_k}$ and C = ${c_1,...,c_l}$ such that $|(b_1...b_k) - (c_1...c_l)|$ is minimal? Is the problem anything easier if we let A be a set of strictly positive integers? What if we only let prime numbers? flag I assume you want the absolute value of prod(B) - prod(C)? Otherwise (at least for positive integers) it's trivial. – Harrison Brown Dec 2 2009 at 1:31 You are right. I knew I missed something when writing down the problem.. Thanks – Jernej Dec 2 2009 at 1:39 2 Answers 2 I suspect that it's NP-hard even to check whether you can get prod(B) - prod(C) = 0, although there's a problem with the obvious argument that I don't know off the top of my head how to fix. "Reduction" from subset sum: If you have a set S of integers, replace each integer $k \in S$ with $2^k$. Then this new set can be partitioned into two parts with the same product iff the original set could be partitioned into two parts with the same sum. The problem is that our new integers are exponentially large compared to the original ones, which means that this isn't actually allowed as a reduction. But I think it's morally correct, since the hardness of subset sum is controlled by the size of the set rather than the lengths of the elements. link|flag 1  Here's how to fix it (sorry for being 2 months late but I just saw the question). There's a randomized reduction that works based on your observation. Let M be the absolute value of the largest number in S, and note all numbers of S are represented in O(log M) bits. Pick a random prime p in the interval [2, 2^n n^2 log(M)]. Now replace each k in S with 2^k mod p. It is easy to see the above is a polytime reduction. It works whp because the total number of subsets is at most 2^n, and the probability that some S has a solution (when it really shouldn't) can be seen to be at most 1/(n 2^n). – Ryan Williams Feb 21 2010 at 6:45 2 actually it works the other way around. partition (partition elements into two sets that have equal sum) is polytime if the input sizes are written in unary. If you take this problem and take logs, then the decision problem of determining whether there's a solution where they are equal is solvable via the PARTITION dynamic program, and since the bit sizes are reduced, it's poly time. there's some nastiness with the log, but I'll handwave and say that you only need enough bits to distinguish two integers, so you can do it with bounded number of bits. p.s subset sum is poly in the size of the elements btw via the same DP link|flag Subset sum is exponential in the number of bits of precision of its inputs. And to distinguish between integer products that may themselves be exponential in the input numbers, you need a polynomial number of bits of precision. So this doesn't seem to be good enough. – David Eppstein Dec 2 2009 at 4:51 makes me wonder if a randomized fingerprinting strategy might work. – Suresh Venkat Dec 2 2009 at 17:49 Your Answer Get an OpenID or Not the answer you're looking for? Browse other questions tagged or ask your own question.
__label__pos
0.897447
如何从Optarg获取值[英] How to get a value from optarg 本文是小编为大家收集整理的关于如何从Optarg获取值的处理方法,想解了如何从Optarg获取值的问题怎么解决?如何从Optarg获取值问题的解决办法?那么可以参考本文帮助大家快速定位并解决问题。 问题描述 嗨,我正在编写一个简单的客户服务器程序.在此程序中,我必须使用getopt()以获取这样的端口号和IP地址: 服务器-i 127.0.0.1 -p 10001 我不知道如何从Optarg获得值,以后在程序中使用. 推荐答案 这样怎么样: char buf[BUFSIZE+1]; snprintf(buf,BUFSIZE,"%s",optarg); 或更完整的示例: #include <stdio.h> #include <unistd.h> #define BUFSIZE 16 int main( int argc, char **argv ) { char c; char port[BUFSIZE+1]; char addr[BUFSIZE+1]; while(( c = getopt( argc, argv, "i:p:" )) != -1 ) switch ( c ) { case 'i': snprintf( addr, BUFSIZE, "%s", optarg ); break; case 'p': snprintf( port, BUFSIZE, "%s", optarg ); break; case '?': fprintf( stderr, "Unrecognized option!\n" ); break; } return 0; } 有关更多信息,请参见 getopt 的文档. 其他推荐答案 您使用一个way循环在所有参数中移动并像这样处理它们... #include <unistd.h> int main(int argc, char *argv[]) { int option = -1; char *addr, *port; while ((option = getopt (argc, argv, "i:p:")) != -1) { switch (option) { case 'i': addr = strdup(optarg); break; case 'p': port = strdup(optarg); break; default: /* unrecognised option ... add your error condition */ break; } } /* rest of program */ return 0; } 其他推荐答案 它是GetOpt文档的众多缺陷之一:它不清楚地说必须复制Optarg以供以后使用(例如,使用strdup()),因为它可能会被以后的选项覆盖或仅由由Getopt释放. 本文地址:https://www.itbaoku.cn/post/359103.html
__label__pos
0.726663
aspose file tools The moose likes Beginning Java and the fly likes Assigning values to objects in object array Big Moose Saloon   Search | Java FAQ | Recent Topics | Flagged Topics | Hot Topics | Zero Replies Register / Login Win a copy of Head First Android this week in the Android forum! JavaRanch » Java Forums » Java » Beginning Java Bookmark "Assigning values to objects in object array" Watch "Assigning values to objects in object array" New topic Author Assigning values to objects in object array expired user Greenhorn Joined: Dec 13, 2001 Posts: 11 Java is not a structured language. It is object oriented and we lack the structurs in one or another way. In C++, we can create an object array easily and can able to assign the variables so easily, even assigning the values of the member variables of an object array. In java, we will encounter NullPointerException if we try to assign value to an member variable directly. The solution is to create a temporary object of that particular object and assign the values in to the temporary object. After this we can assign the temporary object into the array. For example, class OArray { int a; char c; } class OArrayAccess { OArray o = new OArray; //temporary object OArray[] oa = new OArray[10]; //assigning values in to the temp' object o.a = 10; o.c = 'k'; //assigning values in to object array oa[0] = o; //temp' object is assigned to the object array } In order to retrive the values, we have to again assign the array object in to a temporary object and access the values. The thought of using the vector classes will help only when we deal with different instances of a class. In the case of same object in an oops environment, Object array is the only solution. Manfred Leonhardt Ranch Hand Joined: Jan 09, 2001 Posts: 1492 Hi Kadirvelu, Not sure if you are asking anything or just stating thoughts. Here are some of my thoughts about what you have said. 1. Object arrays are limited in that you must define their size when they are declared. Vectors (and other Collections) can be dynically resized after it is declared. 2. Object arrays might require a temporary variable to assign them but not to retrieve them. We can place the following at the end of your code snippet: System.out.println( oa[0].i + ", " + oa[0].c ); without a compiler error and it will run great. Regards, Manfred.   I agree. Here's the link: http://aspose.com/file-tools   subject: Assigning values to objects in object array   It's not a secret anymore!
__label__pos
0.506224
Skip to content reduce() function OpenCypher Compatibility In openCypher, the function reduce() is not defined. nGQL implements reduce() function as the Cypher way. Syntax reduce() returns the value resulting from the application of an expression on each successive element in a list in conjunction with the result of the computation thus far. This function will iterate through each element e in the given list, run the expression on e — taking into account the current partial result — and store the new partial result in the accumulator. This function is analogous to the fold or reduce method in functional languages such as Lisp and Scala. reduce(accumulator = initial, variable IN list | expression) • Arguments: Name Description accumulator A variable that will hold the result and the partial results as the list is iterated. initial An expression that runs once to give a starting value to the accumulator. list An expression that returns a list. variable The closure will have a variable introduced in its context. We decide here which variable to use. expression This expression will run once per value in the list, and produce the result value. • Returns: The type of the value returned depends on the arguments provided, along with the semantics of expression. Example nebula> RETURN reduce(totalNum = 10, n IN range(1, 3) | totalNum + n) AS r; +----+ | r | +----+ | 16 | +----+ nebula> RETURN reduce(totalNum = -4 * 5, n IN [1, 2] | totalNum + n * 2) AS r; +-----+ | r | +-----+ | -14 | +-----+ nebula> MATCH p = (n:player{name:"LeBron James"})<-[:follow]-(m) \ RETURN nodes(p)[0].age AS src1, \ nodes(p)[1].age AS dst2, \ reduce(totalAge = 100, n IN nodes(p) | totalAge + n.age) AS sum +------+------+-----+ | src1 | dst2 | sum | +------+------+-----+ | 34 | 31 | 165 | +------+------+-----+ | 34 | 29 | 163 | +------+------+-----+ | 34 | 33 | 167 | +------+------+-----+ | 34 | 26 | 160 | +------+------+-----+ | 34 | 34 | 168 | +------+------+-----+ | 34 | 37 | 171 | +------+------+-----+ nebula> LOOKUP ON player WHERE player.name == "Tony Parker" | GO FROM $-.VertexID over follow WHERE follow.degree != reduce(totalNum = 5, n IN range(1, 3) | $$.player.age + totalNum + n) YIELD $$.player.name AS id, $$.player.age AS age, follow.degree AS degree +---------------------+-----+--------+ | id | age | degree | +---------------------+-----+--------+ | "Tim Duncan" | 42 | 95 | +---------------------+-----+--------+ | "LaMarcus Aldridge" | 33 | 90 | +---------------------+-----+--------+ | "Manu Ginobili" | 41 | 95 | +---------------------+-----+--------+ Last update: March 17, 2021 Back to top
__label__pos
0.89097
Information Technology - Database Language SQL (Proposed revised text of DIS 9075) July 1992 (Second Informal Review Draft) ISO/IEC 9075:1992, Database Language SQL- July 30, 1992 Digital Equipment Corporation Maynard, Massachusetts Contents Page Foreword.........................................................xi Introduction.....................................................xiii 1 Scope ........................................................ 1 2 Normative references ......................................... 3 3 Definitions, notations, and conventions ...................... 5 3.1 Definitions ................................................ 5 3.1.1Definitions taken from ISO/IEC 10646 ....................... 5 3.1.2Definitions taken from ISO 8601 ............................ 5 3.1.3Definitions provided in this International Standard ........ 5 3.2 Notation ................................................... 7 3.3 Conventions ................................................ 9 3.3.1Informative elements ....................................... 9 3.3.2Specification of syntactic elements ........................ 9 3.3.3Specification of the Information Schema ....................10 3.3.4Use of terms ...............................................10 3.3.4Exceptions .................................................10 3.3.4Syntactic containment ......................................11 3.3.4Terms denoting rule requirements ...........................12 3.3.4Rule evaluation order ......................................12 3.3.4Conditional rules ..........................................13 3.3.4Syntactic substitution .....................................13 3.3.4Other terms ................................................14 3.3.5Descriptors ................................................14 3.3.6Index typography ...........................................15 3.4 Object identifier for Database Language SQL ................16 4 Concepts .....................................................19 4.1 Data types .................................................19 4.2 Character strings ..........................................20 4.2.1Character strings and collating sequences ..................20 4.2.2Operations involving character strings .....................22 4.2.2Operators that operate on character strings and return char- acter strings...............................................22 4.2.2Other operators involving character strings ................23 4.2.3Rules determining collating sequence usage .................23 4.3 Bit strings ................................................26 4.3.1Bit string comparison and assignment .......................27 4.3.2Operations involving bit strings ...........................27 4.3.2Operators that operate on bit strings and return bit strings ............................................................27 4.3.2Other operators involving bit strings ......................27 ii Database Language SQL 4.4 Numbers ....................................................27 4.4.1Characteristics of numbers .................................28 4.4.2Operations involving numbers ...............................29 4.5 Datetimes and intervals ....................................29 4.5.1Datetimes ..................................................30 4.5.2Intervals ..................................................32 4.5.3Operations involving datetimes and intervals ...............34 4.6 Type conversions and mixing of data types ..................34 4.7 Domains ....................................................35 4.8 Columns ....................................................36 4.9 Tables .....................................................37 4.10 Integrity constraints ......................................40 4.10.Checking of constraints ....................................41 4.10.Table constraints ..........................................41 4.10.Domain constraints .........................................43 4.10.Assertions .................................................43 4.11 SQL-schemas ................................................44 4.12 Catalogs ...................................................45 4.13 Clusters of catalogs .......................................45 4.14 SQL-data ...................................................45 4.15 SQL-environment ............................................46 4.16 Modules ....................................................46 4.17 Procedures .................................................47 4.18 Parameters .................................................47 4.18.Status parameters ..........................................47 4.18.Data parameters ............................................48 4.18.Indicator parameters .......................................48 4.19 Diagnostics area ...........................................48 4.20 Standard programming languages .............................49 4.21 Cursors ....................................................49 4.22 SQL-statements .............................................51 4.22.Classes of SQL-statements ..................................51 4.22.SQL-statements classified by function ......................52 4.22.Embeddable SQL-statements ..................................55 4.22.Preparable and immediately executable SQL-statements .......56 4.22.Directly executable SQL-statements .........................58 4.22.SQL-statements and transaction states ......................59 4.23 Embedded syntax ............................................61 4.24 SQL dynamic statements .....................................61 4.25 Direct invocation of SQL ...................................64 4.26 Privileges .................................................64 4.27 SQL-agents .................................................66 4.28 SQL-transactions ...........................................67 4.29 SQL-connections ............................................70 4.30 SQL-sessions ...............................................72 Table of Contents iii 4.31 Client-server operation ....................................74 4.32 Information Schema .........................................75 4.33 Leveling ...................................................75 4.34 SQL Flagger ................................................76 5 Lexical elements .............................................79 5.1 ...................................79 5.2 and ....................................82 5.3 ..................................................89 5.4 Names and identifiers ......................................98 6 Scalar expressions ...........................................107 6.1 ................................................107 6.2 and ...........114 6.3 ..........................................118 6.4 .........................................121 6.5 ...............................124 6.6 ...................................128 6.7 ....................................132 6.8 ..................................139 6.9 ..........................................141 6.10 .......................................144 6.11 .........................................155 6.12 .................................157 6.13 ..................................160 6.14 ................................165 6.15 ................................168 7 Query expressions ............................................173 7.1 ....................................173 7.2 ..................................176 7.3 .........................................177 7.4 ..............................................178 7.5 .............................................180 7.6 .............................................185 7.7 ..........................................187 7.8 ............................................189 7.9 ......................................191 7.10 .........................................196 7.11 , , and ....203 8 Predicates ...................................................205 8.1 ................................................205 8.2 .....................................207 8.3 ........................................211 8.4 .............................................212 iv Database Language SQL 8.5 ...........................................214 8.6 ...........................................218 8.7 ..........................220 8.8 .........................................222 8.9 .........................................223 8.10 ..........................................224 8.11 .......................................227 8.12 .........................................229 9 Data assignment rules ........................................231 9.1 Retrieval assignment .......................................231 9.2 Store assignment ...........................................234 9.3 Set operation result data types ............................237 10 Additional common elements ...................................239 10.1 .......................................239 10.2 ..........................................243 10.3 ...............................................245 10.4 ..............................248 10.5 ...........................................251 10.6 and ...252 11 Schema definition and manipulation ...........................255 11.1 ........................................255 11.2 ....................................258 11.3 .........................................260 11.4 ........................................262 11.5 ...........................................266 11.6 ..............................270 11.7 .............................272 11.8 ........................274 11.9 ..............................281 11.10 ....................................283 11.11 ....................................284 11.12 ..................................286 11.13 ................................287 11.14 ...............................288 11.15 ...................................289 11.16 ..........................291 11.17 .........................292 11.18 .....................................294 11.19 ..........................................296 11.20 ......................................300 11.21 ........................................301 11.22 ...................................304 11.23 ................................305 Table of Contents v 11.24 ...............................306 11.25 .........................307 11.26 ........................308 11.27 ....................................309 11.28 .................................311 11.29 .............................313 11.30 .....................................314 11.31 .................................318 11.32 ...................................320 11.33 ...............................323 11.34 .....................................325 11.35 .................................328 11.36 ..........................................329 11.37 .........................................333 12 Module .......................................................341 12.1 ...................................................341 12.2 .......................................344 12.3 ................................................346 12.4 Calls to a .....................................352 12.5 ..................................368 13 Data manipulation ............................................371 13.1 ...........................................371 13.2 ...........................................375 13.3 ..........................................377 13.4 ..........................................381 13.5 that identifies a view that is defined by a V, then is said to generally contain the contained in V. If contains , then generally contains . If generally contains and generally contains , then generally contains . An instance A1 of directly contains an instance B1 of if A1 contains B1 without an intervening or . Definitions, notations, and conventions 11 X3H2-92-154/DBL CBR-002 3.3 Conventions 3.3.4.3 Terms denoting rule requirements In the Syntax Rules, the term shall defines conditions that are required to be true of syntactically conforming SQL language. When such conditions depend on the contents of the schema, then they are required to be true just before the actions specified by the General Rules are performed. The treatment of language that does not conform to the SQL Formats and Syntax Rules is implementation- dependent. If any condition required by Syntax Rules is not sat- isfied when the evaluation of Access or General Rules is attempted and the implementation is neither processing non-conforming SQL language nor processing conforming SQL language in a non-conforming manner, then an exception condition is raised: syntax error or access rule violation (if this situation occurs during dynamic ex- ecution of an SQL-statement, then the exception that is raised is syntax error or access rule violation in dynamic SQL statement; if the situation occurs during direct invocation of an SQL-statement, then the exception that is raised is syntax error or access rule violation in direct SQL statement). In the Access Rules, the term shall defines conditions that are required to be satisfied for the successful application of the General Rules. If any such condition is not satisfied when the General Rules are applied, then an exception condition is raised: syntax error or access rule violation (if this situation occurs during dynamic execution of an SQL-statement, then the exception that is raised is syntax error or access rule violation in dynamic SQL statement; if the situation occurs during direct invocation of an SQL-statement, then the exception that is raised is syntax error or access rule violation in direct SQL statement). In the Leveling Rules, the term shall defines conditions that are required to be true of SQL language for it to syntactically conform to the specified level of conformance. 3.3.4.4 Rule evaluation order A conforming implementation is not required to perform the exact sequence of actions defined in the General Rules, but shall achieve the same effect on SQL-data and schemas as that sequence. The term effectively is used to emphasize actions whose effect might be achieved in other ways by an implementation. The Syntax Rules and Access Rules for contained syntactic elements are effectively applied at the same time as the Syntax Rules and Access Rules for the containing syntactic elements. The General Rules for contained syntactic elements are effectively applied be- fore the General Rules for the containing syntactic elements. Where the precedence of operators is determined by the Formats of this International Standard or by parentheses, those operators are ef- fectively applied in the order specified by that precedence. Where the precedence is not determined by the Formats or by parentheses, effective evaluation of expressions is generally performed from 12 Database Language SQL X3H2-92-154/DBL CBR-002 3.3 Conventions left to right. However, it is implementation-dependent whether ex- pressions are actually evaluated left to right, particularly when operands or operators might cause conditions to be raised or if the results of the expressions can be determined without completely evaluating all parts of the expression. In general, if some syn- tactic element contains more than one other syntactic element, then the General Rules for contained elements that appear earlier in the production for the containing syntactic element are applied before the General Rules for contained elements that appear later. For example, in the production: ::= the Syntax Rules and Access Rules for , , and are ef- fectively applied simultaneously. The General Rules for are applied before the General Rules for , and the General Rules for are applied after the General Rules for both and . If the result of an expression or search condition can be deter- mined without completely evaluating all parts of the expression or search condition, then the parts of the expression or search condi- tion whose evaluation is not necessary are called the inessential parts. If the Access Rules pertaining to inessential parts are not satisfied, then the syntax error or access rule violation exception condition is raised regardless of whether or not the inessential parts are actually evaluated. If evaluation of the inessential parts would cause an exception condition to be raised, then it is implementation-dependent whether or not that exception condition is raised. 3.3.4.5 Conditional rules Conditional rules are specified with "If" or "Case" conventions. Rules specified with "Case" conventions include a list of con- ditional sub-rules using "If" conventions. The first such "If" sub-rule whose condition is true is the effective sub-rule of the "Case" rule. The last sub-rule of a "Case" rule may specify "Otherwise". Such a sub-rule is the effective sub-rule of the "Case" rule if no preceding "If" sub-rule in the "Case" rule has a true condition. 3.3.4.6 Syntactic substitution In the Syntax and General Rules, the phrase "X is implicit" indi- cates that the Syntax and General Rules are to be interpreted as if the element X had actually been specified. In the Syntax and General Rules, the phrase "the following is implicit: Y" indicates that the Syntax and General Rules are to be interpreted as if a syntactic element containing Y had actually been specified. Definitions, notations, and conventions 13 X3H2-92-154/DBL CBR-002 3.3 Conventions In the Syntax Rules and General Rules, the phrase "former is equiv- alent to latter" indicates that the Syntax Rules and General Rules are to be interpreted as if all instances of former in the element had been instances of latter. If a BNF nonterminal is referenced in a Subclause without speci- fying how it is contained in a BNF production that the Subclause defines, then Case: - If the BNF nonterminal is itself defined in the Subclause, then the reference shall be assumed to be the occurrence of that BNF nonterminal on the left side of the defining production. - Otherwise, the reference shall be assumed to be to a BNF pro- duction in which the particular BNF nonterminal is immediately contained. 3.3.4.7 Other terms Some Syntax Rules define terms, such as T1, to denote named or unnamed tables. Such terms are used as table names or correlation names. Where such a term is used as a correlation name, it does not imply that any new correlation name is actually defined for the denoted table, nor does it affect the scopes of any actual correlation names. An SQL-statement S1 is said to be executed as a direct result of executing an SQL-statement if S1 is the SQL-statement contained in a that has been executed, or if S1 is the value of an referenced by an contained in a that has been executed, or if S1 was the value of the that was associ- ated with an by a and that same is referenced by an contained in a that has been executed. 3.3.5 Descriptors A descriptor is a conceptual structured collection of data that defines the attributes of an instance of an object of a specified type. The concept of descriptor is used in specifying the seman- tics of SQL. It is not necessary that any descriptor exist in any particular form in any database or environment. Some SQL objects cannot exist except in the context of other SQL objects. For example, columns cannot exist except in tables. Those objects are independently described by descriptors, and the de- scriptors of enabling objects (e.g., tables) are said to include the descriptors of enabled objects (e.g., columns or table con- straints). Conversely, the descriptor of an enabled object is said to be included in the descriptor of an enabling object. 14 Database Language SQL X3H2-92-154/DBL CBR-002 3.3 Conventions In other cases, certain SQL objects cannot exist unless some other SQL object exists, even though there is not an inclusion relation- ship. For example, SQL does not permit an assertion to exist if the tables referenced by the assertion do not exist. Therefore, an as- sertion descriptor is dependent on or depends on zero or more table descriptors (equivalently, an assertion is dependent on or depends on zero or more tables). In general, a descriptor D1 can be said to depend on, or be dependent on, some descriptor D2. There are two ways of indicating dependency of one construct on another. In many cases, the descriptor of the dependent construct is said to "include the name of" the construct on which it is de- pendent. In this case "the name of" is to be understood as meaning "sufficient information to identify the descriptor of"; thus an implementor might choose to use a pointer or a concatenation of , , etc. Alternatively, the descrip- tor may be said to include text (e.g., , ). In such cases, whether the implementation includes ac- tual text (with defaults and implications made explicit) or its own style of parse tree is irrelevant; the validity of the descriptor is clearly "dependent on" the existence of descriptors for objects that are referred to in it. The statement that a column "is based on" a domain, is equivalent to a statement that a column "is dependent on" that domain. An attempt to destroy a descriptor may fail if other descriptors are dependent on it, depending on how the destruction is specified. Such an attempt may also fail if the descriptor to be destroyed is included in some other descriptor. Destruction of a descriptor results in the destruction of all descriptors included in it, but has no effect on descriptors on which it is dependent. 3.3.6 Index typography In the Index to this International Standard, the following conven- tions are used: - Index entries appearing in boldface indicate the page where the word, phrase, or BNF nonterminal was defined; - Index entries appearing in italics indicate a page where the BNF nonterminal was used in a Format; and - Index entries appearing in roman type indicate a page where the word, phrase, or BNF nonterminal was used in a heading, Function, Syntax Rule, Access Rule, General Rule, Leveling Rule, Table, or other descriptive text. Definitions, notations, and conventions 15 X3H2-92-154/DBL CBR-002 3.4 Object identifier for Database Language SQL 3.4 Object identifier for Database Language SQL Function The object identifier for Database Language SQL identifies the characteristics of an SQL-implementation to other entities in an open systems environment. Format ::= ::= ::= iso | 1 | iso 1 ::= standard | 0 | standard 0 ::= 9075 ::= ::= <1987> | <1989> | <1992> <1987> ::= 0 | edition1987 0 <1989> ::= <1989 base> <1989 package> <1989 base> ::= 1 | edition1989 1 <1989 package> ::= | ::= 0 | IntegrityNo 0 ::= 1 | IntegrityYes 1 <1992> ::= 2 | edition1992 2 ::= | | ::= 0 | Low 0 ::= 1 | Intermediate 1 ::= 2 | High 2 16 Database Language SQL X3H2-92-154/DBL CBR-002 3.4 Object identifier for Database Language SQL Syntax Rules 1) An of shall not be specified unless the is specified as <1992>. 2) The value of identifies the level at which conformance is claimed as follows: a) If specifies <1992>, then Case: i) , then Entry SQL level. ii) , then Intermediate SQL level. iii) , then Full SQL level. b) Otherwise: i) , then level 1. ii) , then level 2. 3) A specification of <1989 package> as implies that the integrity enhancement feature is not implemented. A specification of <1989 package> as implies that the integrity enhancement feature is implemented. Definitions, notations, and conventions 17 X3H2-92-154/DBL CBR-002 18 Database Language SQL X3H2-92-154/DBL CBR-002 4 Concepts 4.1 Data types A data type is a set of representable values. The logical represen- tation of a value is a . The physical representation of a value is implementation-dependent. A value is primitive in that it has no logical subdivision within this International Standard. A value is a null value or a non-null value. A null value is an implementation-dependent special value that is distinct from all non-null values of the associated data type. There is effectively only one null value and that value is a member of every SQL data type. There is no for a null value, although the keyword NULL is used in some places to indicate that a null value is desired. SQL defines distinct data types named by the following s: CHARACTER, CHARACTER VARYING, BIT, BIT VARYING, NUMERIC, DECIMAL, INTEGER, SMALLINT, FLOAT, REAL, DOUBLE PRECISION, DATE, TIME, TIMESTAMP, and INTERVAL. Subclause 6.1, "", describes the semantic properties of each data type. For reference purposes, the data types CHARACTER and CHARACTER VARYING are collectively referred to as character string types. The data types BIT and BIT VARYING are collectively referred to as bit string types. Character string types and bit string types are collectively referred to as string types and values of string types are referred to as strings. The data types NUMERIC, DECIMAL, INTEGER, and SMALLINT are collectively referred to as exact numeric types. The data types FLOAT, REAL, and DOUBLE PRECISION are col- lectively referred to as approximate numeric types. Exact numeric types and approximate numeric types are collectively referred to as numeric types. Values of numeric type are referred to as numbers. The data types DATE, TIME, and TIMESTAMP are collectively referred to as datetime types. Values of datetime types are referred to as datetimes. The data type INTERVAL is referred to as an interval type. Values of interval types are called intervals. Each data type has an associated data type descriptor. The contents of a data type descriptor are determined by the specific data type that it describes. A data type descriptor includes an identifica- tion of the data type and all information needed to characterize an instance of that data type. Concepts 19 X3H2-92-154/DBL CBR-002 4.1 Data types Each host language has its own data types, which are separate and distinct from SQL data types, even though similar names may be used to describe the data types. Mappings of SQL data types to data types in host languages are described in Subclause 12.3, "", and Subclause 19.1, "". Not every SQL data type has a corresponding data type in every host language. 4.2 Character strings A character string data type is described by a character string data type descriptor. A character string data type descriptor con- tains: - the name of the specific character string data type (CHARACTER or CHARACTER VARYING; NATIONAL CHARACTER and NATIONAL CHARACTER VARYING are represented as CHARACTER and CHARACTER VARYING, respectively); - the length or maximum length in characters of the character string data type; - the catalog name, schema name, and character set name of the character set of the character string data type; and - the catalog name, schema name, and collation name of the colla- tion of the character string data type. Character sets fall into three categories: those defined by na- tional or international standards, those provided by implemen- tations, and those defined by applications. All character sets, however defined, always contain the character. Character sets defined by applications can be defined to "reside" in any schema chosen by the application. Character sets defined by stan- dards or by implementations reside in the Information Schema (named INFORMATION_SCHEMA) in each catalog, as do collations defined by standards and collations and form-of-use conversions defined by implementations. The SQL_TEXT specifies the name of a character repertoire and implied form-of- use that can represent every character that is in and all other characters that are in character sets supported by the implementation. 4.2.1 Character strings and collating sequences A character string is a sequence of characters chosen from the same character repertoire. The character repertoire from which the characters of a particular string are chosen may be specified explicitly or implicitly. A character string has a length, which is the number of characters in the sequence. The length is 0 or a positive integer. 20 Database Language SQL X3H2-92-154/DBL CBR-002 4.2 Character strings All character strings of a given character repertoire are mutu- ally comparable, subject to the restrictions specified in Table 3, "Collating sequence usage for comparisons". A collating sequence, also known as a collation, is a set of rules determining comparison of character strings in a particular char- acter repertoire. There is a default collating sequence for each character repertoire, but additional collating sequences can be defined for any character repertoire. Note: A column may be defined as having a default collating se- quence. This default collating sequence for the column may be different from the default collating sequence for its character repertoire, e.g., if the is specified in the . It will be clear from context when the term "default collating sequence" is used whether it is meant for a column or for a character repertoire. Given a collating sequence, two character strings are identical if and only if they are equal in accordance with the comparison rules specified in Subclause 8.2, "". The collat- ing sequence used for a particular comparison is determined as in Subclause 4.2.3, "Rules determining collating sequence usage". The s NATIONAL CHARACTER are used to specify a character string data type with a particular implementation-defined character repertoire. Special syntax (N'string') is provided for representing literals in that character repertoire. A character set is described by a character set descriptor. A char- acter set descriptor includes: - the name of the character set or character repertoire, - if the character set is a character repertoire, then the name of the form-of-use, - an indication of what characters are in the character set, and - the name of the default collation of the character set. For every character set, there is at least one collation. A colla- tion is described by a collation descriptor. A collation descriptor includes: - the name of the collation, - the name of the character set on which the collation operates, - whether the collation has the NO PAD or the PAD SPACE attribute, and - an indication of how the collation is performed. Concepts 21 X3H2-92-154/DBL CBR-002 4.2 Character strings 4.2.2 Operations involving character strings 4.2.2.1 Operators that operate on character strings and return character strings is an operator, |, that returns the char- acter string made by joining its character string operands in the order given. is a triadic function, SUBSTRING, that returns a string extracted from a given string according to a given numeric starting position and a given numeric length. Truncation occurs when the implied starting and ending positions are not both within the given string. is a pair of functions for converting all the lower case characters in a given string to upper case (UPPER) or all the upper case ones to lower case (LOWER), useful only in connection with strings that may contain s. is a function that invokes an installation- supplied form-of-use conversion to return a character string S2 derived from a given character string S1. It is intended, though not enforced by this International Standard, that S2 be exactly the same sequence of characters as S1, but encoded according some dif- ferent form-of-use. A typical use might be to convert a character string from two-octet UCS to one-octet Latin1 or vice versa. is a function that returns its first string ar- gument with leading and/or trailing pad characters removed. The second argument indicates whether leading, or trailing, or both leading and trailing pad characters should be removed. The third argument specifies the pad character that is to be removed. is a function for changing each charac- ter of a given string according to some many-to-one or one-to-one mapping between two not necessarily distinct character sets. The mapping, rather than being specified as part of the function, is some external function identified by a . For any pair of character sets, there are zero or more translations that may be invoked by a . A translation is described by a translation descriptor. A translation descriptor includes: - the name of the translation, - the name of the character set from which it translates, - the name of the character set to which it translates, and - an indication of how the translation is performed. 22 Database Language SQL X3H2-92-154/DBL CBR-002 4.2 Character strings 4.2.2.2 Other operators involving character strings returns the length of a given character string, as an integer, in characters, octets, or bits according to the choice of function. determines the first position, if any, at which one string, S1, occurs within another, S2. If S1 is of length zero, then it occurs at position 1 for any value of S2. If S1 does not occur in S2, then zero is returned. uses the triadic operator LIKE (or the inverse, NOT LIKE), operating on three character strings and returning a Boolean. LIKE determines whether or not a character string "matches" a given "pattern" (also a character string). The char- acters '%' (percent) and '_' (underscore) have special meaning when they occur in the pattern. The optional third argument is a charac- ter string containing exactly one character, known as the "escape character", for use when a percent or underscore is required in the pattern without its special meaning. 4.2.3 Rules determining collating sequence usage The rules determining collating sequence usage for character strings are based on the following: - Expressions where no columns are involved (e.g., literals, host variables) are by default compared using the default collating sequence for their character repertoire. Note: The default collating sequence for a character repertoire is defined in Subclause 10.4, "", and Subclause 11.28, "". - When columns are involved (e.g., comparing two columns, or com- paring a column to a literal), by default the default collating sequence of the columns involved is used so long as the columns have the same default collating sequence. - When columns are involved having different default collating sequences, explicit specification of the collating sequence in the expression is required via the when the expression participates in a comparison. - Any explicit specification of collating sequence in an expres- sion overrides any default collating sequence. To formalize this, s effectively have a coercibility attribute. This attribute has the values Coercible, Implicit, No collating sequence, and Explicit. s with the Coercible, Implicit, or Explicit attributes have a collating sequence. Concepts 23 X3H2-92-154/DBL CBR-002 4.2 Character strings A consisting of a column reference has the Implicit attribute, with collating sequence as defined when the column was created. A consisting of a value other than a column (e.g., a host variable or a literal) has the Coercible attribute, with the default collation for its char- acter repertoire. A simply containing a has the Explicit attribute, with the collating sequence specified in the . Note: When the coercibility attribute is Coercible, the collating sequence is uniquely determined as specified in Subclause 8.2, "". The tables below define how the collating sequence and the co- ercibility attribute is determined for the result of any monadic or dyadic operation. Table 1, "Collating coercibility rules for monadic operators", shows the collating sequence and coercibility rules for monadic operators, and Table 2, "Collating coercibil- ity rules for dyadic operators", shows the collating sequence and coercibility rules for dyadic operators. Table 3, "Collating se- quence usage for comparisons", shows how the collating sequence is determined for a particular comparison. _____Table_1-Collating_coercibility_rules_for_monadic_operators____ Operand Coercibility Result Coercibility _____and_Collating_Sequence_____ _____and_Collating_Sequence___ | Collating | Collating | |_Coercibility______Sequence______|_Coercibility______Sequence_____| | | | | Coercible | default | Coercible | default | | | | | | | Implicit | X | Implicit | X | | | | | | | Explicit | X | Explicit | X | | | | | | |_______No_collati|g_sequence_____|______No_collatin|_sequence_____| | | | | | _____Table_2-Collating_coercibility_rules_for_dyadic_operators_____ Result Coercibility Operand 1 Coercibility Operand 2 Coercibility and Collating _and_Collating_Sequence _and_Collating_Sequence ___Sequence___ | Collating | Collating | Col|ating |_Coercibility_Sequence__|_Coercibility_Sequence__|__CoercibilitySe|uence | | | | | Coercible | default | Coercible | default | Coercible| def|ult | | | | | | | | Coercible | default | Implicit | Y | Implicit | Y | | | | | | | | | Coercible | default | No collati|g sequence | No colla|ing | sequence 24 Database Language SQL X3H2-92-154/DBL CBR-002 4.2 Character strings _Table_2-Collating_coercibility_rules_for_dyadic_operators_(Cont.)_ Result Coercibility Operand 1 Coercibility Operand 2 Coercibility and Collating _and_Collating_Sequence _and_Collating_Sequence ___Sequence___ | Collating | Collating | Col|ating |_Coercibility_Sequence__|_Coercibility_Sequence__|__CoercibilitySe|uence | | | | | Coercible | default | Explicit | Y | Explicit | Y | | | | | | | | | Implicit | X | Coercible | default | Implicit | X | | | | | | | | | Implicit | X | Implicit | X | Implicit | X | | | | | | | | | Implicit | X | Implicit | Y /= X | No colla|ing | sequence | Implicit | X | No collati|g sequence | No collating | | | | | | sequence | | | | | | | | Implicit | X | Explicit Y | Explicit Y | | | | | | | No collati|g sequence | Any, | Any | No colla|ing | except sequence Explicit | No collating sequence | Explicit | X | Explicit X | | | | | | | Explicit X | Coercible | default | Explicit | X | | | | | | | | Explicit | X | Implicit | Y | Explicit | X | | | | | | | | | Explicit | X | No collati|g sequence | Explicit | X | | | | | | | | | Explicit | X | Explicit X | Explicit | X | | | | | | | | Explicit | X | Explicit | Y /= X | Not permi|ted:| ____________________________________________________invalid_syntax_ |__________Ta|le_3-Collat|ng_sequence_|sage_for_co|parisons________| Comparand 1 Comparand 2 Coercibility and Coercibility and _Collating_Sequence _Collating_Sequence | | | Collating Sequence | | Collatin| Collatin| Used For The | |_CoercibilitSequence|_CoercibilitSequence|__Comparison____________| | | | | | Coercible| default | Coercible| default | default | | | | | | | | Coercible| default | Implicit | Y | Y | | | | | | | | Coercible| default | No co|lating | Not permitted: invalid| sequence syntax | Coercible| default | Explicit Y | Y | | | | | | Concepts 25 X3H2-92-154/DBL CBR-002 4.2 Character strings ______Table_3-Collating_sequence_usage_for_comparisons_(Cont.)_____ Comparand 1 Comparand 2 Coercibility and Coercibility and _Collating_Sequence _Collating_Sequence | | | Collating Sequence | | Collatin| Collatin| Used For The | |_CoercibilitSequence|_CoercibilitSequence|__Comparison____________| | | | | | Implicit | X | Coercible| default | X | | | | | | | | Implicit | X | Implicit | X | X | | | | | | | | Implicit | X | Implicit | Y /= X | Not permitted: invalid| syntax | Implicit | X | No co|lating | Not permitted: invalid| | | | seq|ence | syntax | | | | | | | | Implicit | X | Explicit Y | Y | | | | | | | No co|lating | Any | Any | Not permitted: invalid| sequence except syntax Explicit | No collating | Explicit | X | X | | sequence | | | | | | | | | | Explicit X | Coercible| default | X | | | | | | | Explicit | X | Implicit | Y | X | | | | | | | | Explicit | X | No co|lating | X | sequence | Explicit | X | Explicit X | X | | | | | | | Explicit | X | Explicit | Y /= X | Not permitted: invalid| ____________________________________________syntax_________________ |For n-adic|operation| (e.g., ) with operands X1, | X2, . . . , n , the collating sequence is effectively determined by considering X1 and X2, then combining this result with X3, and so on. 4.3 Bit strings A bit string is a sequence of bits, each having the value of 0 or 1. A bit string has a length, which is the number of bits in the string. The length is 0 or a positive integer. A bit string data type is described by a bit string data type de- scriptor. A bit string data type descriptor contains: - the name of the specific bit string data type (BIT or BIT VARYING); and 26 Database Language SQL X3H2-92-154/DBL CBR-002 4.3 Bit strings - the length of the bit string data type (in bits). 4.3.1 Bit string comparison and assignment All bit strings are mutually comparable. A bit string is identical to another bit string if and only if it is equal to that bit string in accordance with the comparison rules specified in Subclause 8.2, "". Assignment of a bit string to a bit string variable is performed from the most significant bit to the least significant bit in the source string to the most significant bit in the target string, one bit at a time. 4.3.2 Operations involving bit strings 4.3.2.1 Operators that operate on bit strings and return bit strings is an operator, |, that returns the bit string made by concatenating the two bit string operands in the order given. is a triadic function identical in syntax and semantics to except that the first argument and the returned value are both bit strings. 4.3.2.2 Other operators involving bit strings returns the length (as an integer number of octets or bits according to the choice of function) of a given bit string. determines the first position, if any, at which one string, S1, occurs within another, S2. If S1 is of length zero, then it occurs at position 1 for any value of S2. If S1 does not occur in S2, then zero is returned. 4.4 Numbers A number is either an exact numeric value or an approximate numeric value. Any two numbers are mutually comparable to each other. A numeric data type is described by a numeric data type descriptor. A numeric data type descriptor contains: - the name of the specific numeric data type (NUMERIC, DECIMAL, INTEGER, SMALLINT, FLOAT, REAL, or DOUBLE PRECISION); - the precision of the numeric data type; Concepts 27 X3H2-92-154/DBL CBR-002 4.4 Numbers - the scale of the numeric data type, if it is an exact numeric data type; and - an indication of whether the precision (and scale) are expressed in decimal or binary terms. 4.4.1 Characteristics of numbers An exact numeric value has a precision and a scale. The precision is a positive integer that determines the number of significant digits in a particular radix (binary or decimal). The scale is a non-negative integer. A scale of 0 indicates that the number is an integer. For a scale of S, the exact numeric value is the integer value of the significant digits multiplied by 10-S. An approximate numeric value consists of a mantissa and an expo- nent. The mantissa is a signed numeric value, and the exponent is a signed integer that specifies the magnitude of the mantissa. An approximate numeric value has a precision. The precision is a posi- tive integer that specifies the number of significant binary digits in the mantissa. The value of an approximate numeric value is the mantissa multiplied by 10exponent. Whenever an exact or approximate numeric value is assigned to a data item or parameter representing an exact numeric value, an approximation of its value that preserves leading significant dig- its after rounding or truncating is represented in the data type of the target. The value is converted to have the precision and scale of the target. The choice of whether to truncate or round is implementation-defined. An approximation obtained by truncation of a numerical value N for an T is a value V representable in T such that N is not closer to zero than the numerical value of V and such that the absolute value of the difference between N and the numer- ical value of V is less than the absolute value of the difference between two successive numerical values representable in T. An approximation obtained by rounding of a numerical value N for an T is a value V representable in T such that the absolute value of the difference between N and the nu- merical value of V is not greater than half the absolute value of the difference between two successive numerical values repre- sentable in T. If there are more than one such values V, then it is implementation-defined which one is taken. All numerical values between the smallest and the largest value, inclusive, representable in a given exact numeric type have an approximation obtained by rounding or truncation for that type; it is implementation-defined which other numerical values have such approximations. 28 Database Language SQL X3H2-92-154/DBL CBR-002 4.4 Numbers An approximation obtained by truncation or rounding of a numerical value N for an T is a value V repre- sentable in T such that there is no numerical value representable in T and distinct from that of V that lies between the numerical value of V and N, inclusive. If there are more than one such values V then it is implementation- defined which one is taken. It is implementation-defined which numerical values have approximations obtained by rounding or trun- cation for a given approximate numeric type. Whenever an exact or approximate numeric value is assigned to a data item or parameter representing an approximate numeric value, an approximation of its value is represented in the data type of the target. The value is converted to have the precision of the target. Operations on numbers are performed according to the normal rules of arithmetic, within implementation-defined limits, except as provided for in Subclause 6.12, "". 4.4.2 Operations involving numbers As well as the usual arithmetic operators, plus, minus, times, divide, unary plus, and unary minus, there are the following func- tions that return numbers: - (see Subclause 4.2.2, "Operations involv- ing character strings", and Subclause 4.3.2, "Operations involv- ing bit strings") takes two strings as arguments and returns an integer; - (see Subclause 4.2.2, "Operations involving character strings", and Subclause 4.3.2, "Operations involv- ing bit strings") operates on a string argument and returns an integer; - (see Subclause 4.5.3, "Operations involving datetimes and intervals") operates on a datetime or interval argument and returns an integer. 4.5 Datetimes and intervals A datetime data type is described by a datetime data type descrip- tor. An interval data type is described by an interval data type descriptor. A datetime data type descriptor contains: - the name of the specific datetime data type (DATE, TIME, TIMESTAMP, TIME WITH TIME ZONE, or TIMESTAMP WITH TIME ZONE); and Concepts 29 X3H2-92-154/DBL CBR-002 4.5 Datetimes and intervals - the value of the that does not specify TEMPORARY. A derived table is a table derived directly or indirectly from one or more other tables by the evaluation of a . The values of a derived table are derived from the values of the underlying tables when the is evaluated. A viewed table is a named derived table defined by a . A viewed table is sometimes called a view. Concepts 37 X3H2-92-154/DBL CBR-002 4.9 Tables The terms simply underlying table, underlying table, leaf underly- ing table, generally underlying table, and leaf generally underly- ing table define a relationship between a derived table or cursor and other tables. The simply underlying tables of derived tables and cursors are defined in Subclause 7.9, "", Subclause 7.10, "", and Subclause 13.1, "". A viewed table has no simply underlying tables. The underlying tables of a derived table or cursor are the simply underlying tables of the derived table or cursor and the underlying tables of the simply underlying tables of the derived table or cursor. The leaf underlying tables of a derived table or cursor are the underlying tables of the derived table or cursor that do not them- selves have any underlying tables. The generally underlying tables of a derived table or cursor are the underlying tables of the derived table or cursor and, for those underlying tables of the derived table or cursor that are viewed tables, the of each viewed table and the gen- erally underlying tables of the of each viewed table. The leaf generally underlying tables of a derived table or cursor are the generally underlying tables of the derived table or cursor that do not themselves have any generally underlying tables. All base tables are updatable. Derived tables are either updatable or read-only. The operations of insert, update, and delete are permitted for updatable tables, subject to constraining Access Rules. The operations of insert, update, and delete are not allowed for read-only tables. A grouped table is a set of groups derived during the evaluation of a or a . A group is a multiset of rows in which all values of the grouping column or columns are equal if a is specified, or the group is the entire table if no is specified. A grouped table may be considered as a collection of tables. Set functions may operate on the individual tables within the grouped table. A global temporary table is a named table defined by a that specifies GLOBAL TEMPORARY. A created local temporary table is a named table defined by a that speci- fies LOCAL TEMPORARY. Global and created local temporary tables are effectively materialized only when referenced in an SQL-session. Every in every SQL-session that references a created local temporary table causes a distinct instance of that created local temporary table to be materialized. That is, the contents of a global temporary table or a created local temporary table cannot be shared between SQL-sessions. In addition, the contents of a cre- ated local temporary table cannot be shared between s of a single SQL-session. The definition of a global temporary table or a created local temporary table appears in a schema. In SQL language, 38 Database Language SQL X3H2-92-154/DBL CBR-002 4.9 Tables the name and the scope of the name of a global temporary table or a created local temporary table are indistinguishable from those of a persistent base table. However, because global temporary ta- ble contents are distinct within SQL-sessions, and created local temporary tables are distinct within s within SQL-sessions, the effective of the schema in which the global tem- porary table or the created local temporary table is instantiated is an implementation-dependent that may be thought of as having been effectively derived from the of the schema in which the global temporary table or created local temporary table is defined and the implementation-dependent SQL- session identifier associated with the SQL-session. In addition, the effective of the schema in which the created local temporary table is instantiated may be thought of as being further qualified by a unique implementation-dependent name associ- ated with the in which the created local temporary table is referenced. A declared local temporary table is a named table defined by a that is effectively materialized the first time any in the that contains the is executed. A declared local tem- porary table is accessible only by s in the that contains the . The effective of the of the declared local tem- porary table may be thought of as the implementation-dependent SQL-session identifier associated with the SQL-session and a unique implementation-dependent name associated with the that contains the . All references to a declared local temporary table are prefixed by "MODULE.". The materialization of a temporary table does not persist beyond the end of the SQL-session in which the table was materialized. Temporary tables are effectively empty at the start of an SQL- session. A table is described by a table descriptor. A table descriptor is either a base table descriptor, a view descriptor, or a derived table descriptor (for a derived table that is not a view). Every table descriptor includes: - the degree of the table (the number of column descriptors); and - the column descriptor of each column in the table. A base table descriptor describes a base table. In addition to the components of every table descriptor, a base table descriptor includes: - the name of the base table; - an indication of whether the table is a persistent base table, a global temporary table, a created local temporary table, or a declared local temporary table; and Concepts 39 X3H2-92-154/DBL CBR-002 4.9 Tables - the descriptor of each table constraint specified for the table. A derived table descriptor describes a derived table. In addi- tion to the components of every table descriptor, a derived table descriptor includes: - if the table is named, then the name of the table; - the that defines how the table is to be de- rived; and - an indication of whether the derived table is updatable or read- only (this is derived from the ); A view descriptor describes a view. In addition to the components of a derived table descriptor, a view descriptor includes: - an indication of whether the view has the CHECK OPTION; if so, whether it is to be applied as CASCADED or LOCAL. 4.10 Integrity constraints Integrity constraints, generally referred to simply as constraints, define the valid states of SQL-data by constraining the values in the base tables. A constraint is either a table constraint, a domain constraint or an assertion. A constraint is described by a constraint descriptor. A constraint descriptor is either a table constraint descriptor, a domain constraint descriptor or an assertion descriptor. Every constraint descriptor includes: - the name of the constraint; - an indication of whether or not the constraint is deferrable; - an indication of whether the initial constraint mode is deferred or immediate; A or is possibly non- deterministic if an implementation might, at two different times where the state of the SQL-data is the same, produce results that differ by more than the order of the rows due to General Rules that specify implementation-dependent behavior. No integrity constraint shall be defined using a or a that is possibly non-deterministic. 40 Database Language SQL X3H2-92-154/DBL CBR-002 4.10 Integrity constraints 4.10.1 Checking of constraints Every constraint is either deferrable or non-deferrable. Within a transaction, every constraint has a constraint mode; if a con- straint is non-deferrable, then its constraint mode is always im- mediate, otherwise it is either or immediate or deferred. Every constraint has an initial constraint mode that specifies the constraint mode for that constraint at the start of each SQL- transaction and immediately after definition of that constraint. If a constraint is deferrable, then its constraint mode may be changed (from immediate to deferred, or from deferred to immediate) by execution of a . The checking of a constraint depends on its constraint mode within the current SQL-transaction. If the constraint mode is immedi- ate, then the constraint is effectively checked at the end of each SQL-statement. If the constraint mode is deferred, then the constraint is effectively checked when the constraint mode is changed to immediate either explicitly by execution of a , or implicitly at the end of the current SQL-transaction. When a constraint is checked other than at the end of an SQL- transaction, if it is not satisfied, then an exception condition is raised and the SQL-statement that caused the constraint to be checked has no effect other than entering the exception information into the diagnostics area. When a is executed, all constraints are effectively checked and, if any constraint is not satisfied, then an exception condition is raised and the transaction is terminated by an implicit . 4.10.2 Table constraints A table constraint is either a unique constraint, a referential constraint or a table check constraint. A table constraint is de- scribed by a table constraint descriptor which is either a unique constraint descriptor, a referential constraint descriptor or a table check constraint descriptor. A unique constraint is described by a unique constraint descriptor. In addition to the components of every table constraint descriptor, a unique constraint descriptor includes: - an indication of whether it was defined with PRIMARY KEY or UNIQUE, and - the names and positions of the unique columns specified in the ; A referential constraint is described by a referential constraint descriptor. In addition to the components of every table constraint descriptor, a referential constraint descriptor includes: Concepts 41 X3H2-92-154/DBL CBR-002 4.10 Integrity constraints - the names of the referencing columns specified in the , - the names of the referenced columns and referenced table speci- fied in the , and - the value of the , if specified, and the , if specified. Note: If MATCH FULL or MATCH PARTIAL is specified for a referential constraint and if the referencing table has only one column spec- ified in for that referential constraint, or if the referencing table has more than one specified column for that , but none of those columns is nullable, then the effect is the same as if no were specified. A table check constraint is described by a table check constraint descriptor. In addition to the components of every table constraint descriptor, a table check constraint descriptor includes: - the . A unique constraint is satisfied if and only if no two rows in a table have the same non-null values in the unique columns. In addition, if the unique constraint was defined with PRIMARY KEY, then it requires that none of the values in the specified column or columns be the null value. In the case that a table constraint is a referential constraint, the table is referred to as the referencing table. The referenced columns of a referential constraint shall be the unique columns of some unique constraint of the referenced table. A referential constraint is satisfied if one of the following con- ditions is true, depending on the specified in the : - If no was specified then, for each row R1 of the referencing table, either at least one of the values of the referencing columns in R1 shall be a null value, or the value of each referencing column in R1 shall be equal to the value of the corresponding referenced column in some row of the referenced table. - If MATCH FULL was specified then, for each row R1 of the refer- encing table, either the value of every referencing column in R1 shall be a null value, or the value of every referencing column in R1 shall not be null and there shall be some row R2 of the referenced table such that the value of each referencing col- umn in R1 is equal to the value of the corresponding referenced column in R2. 42 Database Language SQL X3H2-92-154/DBL CBR-002 4.10 Integrity constraints - If MATCH PARTIAL was specified then, for each row R1 of the referencing table, there shall be some row R2 of the refer- enced table such that the value of each referencing column in R1 is either null or is equal to the value of the corresponding referenced column in R2. The referencing table may be the same table as the referenced ta- ble. A table check constraint is satisfied if and only if the specified is not false for any row of a table. 4.10.3 Domain constraints A domain constraint is a constraint that is specified for a domain. It is applied to all columns that are based on that domain, and to all values cast to that domain. A domain constraint is described by a domain constraint descriptor. In addition to the components of every constraint descriptor a domain constraint descriptor includes: - the . A domain constraint is satisfied by SQL-data if and only if, for any table T that has a column named C based on that domain, the specified , with each occurrence of VALUE re- placed by C, is not false for any row of T. A domain constraint is satisfied by the result of a if and only if the specified , with each occurrence of VALUE replaced by that result, is not false. 4.10.4 Assertions An assertion is a named constraint that may relate to the content of individual rows of a table, to the entire contents of a table, or to a state required to exist among a number of tables. An assertion is described by an assertion descriptor. In addi- tion to the components of every constraint descriptor an assertion descriptor includes: - the . An assertion is satisfied if and only if the specified is not false. Concepts 43 X3H2-92-154/DBL CBR-002 4.11 SQL-schemas 4.11 SQL-schemas An SQL-schema is a persistent descriptor that includes: - the of the SQL-schema; - the of the owner of the SQL-schema; - The of the default character set for the SQL-schema; and - the descriptor of every component of the SQL-schema. In this International Standard, the term "schema" is used only in the sense of SQL-schema. Each component descriptor is either a domain descriptor, a base table descriptor, a view descriptor, an assertion descriptor, a privilege descriptor, a character set descriptor, a collation descriptor, or a translation descriptor. The persistent objects described by the descriptors are said to be owned by or to have been created by the of the schema. A schema is created initially using a and may be subsequently modified incrementally over time by the execution of s. s are unique within a catalog. A is explicitly or implicitly qualified by a that identifies a catalog. Base tables and views are identified by s. A consists of a and an . For a per- sistent table, the identifies the schema in which the base table or view identified by the was de- fined. Base tables and views defined in different schemas can have s that are equal according to the General Rules of Subclause 8.2, "". If a reference to a does not explicitly contain a , then a specific is implied. The par- ticular associated with such a depends on the context in which the appears and is governed by the rules for . The default schema for s that are dynamically prepared in the current SQL- session through the execution of s and s is initially implementation-defined but may be changed by the use of s. 44 Database Language SQL X3H2-92-154/DBL CBR-002 4.12 Catalogs 4.12 Catalogs Catalogs are named collections of schemas in an SQL-environment. An SQL-environment contains zero or more catalogs. A catalog con- tains one or more schemas, but always contains a schema named INFORMATION_SCHEMA that contains the views and domains of the Information Schema. The method of creation and destruction of catalogs is implementation-defined. The set of catalogs that can be referenced in any SQL-statement, during any particular SQL-transaction, or during the course of an SQL-session is also implementation-defined. The default catalog for a whose does not specify an explicit to qualify the is implementation-defined. The default catalog for s that are dynami- cally prepared in the current SQL-session through the execution of s and s is ini- tially implementation-defined but may be changed by the use of s. 4.13 Clusters of catalogs A cluster is an implementation-defined collection of catalogs. Exactly one cluster is associated with an SQL-session and it defines the totality of the SQL-data that is available to that SQL-session. An instance of a cluster is described by an instance of a defi- nition schema. Given some SQL-data object, such as a view, a con- straint, a domain, or a base table, the definition of that object, and of all the objects that it directly or indirectly references, are in the same cluster of catalogs. For example, no and no can "cross" a cluster boundary. Whether or not any catalog can occur simultaneously in more than one cluster is implementation-defined. Within a cluster, no two catalogs have the same name. 4.14 SQL-data SQL-data is any data described by schemas that is under the control of an SQL-implementation in an SQL-environment. Concepts 45 X3H2-92-154/DBL CBR-002 4.15 SQL-environment 4.15 SQL-environment An SQL-environment comprises the following: - an SQL-implementation capable of processing some Level (Entry SQL, Intermediate SQL, or Full SQL) of this International Standard and at least one binding style; see Clause 23, "Conformance" for further information about binding styles; - zero or more catalogs; - zero or more s; - zero or more s; and - the SQL-data described by the schemas in the catalogs. An SQL-environment may have other implementation-defined contents. The rules determining which s are considered to be within an SQL-environment are implementation-defined. 4.16 Modules A is an object specified in the module language. A is either a persistent or an SQL-session . The mechanisms by which s are created or destroyed are implementation-defined. A consists of an optional , a , a with either or both of a and a , an optional that iden- tifies the character repertoire used for expressing the names of schema objects used in the , zero or more s, zero or more cursors specified by s, and one or more s. All s contained in the are expressed in either or the character repertoire indicated by unless they are specified with "". A compilation unit is a segment of executable code, possibly con- sisting of one or more subprograms. A is associated with a compilation unit during its execution. A single may be associated with multiple compilation units and multiple s may be associated with a single compilation unit. The manner in which this association is specified, including the possible re- quirement for execution of some implementation-defined statement, is implementation-defined. Whether a compilation unit may invoke or transfer control to other compilation units, written in the same or a different programming language, is implementation-defined. 46 Database Language SQL X3H2-92-154/DBL CBR-002 4.17 Procedures 4.17 Procedures A consists of a , a sequence of s, and a single . A in a is invoked by a compilation unit as- sociated with the by means of a host language "call" statement that specifies the of the and supplies a sequence of parameter values corresponding in number and in to the s of the . A call of a causes the that it contains to be executed. 4.18 Parameters A parameter is declared in a by a . The specifies the of its value. A parameter either assumes or supplies the value of the corresponding argument in the call of that . These s map to host language types and are not nullable except through the use of additional indicator variables. 4.18.1 Status parameters The SQLSTATE and SQLCODE parameters are status parameters. They are set to status codes that indicate either that a call of the completed successfully or that an exception condition was raised during execution of the . Note: The SQLSTATE parameter is the preferred status parameter. The SQLCODE parameter is a deprecated feature that is supported for compatibility with earlier versions of this International Standard. See Annex D, "Deprecated features". A shall specify either the SQLSTATE parameter or the SQLCODE parameter or both. The SQLSTATE parameter is a charac- ter string parameter for which exception values are defined in Clause 22, "Status codes". The SQLCODE parameter is an integer pa- rameter for which the negative exception values are implementation- defined. If a condition is raised that causes a statement to have no effect other than that associated with raising the condition (that is, not a completion condition), then the condition is said to be an exception condition or exception. If a condition is raised that permits a statement to have an effect other than that associated with raising the condition (corresponding to an SQLSTATE class value of successful completion, warning, or no data), then the condition is said to be a completion condition. Concepts 47 X3H2-92-154/DBL CBR-002 4.18 Parameters 4.18.2 Data parameters A data parameter is a parameter that is used to either assume or supply the value of data exchanged between a host program and an SQL-implementation. 4.18.3 Indicator parameters An indicator parameter is an integer parameter that is specified immediately following another parameter. Its primary use is to indicate whether the value that the other parameter assumes or supplies is a null value. An indicator parameter cannot immediately follow another indicator parameter. The other use for indicator parameters is to indicate whether string data truncation occurred during a transfer between a host program and an SQL-implementation in parameters or host variables. If a non-null string value is transferred and the length of the target data item is sufficient to accept the entire source data item, then the indicator parameter or variable is set to 0 to in- dicate that truncation did not occur. However, if the length of the target data item is insufficient, then the indicator parame- ter or variable is set to the length of the source data item (in characters or bits, as appropriate) to indicate that truncation occurred and to indicate the original length in characters or bits, as appropriate, of the source. 4.19 Diagnostics area The diagnostics area is a place where completion and exception con- dition information is stored when an SQL-statement is executed. There is one diagnostics area associated with an SQL-agent, regard- less of the number of s that the SQL-agent includes or the number of connections in use. At the beginning of the execution of any statement that is not an , the diagnostics area is emptied. An implementation shall place information about a completion condition or an exception condition reported by SQLCODE or SQLSTATE into this area. If other conditions are raised, an implementation may place information about them into this area. s containing s return a code indicating completion or exception conditions for that statement via SQLCODE or SQLSTATE, but do not modify the diagnostics area. An SQL-agent may choose the size of the diagnostics area with the ; if an SQL-agent does not specify the size of the diagnostics area, then the size of the diagnostics area is implementation-dependent, but shall always be able to hold information about at least one condition. An implementation may place information into this area about fewer conditions than are specified. The ordering of the information about conditions placed 48 Database Language SQL X3H2-92-154/DBL CBR-002 4.19 Diagnostics area into the diagnostics area is implementation-dependent, except that the first condition in the diagnostics area always corresponds to the condition specified by the SQLSTATE or SQLCODE value. 4.20 Standard programming languages This International Standard specifies the actions of s in s when those s are called by programs that conform to certain specified programming language standards. The term "standard PLN program", where PLN is the name of a program- ming language, refers to a program that conforms to the standard for that programming language as specified in Clause 2, "Normative references". This International Standard also specifies a mechanism whereby SQL language may be embedded in programs that otherwise conform to any of the same specified programming language stan- dards. Note: In this International Standard, for the purposes of inter- facing with programming languages, the data types DATE, TIME, TIMESTAMP, and INTERVAL shall be converted to or from character strings in those programming languages by means of a . It is anticipated that future evolution of programming language standards will support data types corresponding to these four SQL data types; this standard will then be amended to reflect the availability of those corresponding data types. The data type CHARACTER is also mapped to character strings in the programming languages. However, because the facilities available in the pro- gramming languages do not provide the same capabilities as those available in SQL, there shall be agreement between the host pro- gram and SQL regarding the specific format of the character data being exchanged. Specific syntax for this agreement is provided in this International standard. For standard programming lan- guages, C, COBOL, Fortran, and Pascal, bit strings are mapped to character variables in the host language in a manner described in Subclause 19.1, "". For standard pro- gramming languages Ada and PL/I, bit string variables are directly supported. 4.21 Cursors A cursor is specified by a , , or . For every or in a , a cursor is effectively created when an SQL-transaction (see Subclause 4.28, "SQL-transactions") referencing the is initiated, and destroyed when that SQL-transaction is terminated. A cursor is also effectively created when an is executed within a SQL-transaction and destroyed when that SQL-transaction is terminated. In addition, an extended dynamic Concepts 49 X3H2-92-154/DBL CBR-002 4.21 Cursors cursor is destroyed when a is exe- cuted that deallocates the prepared statement on which the extended dynamic cursor is based. A cursor is in either the open state or the closed state. The ini- tial state of a cursor is the closed state. A cursor is placed in the open state by an or and returned to the closed state by a or , a , or a . A cursor in the open state identifies a table, an ordering of the rows of that table, and a position relative to that ordering. If the does not include an , or includes an that does not specify the order of the rows completely, then the rows of the table have an order that is defined only to the extent that the specifies an order and is otherwise implementation-dependent. When the ordering of a cursor is not defined by an , the relative positions of two rows is implementation- dependent. When the ordering of a cursor is partially determined by an , then the relative positions of two rows are determined only by the ; if the two rows have equal values for the purpose of evaluating the , then their relative positions are implementation-dependent. A cursor is either read-only or updatable. If the table identified by a cursor is not updatable or if INSENSITIVE is specified for the cursor, then the cursor is read-only; otherwise, the cursor is updatable. The operations of update and delete are not allowed for read-only cursors. The position of a cursor in the open state is either before a cer- tain row, on a certain row, or after the last row. If a cursor is on a row, then that row is the current row of the cursor. A cursor may be before the first row or after the last row of a table even though the table is empty. When a cursor is initially opened, the position of the cursor is before the first row. A or positions an open cursor on a specified row of the cursor's ordering and retrieves the values of the columns of that row. An or updates the current row of the cursor. A or deletes the current row of the cursor. If an error occurs during the execution of an SQL-statement that identifies an open cursor, then, except where otherwise explic- itly defined, the effect, if any, on the position or state of that cursor is implementation-dependent. If a cursor is open, and the current SQL-transaction makes a change to SQL-data other than through that cursor, and the for that cursor specified INSENSITIVE, then the effect of that change will not be visible through that cursor before it is closed. Otherwise, whether the effect of such a change will be 50 Database Language SQL X3H2-92-154/DBL CBR-002 4.21 Cursors visible through that cursor before it is closed is implementation- dependent. 4.22 SQL-statements 4.22.1 Classes of SQL-statements An SQL-statement is a string of characters that conforms to the format and syntax rules specified in this international standard. Most SQL-statements can be prepared for execution and executed in one of a number of ways. These are: - in a , in which case it is prepared when the is created (see Subclause 4.16, "Modules") and executed when the containing procedure is called. - in an embedded SQL host program, in which case it is pre- pared when the embedded SQL host program is preprocessed (see Subclause 4.23, "Embedded syntax"). - being prepared and executed by the use of SQL-dynamic statements (which are themselves executed in one of the foregoing two ways- see Subclause 4.24, "SQL dynamic statements"). - direct invocation, in which case it is effectively prepared immediately prior to execution (see Subclause 4.25, "Direct invocation of SQL"). There are at least five ways of classifying SQL-statements: - According to their effect on SQL objects, whether persistent objects, i.e., SQL-data and schemas, or transient objects, such as SQL-sessions and other SQL-statements. - According to whether or not they start a transaction, or can, or must, be executed when no transaction is active. - According to whether or not they may be embedded. - According to whether they may be dynamically prepared and exe- cuted. - According to whether or not they may be directly executed. This International Standard permits implementations to provide ad- ditional, implementation-defined, statements that may fall into any of these categories. This Subclause will not mention those state- ments again, as their classification is entirely implementation- defined. Concepts 51 X3H2-92-154/DBL CBR-002 4.22 SQL-statements 4.22.2 SQL-statements classified by function The following are the main classes of SQL-statements: - SQL-schema statements; these may have a persistent effect on schemas - SQL-data statements; some of these, the SQL-data change state- ments, may have a persistent effect on SQL-data - SQL-transaction statements; except for the , these, and the following classes, have no effects that persist when a session is terminated - SQL-connection statements - SQL-session statements - SQL-dynamic statements - SQL-diagnostics statements - SQL embedded exception declaration The following are the SQL-schema statements: - - - - - - - - - - - - - - - - 52 Database Language SQL X3H2-92-154/DBL CBR-002 4.22 SQL-statements - - - - The following are the SQL-data statements: - - - - - - - - - - - - o o o o o o o Concepts 55 X3H2-92-154/DBL CBR-002 4.22 SQL-statements The following SQL-statements are embeddable in an embedded SQL host program, and may occur in a , though not in a : - - - The following SQL-statements are embeddable in an embedded SQL host program, but may not occur in a : - SQL embedded exception declarations Consequently, the following SQL-data statements are not embeddable in an embedded SQL host program, nor may they occur in a , nor be the in a in a : - - - - - 4.22.4 Preparable and immediately executable SQL-statements The following SQL-statements are preparable: - All SQL-schema statements - All SQL-transaction statements - All SQL-session statements - The following SQL-data statements: o o o o o o o o 56 Database Language SQL X3H2-92-154/DBL CBR-002 4.22 SQL-statements Consequently, the following SQL-statements are not preparable: - All SQL-connection statements - All SQL-dynamic statements - All SQL-diagnostics statements - SQL embedded exception declarations - The following SQL-data statements: o o o o o o o o o o 58 Database Language SQL X3H2-92-154/DBL CBR-002 4.22 SQL-statements o o o o o o 4.22.6 SQL-statements and transaction states Whether an starts a transaction de- pends on what SQL-statement is the value of . Whether an starts a transaction depends on what SQL-statement was the value of when the prepared statement identified by was prepared. The following SQL-statements are transaction initiating SQL- statements, i.e., if there is no current transaction, and a state- ment of this class is executed, a transaction is initiated: - All SQL-schema statements - The following SQL-data statements: o o o o o o o o o ::= | ::= MODULE ::= ::= ::= [ ] ::= ::= ::= [ ] ::= ::= 98 Database Language SQL X3H2-92-154/DBL CBR-002 5.4 Names and identifiers ::= ::= ::= ::= ::= | ::= ::= [ ] ::= | ::= [ ] ::= [ ] ::= GLOBAL | LOCAL ::= ::= ::= ::= [ ] ::= ::= ::= ::= ::= Lexical elements 99 X3H2-92-154/DBL CBR-002 5.4 Names and identifiers Syntax Rules 1) If a is not specified in an , then the set of characters contained in the shall be wholly contained in either or the character repertoire identified by: Case: a) If the is contained in a , then the , b) If the is contained in a that is not contained in a , then the , c) If the is contained in a that is prepared in the current SQL-session by an or a or in a that is invoked directly, then the default character set name for the SQL-session. 2) If a is specified in an , then: a) There shall be no between the and the . b) The set of characters contained in the or shall be wholly contained in the character repertoire indicated by the . 3) The sum of the number of s and the number of s in an shall not be greater than 128. 4) An is equivalent to an in which every letter that is a lower-case letter is replaced by the equivalent upper-case letter or letters. This treatment includes determination of equivalence, representation in the Information and Definition Schemas, representation in the diagnostics area, and similar uses. 5) An (with every letter that is a lower- case letter replaced by the equivalent upper-case letter), treated as the repetition of a that specifies a of SQL_TEXT, shall not be equal, according to the comparison rules in Subclause 8.2, "", to any (with every letter that is a lower-case letter replaced by the equivalent upper-case letter), treated as the repetition of a that specifies a of SQL_TEXT. 100 Database Language SQL X3H2-92-154/DBL CBR-002 5.4 Names and identifiers Note: It is the intention that no specified in this International standard or revisions thereto shall end with an . 6) If is not a , then the table identified by shall not be a declared local temporary table. 7) No shall specify DEFINITION_SCHEMA. 8) If a does not contain a , then Case: a) If the is contained in a , then the that is specified or implicit in the is implicit. b) If the is contained in a that is prepared in the current SQL-session by an or a or in a that is invoked directly, then the default for the SQL-session is implicit. c) Otherwise, the that is specified or implicit for the is implicit. 9) If a does not contain a , then Case: a) If the is contained in a , then an implementation-defined is implicit. b) If the is contained in a other than in a , then the that is specified or implicit in the is implicit. c) If the is contained in a that is prepared in the current SQL-session by an or a or in a that is invoked directly, then the default catalog name for the SQL-session is implicit. d) If the is contained in a , then Case: i) If the is contained in a , then the explicit or implicit contained in the is implicit. Lexical elements 101 X3H2-92-154/DBL CBR-002 5.4 Names and identifiers ii) Otherwise, an implementation-defined is implicit. e) Otherwise, the explicit or implicit contained in the is implicit. 10)Two s are equal if and only if they have the same and the same , regard- less of whether the s are implicit or explicit. 11)Two s are equal if and only if they have the same and the same , regard- less of whether the s are implicit or explicit. 12)An that is a is associated with a table within a particular scope. The scope of a is either a "). Scopes may be nested. In different scopes, the same may be associated with different tables or with the same table. 13)The of or shall not be a . 14)The data type of the of shall be character string with an implementation-defined character set and shall have an octet length of 128 octets or less. 15)The data type of the of shall be character string with an implementation- defined character set and shall have an octet length of 128 octets or less. 16)The data type of the of shall be character string with an implementation-defined character set and shall have an octet length of 128 octets or less. 17)In a , , or , if a is not specified, then a of LOCAL is implicit. 18)No shall specify "PUBLIC". 19)Those s that are valid s are implementation-defined. 20)Those s that are valid s are implementation- defined. 102 Database Language SQL X3H2-92-154/DBL CBR-002 5.4 Names and identifiers 21)If a does not specify a , then INFORMATION_SCHEMA is implicit. 22)If a does not specify a , then INFORMATION_SCHEMA is implicit. 23)If a does not specify a , then INFORMATION_SCHEMA is implicit. 24)The of , , and shall be character string with an implementation- defined character set and shall have an octet length of 128 octets or less. 25)If a does not specify a , then INFORMATION_SCHEMA is implicit; otherwise, INFORMATION_ SCHEMA shall be specified. Access Rules None. General Rules 1) A identifies a table. 2) Within its scope, a identifies a table. 3) A identifies a declared local temporary ta- ble. 4) A identifies a column. 5) A identifies a domain. 6) An represents an authorization iden- tifier and identifies a set of privileges. 7) A identifies a . 8) A identifies a cursor. 9) A identifies a . 10)A identifies a parameter. 11)A identifies a table constraint, a domain constraint, or an assertion. 12)A identifies a statement prepared by the execu- tion of a . The scope of a is the in which it appears and the current SQL-session. Lexical elements 103 X3H2-92-154/DBL CBR-002 5.4 Names and identifiers 13)The value of an identifies a statement prepared by the execution of a . If a of GLOBAL is specified, then the scope of the is the current SQL-session. If a of LOCAL is specified or implicit, then the scope of the state- ment name is further restricted to the in which the appears. 14)A identifies a cursor in an . 15)The value of an identifies a cursor cre- ated by the execution of an . If a of GLOBAL is specified, then the scope of the is the current SQL-session. If a of LOCAL is specified of implicit, then the scope of the cursor name is further restricted to the in which the appears. 16)A identifies an SQL descriptor area created by the execution of an . If a of GLOBAL is specified, then the scope of the is the current SQL-session. If a of LOCAL is specified or implicit, then the scope of the is further restricted to the in which the appears. 17)A identifies a catalog. 18)A identifies a schema. 19)A identifies a collating sequence. 20)A identifies a character set. 21)A identifies a character translation. 22)A identifies a form-of-use con- version. All s are implementation- defined. 23)A identifies an SQL-connection. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall not contain any or . b) Conforming Intermediate SQL language shall not contain any explicit , , , , , or . 104 Database Language SQL X3H2-92-154/DBL CBR-002 5.4 Names and identifiers 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain any , , , , , or . b) An shall not specify a . Lexical elements 105 X3H2-92-154/DBL CBR-002 106 Database Language SQL X3H2-92-154/DBL CBR-002 6 Scalar expressions 6.1 Function Specify a data type. Format ::= [ CHARACTER SET ] | | | | | ::= CHARACTER [ ] | CHAR [ ] | CHARACTER VARYING | CHAR VARYING | VARCHAR ::= NATIONAL CHARACTER [ ] | NATIONAL CHAR [ ] | NCHAR [ ] | NATIONAL CHARACTER VARYING | NATIONAL CHAR VARYING | NCHAR VARYING ::= BIT [ ] | BIT VARYING ::= | ::= NUMERIC [ [ ] ] | DECIMAL [ [ ] ] Scalar expressions 107 X3H2-92-154/DBL CBR-002 6.1 | DEC [ [ ] ] | INTEGER | INT | SMALLINT ::= FLOAT [ ] | REAL | DOUBLE PRECISION ::= ::= ::= ::= DATE | TIME [ 6.3 Function Reference a table. Format ::= [ [ AS ] [ ] ] | [ AS ] [ ] | ::= ::= ::= [ { }... ] Syntax Rules 1) A immediately contained in a TR is exposed by TR. A immediately contained in a TR is exposed by TR if and only if TR does not specify a . 2) Case: a) If a TR is contained in a FC with no intervening , then the scope clause SC of TR is the of TR is the of TR is the of SC and of all s contained in SC that contain TR. 3) A that is exposed by a TR shall not be the same as any other that is exposed by a with the same scope clause as TR. 118 Database Language SQL X3H2-92-154/DBL CBR-002 6.3 4) A that is exposed by a TR shall not be the same as any other that is exposed by a with the same scope clause as TR and shall not be the same as the of any that is exposed by a with the same scope clause as TR. 5) A immediately contained in a TR has a scope clause and scope defined by that if and only if the is exposed by TR. 6) The same shall not be specified more than once in a . 7) If a is specified in a , then the number of s in the shall be the same as the degree of the table specified by the or the of that , and the name of the i-th column of that or the effective name of the i-th column of that is the i-th in that . 8) A is an updatable derived table if and only if the simply contained in the of the of the is updatable. Access Rules 1) Let T be the table identified by the immediately contained in . If the is contained in any of: a) a simply contained in a , a , a , or an ; or b) a or ; or c) a immediately contained in a or an ; or d) a immediately contained in an , then the applicable privileges shall include SELECT for T. General Rules 1) The or exposed contained in a defines that or to be an identifier of the table identified by the or of that . Scalar expressions 119 X3H2-92-154/DBL CBR-002 6.3 Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) A shall not be a . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) A shall not be a . b) The optional AS shall not be specified. c) shall not be specified. 120 Database Language SQL X3H2-92-154/DBL CBR-002 6.4 6.4 Function Reference a column. Format ::= [ ] ::= | Syntax Rules 1) Let CR be the , let CN be the contained in CR, and let C be the column identified by CN. 2) If CR contains a Q, then CR shall appear within the scope of one or more s or s that are equal to Q. If there is more than one such or , then the one with the most local scope is specified. Let T be the table associated with Q. a) T shall include a column whose is CN. b) If T is a in a J, then CN shall not be a common column name in J. Note: Common column name is defined in Subclause 7.5, "". 3) If CR does not contain a , then CR shall be contained within the scope of one or more s or s whose associated tables include a column whose is CN. Let the phrase possible qualifiers denote those s and s. a) Case: i) If the most local scope contains exactly one possible qualifier, then the qualifier Q equivalent to that unique or is implicit. ii) If there is more than one possible qualifier with most local scope, then: 1) Each possible qualifier shall be a or a of a that is di- rectly contained in a J. Scalar expressions 121 X3H2-92-154/DBL CBR-002 6.4 2) CN shall be a common column name in J. Note: Common column name is defined in Subclause 7.5, "". 3) The implicit qualifier Q is implementation-dependent. The scope of Q is that which Q would have had if J had been replaced by the : ( J ) AS Q b) Let T be the table associated with Q. 4) The data type of CR is the data type of column C of T. CN shall uniquely identify a column of T. 5) If the data type of CR is character string, then CR has the Implicit coercibility attribute and its collating sequence is the default collating sequence for column C of T. 6) If the data type of CR is TIME or TIMESTAMP, then the implicit time zone of the data is the current default time zone for the SQL-session. 7) If the data type of CR is TIME WITH TIME ZONE or TIMESTAMP WITH TIME ZONE, then the time zone of the data is the time zone rep- resented in the value of CR. 8) If CR is contained in a TE and the scope clause of the immediately containing the or Q also contains TE, then CR is an outer reference to the table associated with Q. 9) Let CR be the and let C be the column identi- fied by CR. C is an underlying column of CR. If C is a , then every underlying column of C is an underlying column of CR. Note: The underlying columns of a are defined in Subclause 7.9, "". Access Rules 1) The applicable privileges shall include SELECT for T if CR is contained in any of: a) a immediately contained in a or an ; or b) a immediately contained in an . 122 Database Language SQL X3H2-92-154/DBL CBR-002 6.4 General Rules 1) The Q.CN references column C in a given row of T. 2) If the data type of CR is TIME, TIMESTAMP, TIME WITH TIME ZONE or TIMESTAMP WITH TIME ZONE, then let TZ be an INTERVAL HOUR TO MINUTE containing the value of the time zone displacement associated with CR. The value of CR, normalized to UTC, is ef- fectively computed as CR + TZ. Leveling Rules 1) The following restrictions apply for Intermediate SQL; None. 2) The following restrictions apply for Entry SQL; None. Scalar expressions 123 X3H2-92-154/DBL CBR-002 6.5 6.5 Function Specify a value derived by the application of a function to an argument. Format ::= COUNT | ::= [ ] ::= AVG | MAX | MIN | SUM | COUNT ::= DISTINCT | ALL Syntax Rules 1) If is not specified, then ALL is implicit. 2) The argument of COUNT(*) and the argument source of a is a table or a group of a grouped table as spec- ified in Subclause 7.8, "", and Subclause 7.9, "". Note: argument source is defined in Subclause 7.8, "". 3) Let T be the argument or argument source of a . 4) The simply contained in shall not contain a or a . If the contains a that is an outer reference, then that outer reference shall be the only contained in the . Note: Outer reference is defined in Subclause 6.4, "". 5) If a contains a that is an outer reference, then the shall be contained in either: a) a that is directly contained in the that directly contains the . Note: Outer reference is defined in Subclause 6.4, "". 6) Let DT be the data type of the . 7) If COUNT is specified, then the data type of the result is exact numeric with implementation-defined precision and scale of 0. 8) If MAX or MIN is specified, then the data type of the result is DT. 9) If SUM or AVG is specified, then: a) DT shall not be character string, bit string, or datetime. b) If SUM is specified and DT is exact numeric with scale S, then the data type of the result is exact numeric with implementation-defined precision and scale S. c) If AVG is specified and DT is exact numeric, then the data type of the result is exact numeric with implementation- defined precision not less than the precision of DT and implementation-defined scale not less than the scale of DT. d) If DT is approximate numeric, then the data type of the result is approximate numeric with implementation-defined precision not less than the precision of DT. e) If DT is interval, then the data type of the result is inter- val with the same precision as DT. 10)If the data type of the result is character string, then the collating sequence and the coercibility attribute are determined as in Subclause 4.2.3, "Rules determining collating sequence usage". Access Rules None. General Rules 1) Case: a) If COUNT(*) is specified, then the result is the cardinality of T. Scalar expressions 125 X3H2-92-154/DBL CBR-002 6.5 b) Otherwise, let TX be the single-column table that is the result of applying the to each row of T and eliminating null values. If one or more null values are eliminated, then a completion condition is raised: warning- null value eliminated in set function. 2) If DISTINCT is specified, then let TXA be the result of elimi- nating redundant duplicate values from TX. Otherwise, let TXA be TX. Case: a) If the COUNT is specified, then the result is the cardinality of TXA. b) If AVG, MAX, MIN, or SUM is specified, then Case: i) If TXA is empty, then the result is the null value. ii) If AVG is specified, then the result is the average of the values in TXA. iii) If MAX or MIN is specified, then the result is respec- tively the maximum or minimum value in TXA. These results are determined using the comparison rules specified in Subclause 8.2, "". iv) If SUM is specified, then the result is the sum of the values in TXA. If the sum is not within the range of the data type of the result, then an exception condition is raised: data exception-numeric value out of range. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) If a specifies DISTINCT, then the shall be a . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) If a specifies or implies ALL, then COUNT shall not be specified. b) If a specifies or implies ALL, then the shall include a that references a column of T. c) If the contains a that is an outer reference, then the shall be a . 126 Database Language SQL X3H2-92-154/DBL CBR-002 6.5 d) No contained in a shall reference a column derived from a that generally contains a . Scalar expressions 127 X3H2-92-154/DBL CBR-002 6.6 6.6 Function Specify a function yielding a value of type numeric. Format ::= | | ::= POSITION IN ::= | | ::= { CHAR_LENGTH | CHARACTER_LENGTH } ::= OCTET_LENGTH ::= BIT_LENGTH ::= EXTRACT FROM ::= | shall not contain more than one . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) A shall not specify DEFAULT. Query expressions 175 X3H2-92-154/DBL CBR-002 7.2 7.2 Function Specify a set of s to be constructed into a table. Format ::= VALUES ::= [ { }... ] Syntax Rules 1) All s shall be of the same degree. Access Rules None. General Rules 1) Let Ti be a table whose j-th column has the same data type as the j-th in the i-th and let Ti contain one row whose j-th column has the same value as the j-th in the i-th . 2) The result of the is the same as the result of T1 [ UNION ALL T2 [ . . . UNION ALL n ] . . . ] Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) A shall contain exactly one that shall be of the form "()". b) A shall be the of an . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. 176 Database Language SQL X3H2-92-154/DBL CBR-002 7.3 7.3 Function Specify a table or a grouped table. Format ::= [ ] [ ] [ ] Syntax Rules 1) The result of a is a derived table in which the descriptor of the i-th column is the same as the descriptor of the i-th column of the table specified by the . 2) Let C be some column. Let TE be the . C is an underlying column of TE if and only if C is an underlying column of some contained in TE. Access Rules None. General Rules 1) If all optional clauses are omitted, then the result of the is the same as the result of the . Otherwise, each specified clause is applied to the result of the previously specified clause and the result of the is the result of the application of the last specified clause. Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) If the table identified in the is a grouped view, then the shall not contain a , , or . Query expressions 177 X3H2-92-154/DBL CBR-002 7.4 7.4 Function Specify a table derived from one or more named tables. Format ::= FROM [ { }... ] Syntax Rules 1) Case: a) If the contains a single with no intervening or , then the descriptor of the result of the is the same as the descriptor of the table identified by that . b) If the contains more than one with no intervening or , then the descriptors of the columns of the result of the are the descriptors of the columns of the tables identified by the s, in the order in which the s appear in the and in the order in which the columns are defined within each table. Access Rules None. General Rules 1) Case: a) If the contains a single with no intervening or , then the result of the is the table identified by that . b) If the contains more than one with no intervening or , then the result of the is the extended Cartesian product of the tables identified by those s. The extended Cartesian product, CP, is the multiset of all rows R such that R is the concatenation of a row from each of the identified tables in the order in which they are iden- tified. The cardinality of CP is the product of the cardi- nalities of the identified tables. The ordinal position of a 178 Database Language SQL X3H2-92-154/DBL CBR-002 7.4 column in CP is N+S, where N is the ordinal position of that column in the identified table T from which it is derived and S is the sum of the degrees of the tables identified before T in the . Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) If the table identified by is a grouped view, then the shall contain exactly one . Query expressions 179 X3H2-92-154/DBL CBR-002 7.5 7.5 Function Specify a table derived from a Cartesian product, inner or outer join, or union join. Format ::= | | ::= CROSS JOIN ::= [ NATURAL ] [ ] JOIN [ ] ::= | ::= ON ::= USING ::= INNER | [ OUTER ] | UNION ::= LEFT | RIGHT | FULL ::= Syntax Rules 1) Let TR1 and TR2 be the first and second s of the , respectively. Let T1 and T2 be the tables identified by TR1 and TR2, respectively. Let TA and TB be the correlation names of TR1 and TR2, respectively. Let CP be: SELECT * FROM TR1, TR2 2) If a is specified, then 180 Database Language SQL X3H2-92-154/DBL CBR-002 7.5 Case: a) If NATURAL is specified, then a shall not be specified. b) If UNION is specified, then neither NATURAL nor a shall be specified. c) Otherwise, a shall be specified. 3) If a is specified and a is not specified, then INNER is implicit. 4) If a containing a is speci- fied, then; a) Each directly contained in the shall unambiguously reference a column of T1 or T2 or be an outer reference. b) If a directly contained in the is a , then the shall be contained in a or of s of the form COALESCE ( TA.C, TB.C ) AS C for every column C that is a corresponding join column, taken in order of their ordinal positions in T1. e) Let SL1 be a of those s of T2 that are not correspond- ing join columns, taken in order of their ordinal positions in T2. f) The descriptors of the columns of the result of the are the same as the descriptors of the columns of the result of SELECT SLCC, SLT1, SLT2 FROM TR1, TR2 7) For every column CR of the result of the that is not a corresponding join column and that corresponds to a column C1 of T1, CR is possibly nullable if any of the following conditions are true: a) RIGHT, FULL, or UNION is specified, or b) INNER, LEFT, or CROSS JOIN is specified or implicit and 1 is possibly nullable. 8) For every column CR of the result of the that is not a corresponding join column and that corresponds to a column C2 of T2, CR is possibly nullable if any of the following conditions are true: a) LEFT, FULL, or UNION is specified, or b) INNER, RIGHT, or CROSS JOIN is specified or implicit and C is possibly nullable. 9) For every column CR of the result of the that is a corresponding join column and that corresponds to a column C1 of T1 and C2 of T2, CR is possibly nullable if any of the following conditions are true: a) RIGHT, FULL, or UNION is specified and 1 is possibly nul- lable, or b) LEFT, FULL, or UNION is specified and 2 is possibly nul- lable. 10)The is a read-only table. 182 Database Language SQL X3H2-92-154/DBL CBR-002 7.5 Access Rules None. General Rules 1) Case: a) If is UNION, then let T be the empty set. b) If a is specified, then let T be the multiset of rows of CP. c) If a is specified, then let T be the multi- set of rows of CP for which the specified is true. d) If NATURAL is specified or is specified, then Case: i) If there are corresponding join columns, then let T be the multiset of rows of CP for which the corresponding join columns have equal values. ii) Otherwise, let T be the multiset of rows of CP. 2) Let P1 be the multiset of rows of T1 for which there exists in T some row that is the concatenation of some row R1 of T1 and some row R2 of T2. Let P2 be the multiset of rows of T2 for which there exists in T some row that is the concatenation of some row R1 of T1 and some row R2 of T2. 3) Let U1 be those rows of T1 that are not in P1 and let U2 be those rows of T2 that are not in P2. 4) Let D1 and D2 be the degree of T1 and T2, respectively. Let X1 be U1 extended on the right with D2 columns containing the null value. Let X2 be U2 extended on the left with D1 columns containing the null value. 5) Let XN1 and XN2 be effective distinct names for X1 and X2, re- spectively. Let TN be an effective name for T. Case: a) If INNER or is specified, then let S be the multiset of rows of T. b) If LEFT is specified, then let S be the multiset of rows resulting from: SELECT * FROM TN UNION ALL SELECT * FROM XN1 Query expressions 183 X3H2-92-154/DBL CBR-002 7.5 c) If RIGHT is specified, then let S be the multiset of rows resulting from: SELECT * FROM TN UNION ALL SELECT * FROM XN2 d) If FULL is specified, then let S be the multiset of rows resulting from: SELECT * FROM TN UNION ALL SELECT * FROM XN1 UNION ALL SELECT * FROM XN2 e) If UNION is specified, then let S be the multiset of rows resulting from: SELECT * FROM XN1 UNION ALL SELECT * FROM XN2 6) Let SN be an effective name of S. Case: a) If NATURAL is specified or a is speci- fied, then the result of the is the multiset of rows resulting from: SELECT SLCC, SLT1, SLT2 FROM SN b) Otherwise, the result of the is S. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall contain no . b) Conforming Intermediate SQL language shall not specify UNION JOIN. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain any . 184 Database Language SQL X3H2-92-154/DBL CBR-002 7.6 7.6 Function Specify a table derived by the application of a to the result of the preceding . Format ::= WHERE Syntax Rules 1) Let T be the result of the preceding . Each directly contained in the shall unambiguously reference a column of T or be an outer reference. Note: Outer reference is defined in Subclause 6.4, "". 2) If a directly contained in the is a , then the shall be contained in a or . Format ::= SELECT [ ] [ { ::= | ::= [ ] ::= [ AS ] Syntax Rules 1) Let T be the result of the . 2) The degree of the table specified by a is equal to the cardinality of the "*" is simply contained in a that is immediately contained in an , then the "*" is equivalent to a sequence in which each is a that references a column of T and each column of T is referenced exactly once. The columns are ref- erenced in the ascending sequence of their ordinal position within T. 4) The of QS. For all i, C is an underlying column of DCi, and of any that identifies DCi, if and only if C is an underlying column of the of DCi, or C is an underlying column of the immediately contained in QS. 6) Each directly contained in each and each contained in a directly contained in each shall unambiguously reference a column of T. 7) If T is a grouped table, then each in each that references a column of T shall refer- ence a grouping column or be specified within a . If T is not a grouped table and any contains a that contains a reference to a column of T or any directly contains a that does not contain an outer reference, then every in every that references a column of T shall be specified within a . 8) Each column of the table that is the result of a has a column descriptor that includes a data type descriptor that is the same as the data type descriptor of the from which the column was derived. 9) Case: a) If the i-th in the does not specify an and the of that is a single , then the of the i-th column of the result is C. c) Otherwise, the of the i-th column of the is implementation-dependent and different from the of any column, other than itself, of a table referenced by any contained in the SQL-statement. 10)A column of the table that is the result of a is possibly nullable if and only if it contains a for a column C that is possibly nullable, an , an , a , CAST NULL AS X (X represents a or a ), SYSTEM_ USER, or a that does not contain COUNT. 192 Database Language SQL X3H2-92-154/DBL CBR-002 7.9 11)Let TREF be the s that are simply contained in the of the . The simply un- derlying tables of the are the tables identified by the s and s contained in TREF without an intervening . 12)A QS is updatable if and only if the fol- lowing conditions hold: a) QS does not specify DISTINCT. b) Every contained in the immediately contained in QS specifies exactly one and that refers either to a base table or to an updatable derived table. Note: updatable derived table is defined in Subclause 6.3, " ". d) If the immediately contained in QS imme- diately contains a WC, then no leaf generally underlying table of QS shall be a generally underlying table of any contained in WC. e) The immediately contained in QS does not include a or a . 13)A is possibly non-deterministic if any of the following conditions are true: a) The DISTINCT is specified and one of the columns of T has a data type of character string; or b) The directly contains a that is possibly non-deterministic; or c) The contains a that contains a reference to a column of T or di- rectly contains a that does not contain an outer reference, then T is the argument or argument source of each such and the result of the is a table con- sisting of 1 row. The i-th value of the row is the value specified by the i-th . ii) If the contains a . b) A of the is a grouped view, then the | ::= TABLE ::= CORRESPONDING [ BY ] ::= 196 Database Language SQL X3H2-92-154/DBL CBR-002 7.10 Syntax Rules 1) Let T be the table specified by the . 2) The TABLE is equivalent to the ( SELECT * FROM ) 3) Let set operator be UNION [ALL], EXCEPT [ALL], or INTERSECT [ALL]. 4) T is an updatable table and the is updatable if and only if it simply contains a QE or a QS and: a) the contains QE or QS without an inter- vening that specified UNION or EXCEPT; b) the contains QE or QS without an interven- ing that specifies INTERSECT; and c) QE or QS is updatable. 5) Case: a) If a is a , then the column descriptor of the i-th column of the is the same as the column descriptor of the i-th column of the . b) If a is an , then the column descriptor of the i-th column of the is the same as the column descriptor of the i-th column of the table identified by the contained in the . c) Otherwise, the column descriptor of the i-th column of the is same as the column descriptor of the i- th column of the , except that the is implementation-dependent and different from the of any column, other than itself, of a table referenced by any contained in the SQL-statement. 6) Case: a) If a is a , then the column descriptor of the i-th column of the is the same as the column descriptor of the i-th column of the . Query expressions 197 X3H2-92-154/DBL CBR-002 7.10 b) Otherwise, the column descriptor of the i-th column of the is the same as the column descriptor of the i-th column of the . 7) Case: a) If a is a , then the column descriptor of the i-th column of the is the same as the column descriptor of the i-th column of the . b) Otherwise, the column descriptor of the i-th column of the is the same as the column descriptor of the i-th column of the . 8) If a set operator is specified in a or a , then let T1, T2, and TR be respec- tively the first operand, the second operand, and the result of the or . Let TN1 and TN2 be the effective names for T1 and T2, respectively. 9) If a set operator is specified in a or a , then let OP be the set operator. Case: a) If CORRESPONDING is specified, then: i) Within the columns of T1, the same shall not be specified more than once and within the columns of T2, the same shall not be specified more than once. ii) At least one column of T1 shall have a that is the of some column of T2. iii) Case: 1) If is not specified, then let SL be a of those s explic- itly appearing in the in the order that these s appear in the . Every in the shall be a of both T1 and T2. iv) The or is equivalent to: ( SELECT SL FROM TN1 ) OP ( SELECT SL FROM TN2 ) 198 Database Language SQL X3H2-92-154/DBL CBR-002 7.10 b) If CORRESPONDING is not specified, then T1 and T2 shall be of the same degree. 10)Case: a) If the is a , then the column descriptor of the i-th column of the is same as the column descriptor of the i-th column of the . b) Otherwise, i) Case: 1) Let C be the of the i-th column of T1. If the of the i-th column of T2 is C, then the of the i-th column of TR is C. 2) Otherwise, the of the i-th column of TR is implementation-dependent and different from the of any column, other than itself, of any table referenced by any contained in the SQL-statement. ii) The data type of the i-th column of TR is determined by applying Subclause 9.3, "Set operation result data types", to the data types of the i-th column of T1 and the i-th column of T2. If the i-th column of both T1 and T2 are known not nullable, then the i-th column of TR is known not nullable; otherwise, the i-th column of T is possibly nullable. 11)Case: a) If a is a , then the column descriptor of the i-th column of the is the same as the column descriptor of the i-th column of the . b) Otherwise, the column descriptor of the i-th column of the is the same as the column descriptor of the i-th column of the . 12)Case: a) If a is a , then the column descriptor of the i-th column of the is the same as the column descriptor of the i-th column of the . Query expressions 199 X3H2-92-154/DBL CBR-002 7.10 b) Otherwise, i) Case: 1) Let C be the of the i-th column of T1. If the of the i-th column of T2 is C, then the of the i-th column of TR is C. 2) Otherwise, the of the i-th column of TR is implementation-dependent and different from the of any column, other than itself, of any table referenced by any contained in the SQL-statement. ii) The data type of the i-th column of TR is determined by applying Subclause 9.3, "Set operation result data types", to the data types of the i-th column of T1 and the i-th column of T2. If the i-th column of both T1 and T2 are known not nullable, then the i-th column of TR is known not nullable; otherwise, the i-th column of T is possibly nullable. 13)Case: a) If a is a , then the column descriptor of the i-th column of the is the same as the column descriptor of the i-th column of the . b) Otherwise, the column descriptor of the i-th column of the is the same as the column descriptor of the i-th column of the . 14)The simply underlying tables of a are the tables identified by those s, s, and s contained in the without an intervening , an intervening , or an intervening . 15)A is possibly non-deterministic if a) it contains a set operator UNION and ALL is not specified, or if it contains EXCEPT or INTERSECT; and b) the first or second operand contains a column that has a data type of character string. 16)The underlying columns of each column of QE and of QE itself are defined as follows: a) A column of a has no underlying columns. b) The underlying columns of every i-th column of a ST are the underlying columns of the i-th column of the table immediately contained in ST. 200 Database Language SQL X3H2-92-154/DBL CBR-002 7.10 c) If no set operator is specified, then the underlying columns of every i-th column of QE are the underlying columns of the i-th column of the simply contained in QE. d) If a set operator is specified, then the underlying columns of every i-th column of QE are the underlying columns of the i-th column of T1 and those of the i-th column of T2. e) Let C be some column. C is an underlying column of QE if and only if C is an underlying column of some column of QE. Access Rules None. General Rules 1) Case: a) If no set operator is specified, then T is the result of the specified or . b) If a set operator is specified, then the result of applying the set operator is a table containing the following rows: i) Let R be a row that is a duplicate of some row in T1 or of some row in T2 or both. Let m be the number of duplicates of R in T1 and let n be the number of duplicates of R in T2, where m 0 and n 0. ii) If ALL is not specified, then Case: 1) If UNION is specified, then Case: A) If m > 0 or n > 0, then T contains exactly one dupli- cate of R. B) Otherwise, T contains no duplicate of R. 2) If EXCEPT is specified, then Case: A) If m > 0 and n = 0, then T contains exactly one dupli- cate of R. B) Otherwise, T contains no duplicate of R. 3) If INTERSECT is specified, then Case: A) If m > 0 and n > 0, then T contains exactly one dupli- cate of R. Query expressions 201 X3H2-92-154/DBL CBR-002 7.10 B) Otherwise, T contains no duplicates of R. iii) If ALL is specified, then Case: 1) If UNION is specified, then the number of duplicates of R that T contains is (m + n). 2) If EXCEPT is specified, then the number of duplicates of R that T contains is the maximum of (m - n) and 0. 3) If INTERSECT is specified, then the number of duplicates of R that T contains is the minimum of m and n. Note: See the General Rules of Subclause 8.2, "". 2) If a set operator is specified, then for each column whose data type is interval, let UDT be in turn the data type of the cor- responding column of T and let SV be the value of the column in each row of the first and second operands. The value of the corresponding column of T in the corresponding row of T is CAST (SV AS UDT) Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) A shall not be a except in an . b) Conforming Intermediate SQL shall contain no . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) A shall not specify EXCEPT. b) A shall not specify INTERSECT. c) A shall not contain a . d) A shall not specify CORRESPONDING. e) If UNION is specified, then except for column names, the descriptors of the first and second operands shall be iden- tical and the descriptor of the result is identical to the descriptor of the operands. 202 Database Language SQL X3H2-92-154/DBL CBR-002 7.11 , , and 7.11 , , and Function Specify a scalar value, a row, or a table derived from a . Format ::= ::= ::= ::= Syntax Rules 1) The degree of a shall be 1. 2) The degree of a shall be greater than 1. 3) The data type of a is the data type of the column of the immediately contained in the . 4) The data types of the columns of a or are the data types of the respective columns of the immediately contained in the or . Access Rules None. General Rules 1) If the cardinality of a or a is greater than 1, then an exception condition is raised: cardinal- ity violation. Leveling Rules 1) The following restrictions apply for Intermediate SQL: 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) If a is contained in a , then the in the shall not contain a or a and shall not identify a grouped view. Query expressions 203 X3H2-92-154/DBL CBR-002 7.11 , , and b) The contained in a shall be a . 204 Database Language SQL X3H2-92-154/DBL CBR-002 8 Predicates 8.1 Function Specify a condition that can be evaluated to give a truth value of true, false, or unknown. Format ::= | | | | | | | | | Syntax Rules None. Access Rules None. General Rules 1) The result of a is a truth value derived according to the General Rules of Subclause 8.2, "", Subclause 8.3, "", Subclause 8.4, "", Subclause 8.5, "", Subclause 8.6, "", Subclause 8.7, "", Subclause 8.8, "", Subclause 8.9, "", Subclause 8.10, "", or Subclause 8.11, "", as appropriate. Predicates 205 X3H2-92-154/DBL CBR-002 8.1 Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) A shall not be a . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain any . b) Conforming Entry SQL language shall not contain any . 206 Database Language SQL X3H2-92-154/DBL CBR-002 8.2 8.2 Function Specify a comparison of two row values. Format ::= ::= | | | | | Syntax Rules 1) The two s shall be of the same degree. 2) Let corresponding values be values with the same ordinal posi- tion in the two s. 3) The data types of the corresponding values of the two s shall be comparable. 4) Let X be a value in the first and Y be the corresponding value in the second . If X and Y have data type character string, then the pair-wise comparison collating sequence used to compare X and Y is de- termined by the table for collating sequences for comparisons (Subclause 4.2.3, "Rules determining collating sequence usage"). For any pair of corresponding character strings, let CS be the identified collating sequence. Access Rules None. General Rules 1) Let X and Y be any two corresponding s. Let XV and YV be the values represented by X and Y, respectively. Case: a) If XV or YV is the null value, then "X Y" is un- known. Predicates 207 X3H2-92-154/DBL CBR-002 8.2 b) If XV and YV are non-null values, then "X Y" is true or false as follows: i) "X = Y" is true if and only if XV and YV are equal. ii) "X <> Y" is true if and only if XV and YV are not equal. iii) "X < Y" is true if and only if XV is less than YV. iv) "X > Y" is true if and only if XV is greater than YV. v) "X <= Y" is true if and only if XV is not greater than YV. vi) "X >= Y" is true if and only if XV is not less than YV. vii) "X Y" is false if and only if "X Y" is not true. 2) Numbers are compared with respect to their algebraic value. 3) The comparison of two character strings is determined as fol- lows: a) If the length in characters of X is not equal to the length in characters of Y, then the shorter string is effectively replaced, for the purposes of comparison, with a copy of itself that has been extended to the length of the longer string by concatenation on the right of one or more pad char- acters, where the pad character is chosen based on CS. If CS has the NO PAD attribute, then the pad character is an implementation-dependent character different from any char- acter in the character set of X and Y that collates less than any string under CS. Otherwise, the pad character is a . b) The result of the comparison of X and Y is given by the col- lating sequence CS. c) Depending on the collating sequence, two strings may com- pare as equal even if they are of different lengths or con- tain different sequences of characters. When the operations MAX, MIN, DISTINCT, references to a grouping column, and the UNION, EXCEPT, and INTERSECT operators refer to character strings, the specific value selected by these operations from a set of such equal values is implementation-dependent. Note: If the coercibility attribute of the comparison is Coercible, then the collating sequence used is the default de- fined for the character repertoire. See also other Syntax Rules in this Subclause, Subclause 10.4, "", and Subclause 11.28, "". 208 Database Language SQL X3H2-92-154/DBL CBR-002 8.2 4) The comparison of two bit string values, X and Y, is determined by comparison of their bits with the same ordinal position. If Xi and Yi are the values of the i-th bits of X and Y, re- spectively, and if LX is the length in bits of X and LY is the length in bits of Y, then: a) X is equal to Y if and only if X = LY and Xi = Yi for all i. b) X is less than Y if and only if: i) LX < LY and Xi = Yi for all i less than or equal to LX; or ii) Xi = Yi for all i < n and Xn = 0 and Yn = 1 for some n less than or equal to the minimum of LX and LY. 5) The comparison of two datetimes is determined according to the interval resulting from their subtraction. Let X and Y be the two values to be compared and let H be the least significant of X and Y. The result of X Y is defined as: ( X - Y ) H INTERVAL (0) H Note: Two datetimes are comparable only if they have the same s; see Subclause 4.5.1, "Datetimes". 6) The comparison of two intervals is determined by the compari- son of their corresponding values after conversion to integers in some common base unit. Let X and Y be the two intervals to be compared. Let A TO B be the specified or implied datetime qualifier of X and C TO D be the specified or implied datetime qualifier of Y. Let T be the least significant of B and D and let U be a datetime qualifier of the form T(N), where N is an large enough so that significance is not lost in the CAST operation. X is effectively replaced by CAST (X AS INTERVAL U). Y is effectively replaced by CAST (Y AS INTERVAL U). The result of the comparison is effectively computed as: CAST ( X AS INTEGER ) CAST ( Y AS INTEGER ) 7) Let Rx and Ry be the two s of the and let RXi and RYi be the i-th s of Rx and Ry, respectively. "Rx Ry" is true, false, or unknown as follows: a) "x = Ry" is true if and only if RXi = RYi for all i. b) "x <> Ry" is true if and only if RXi <> RYi for some i. c) "x < Ry" is true if and only if RXi = RYi for all i < n and RXn < RYn for some n. d) "x > Ry" is true if and only if RXi = RYi for all i < n and RXn > RYn for some n. Predicates 209 X3H2-92-154/DBL CBR-002 8.2 e) "x <= Ry" is true if and only if Rx = Ry or Rx < Ry. f) "x >= Ry" is true if and only if Rx = Ry or Rx > Ry. g) "x = Ry" is false if and only if "Rx <> Ry" is true. h) "x <> Ry" is false if and only if "Rx = Ry" is true. i) "x < Ry" is false if and only if "Rx >= Ry" is true. j) "x > Ry" is false if and only if "Rx <= Ry" is true. k) "x <= Ry" is false if and only if "Rx > Ry" is true. l) "x >= Ry" is false if and only if "Rx < Ry" is true. m) "x Ry" is unknown if and only if "Rx Ry" is neither true nor false. Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. 210 Database Language SQL X3H2-92-154/DBL CBR-002 8.3 8.3 Function Specify a range comparison. Format ::= [ NOT ] BETWEEN AND Syntax Rules 1) The three s shall be of the same degree. 2) Let respective values be values with the same ordinal position in the two s. 3) The data types of the respective values of the three s shall be comparable. 4) Let X, Y, and Z be the first, second, and third s, respectively. 5) "X NOT BETWEEN Y AND Z" is equivalent to "NOT ( X BETWEEN Y AND Z )". 6) "X BETWEEN Y AND Z" is equivalent to "X>=Y AND X<=Z". Access Rules None. General Rules None. Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. Predicates 211 X3H2-92-154/DBL CBR-002 8.4 8.4 Function Specify a quantified comparison. Format ::= [ NOT ] IN ::= | ::= { }... Syntax Rules 1) Let IVL be an . ( IVL ) is equivalent to the : ( VALUES IVL ) 2) Let RVC be the and let IPV be the . 3) The expression RVC NOT IN IPV is equivalent to NOT ( RVC IN IPV ) 4) The expression RVC IN IPV is equivalent to RVC = ANY IPV Access Rules None. General Rules None. 212 Database Language SQL X3H2-92-154/DBL CBR-002 8.4 Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall not contain a in an that is not a . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. Predicates 213 X3H2-92-154/DBL CBR-002 8.5 8.5 Function Specify a pattern-match comparison. Format ::= [ NOT ] LIKE [ ESCAPE ] ::= ::= ::= Syntax Rules 1) The data types of , , and shall be character string. , , and shall be comparable. 2) Let M be the result of the of the , let P be the result of the of the , and let E be the result of the of the if one is specified. 3) "M NOT LIKE P" is equivalent to "NOT (M LIKE P)". 4) Case: a) If is not specified, then the collat- ing sequence used for the is determined by Table 3, "Collating sequence usage for comparisons", taking as comparand 1 and as comparand 2. b) Otherwise, let C1 be the coercibility attribute and collat- ing sequence of the , and C2 be the coercibility attribute and collating sequence of the . Let C3 be the resulting coercibility attribute and collating sequence as determined by Table 2, "Collating coercibility rules for dyadic operators", taking C1 as the operand 1 coercibility and C2 as the operand 2 coercibility. The collating sequence used for the is determined by Table 3, "Collating sequence usage for comparisons", taking C3 as the coercibility attribute and collating sequence of comparand 1 and as comparand 2. 214 Database Language SQL X3H2-92-154/DBL CBR-002 8.5 Access Rules None. General Rules 1) If an is specified and M, P, or E is the null value, then M LIKE P ESCAPE E is unknown. 2) If an is not specified and M or P is the null value, then M LIKE P is unknown. 3) Case: a) If an is specified, then: i) If the length in characters of E is not equal to 1, then an exception condition is raised: data exception-invalid escape character. ii) If there is not a partitioning of the string P into sub- strings such that each substring has length 1 or 2, no substring of length 1 is the escape character E, and each substring of length 2 is the escape character E followed by either the escape character E, an character, or the character, then an exception condition is raised: data exception-invalid escape sequence. If there is such a partitioning of P, then in that parti- tioning, each substring with length 2 represents a single occurrence of the second character of that substring. Each substring with length 1 that is the character represents an arbitrary character specifier. Each substring with length 1 that is the character represents an arbitrary string specifier. Each substring with length 1 that is neither the character nor the character represents the character that it contains. b) If an is not specified, then each character in P represents an arbitrary character spec- ifier, each character in P represents an arbitrary string specifier, and each character in P that is neither the character nor the character represents itself. Predicates 215 X3H2-92-154/DBL CBR-002 8.5 4) The string P is a sequence of the minimum number of substring specifiers such that each of P is part of exactly one substring specifier. A substring specifier is an arbitrary character specifier, an arbitrary string spec- ifier, or any sequence of s other than an arbitrary character specifier or an arbitrary string specifier. 5) Case: a) If M and P are character strings whose lengths are variable and if the lengths of both M and P are 0, then M LIKE P is true. b) The M LIKE P is true if there exists a partitioning of M into substrings such that: i) A substring of M is a sequence of 0 or more contiguous s of M and each of M is part of exactly one substring. ii) If the i-th substring specifier of P is an arbitrary char- acter specifier, the i-th substring of M is any single . iii) If the i-th substring specifier of P is an arbitrary string specifier, then the i-th substring of M is any sequence of 0 or more s. iv) If the i-th substring specifier of P is neither an arbi- trary character specifier nor an arbitrary string speci- fier, then the i-th substring of M is equal to that sub- string specifier according to the collating sequence of the , without the appending of characters to M, and has the same length as that substring specifier. v) The number of substrings of M is equal to the number of substring specifiers of P. c) Otherwise, M LIKE P is false. 216 Database Language SQL X3H2-92-154/DBL CBR-002 8.5 Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) The shall be a . b) A shall be a . c) An shall be a . Predicates 217 X3H2-92-154/DBL CBR-002 8.6 8.6 Function Specify a test for a null value. Format ::= IS [ NOT ] NULL Syntax Rules None. Access Rules None. General Rules 1) Let R be the value of the . 2) If all the values in R are the null value, then "R IS NULL" is true; otherwise, it is false. 3) If none of the values in R are the null value, then "R IS NOT NULL" is true; otherwise, it is false. Note: For all R, "R IS NOT NULL" has the same result as "NOT R IS NULL" if and only if R is of degree 1. Table 12, " semantics", specifies this behavior. ________________Table_12-_semantics________________ R IS R IS NOT NOT R IS NOT R IS NOT _Expression_______NULL____NULL__________NULL__________NULL_________ | degree 1: null | true | false | false | true | | | | | | | | degree 1: not | false | true | true | false | null | degree > 1: | true | false | false | true | | all null | | | | | | | | | | | | degree > 1: | false | false | true | true | | some null | | | | | | | | | | | | degree > 1: | false | true | true | false | |_none_null______|_______|_____________|____________|______________| | | | | | | |Leveling Rules | | | | | | | | | | | 218 Database Language SQL X3H2-92-154/DBL CBR-002 8.6 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) A shall be a . Predicates 219 X3H2-92-154/DBL CBR-002 8.7 8.7 Function Specify a quantified comparison. Format ::= ::= | ::= ALL ::= SOME | ANY Syntax Rules 1) The shall be of the same degree as the result of the . 2) The data types of the values of the shall be respectively comparable to those of the columns of the . 3) The collating sequence for each pair of respective values in the is determined in the same manner as described in Subclause 8.2, "". Access Rules None. General Rules 1) Let R be the result of the and let T be the result of the . 2) The result of "R T" is derived by the application of the implied "R RT" to every row RT in T: Case: a) If T is empty or if the implied is true for every row RT in T, then "R T" is true. b) If the implied is false for at least one row RT in T, then "R T" is false. 220 Database Language SQL X3H2-92-154/DBL CBR-002 8.7 c) If the implied is true for at least one row RT in T, then "R T" is true. d) If T is empty or if the implied is false for every row RT in T, then "R T" is false. e) If "R T" is neither true nor false, then it is unknown. Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. Predicates 221 X3H2-92-154/DBL CBR-002 8.8 8.8 Function Specify a test for a non-empty set. Format ::= EXISTS Syntax Rules None. Access Rules None. General Rules 1) Let T be the result of the . 2) If the cardinality of T is greater than 0, then the result of the is true; otherwise, the result of the is false. Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. 222 Database Language SQL X3H2-92-154/DBL CBR-002 8.9 8.9 Function Specify a test for the absence of duplicate rows. Format ::= UNIQUE Syntax Rules None. Access Rules None. General Rules 1) Let T be the result of the . 2) If there are no two rows in T such that the value of each column in one row is non-null and is equal to the value of the cor- responding column in the other row according to Subclause 8.2, "", then the result of the is true; otherwise, the result of the is false. Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain any . Predicates 223 X3H2-92-154/DBL CBR-002 8.10 8.10 Function Specify a test for matching rows. Format ::= MATCH [ UNIQUE ] [ PARTIAL | FULL ] Syntax Rules 1) The shall be of the same degree as the . 2) The data types of the values of the shall be respectively comparable to those of the corresponding columns of the . 3) The collating sequence for each pair of respective values in the is determined in the same manner as described in Subclause 8.2, "". Access Rules None. General Rules 1) Let R be the . 2) If neither PARTIAL nor FULL is specified, then Case: a) If some value in R is the null value, then the is true. b) If no value in R is the null value, then Case: i) If UNIQUE is not specified and there exists a (possibly non-unique) row RTi of the such that R = RTi then the is true. ii) If UNIQUE is specified and there is a unique row RTi of the such that R = RTi 224 Database Language SQL X3H2-92-154/DBL CBR-002 8.10 then the is true. iii) Otherwise, the is false. 3) If PARTIAL is specified, then Case: a) If all values in R are the null value, then the is true. b) Otherwise, Case: i) If UNIQUE is not specified and there exists a (possibly non-unique) row RTi of the such that each non-null value of R equals its corresponding value in RTi, then the is true. ii) If UNIQUE is specified and there is a unique row RTi of the such that each non-null value of R equals its corresponding value in RTi, then the is true. iii) Otherwise, the is false. 4) If FULL is specified, then Case: a) If all values in R are the null value, then the is true. b) If no values in R are the null value, then Case: i) If UNIQUE is not specified and there exists a (possibly non-unique) row RTi of the such that R = RTi then the is true. ii) If UNIQUE is specified and there exists a unique row RTi of the such that R = RTi then the is true. iii) Otherwise, the is false. c) Otherwise, the is false. Predicates 225 X3H2-92-154/DBL CBR-002 8.10 Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall not contain any . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. 226 Database Language SQL X3H2-92-154/DBL CBR-002 8.11 8.11 Function Specify a test for an overlap between two events. Format ::= OVERLAPS ::= ::= Syntax Rules 1) The degree of and shall both be 2. 2) The data type of the first column of each shall be a datetime data type and the first column of each shall be comparable. Note: Two datetimes are comparable only if they have the same s; see Subclause 4.5.1, "Datetimes". 3) The data type of the second column of each shall be a datetime data type or INTERVAL. Case: a) If the data type is INTERVAL, then the precision of the data type shall be such that the interval can be added to the datetime data type contained in the first column of the . b) If the data type is a datetime data type, then it shall be comparable with the datetime data type contained in the first column of the . Access Rules None. General Rules 1) Let D1 be the value of the first column of and D2 be the value of the first column of . Predicates 227 X3H2-92-154/DBL CBR-002 8.11 2) Case: a) If the data type of the second column of is a datetime data type, then let E1 be the value of the second column of . b) If the data type of the second column of is INTERVAL, then let I1 be the value of the second column of . Let E1 = D1 + I1. 3) If D1 is the null value or if E1 < D1, then let S1 = E1 and let T1 = D1. Otherwise, let S1 = D1 and let T1 = E1. 4) Case: a) If the data type of the second column of is a datetime data type, then let E2 be the value of the second column of . b) If the data type of the second column of is INTERVAL, then let I2 be the value of the second column of . Let E2 = D2 + I2. 5) If D2 is the null value or if E2 < D2, then let S2 = E2 and let T2 = D2. Otherwise, let S2 = D2 and let T2 = E2. 6) The result of the is the result of the following expression: ( S1 > S2 AND NOT ( S1 >= T2 AND T1 >= T2 ) ) OR ( S2 > S1 AND NOT ( S2 >= T1 AND T2 >= T1 ) ) OR ( S1 = S2 AND ( T1 <> T2 OR T1 = T2 ) ) Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain any . 228 Database Language SQL X3H2-92-154/DBL CBR-002 8.12 8.12 Function Specify a condition that has the truth value true, false, or unknown, depending on the result of applying boolean operators to specified conditions. Format ::= | OR ::= | AND ::= [ NOT ] ::= [ IS [ NOT ] ] ::= TRUE | FALSE | UNKNOWN ::= | Syntax Rules 1) If NOT is specified in a , then let BP be the contained and let TV be the contained . The is equivalent to: ( NOT ( BP IS TV ) ) Access Rules None. General Rules 1) The result is derived by the application of the specified boolean operators ("AND", "OR", "IS", and "NOT") to the results derived from each evaluation. If boolean operators are not specified, then the result of the is the result of the specified . Predicates 229 X3H2-92-154/DBL CBR-002 8.12 2) NOT(true) is false, NOT(false) is true, and NOT(unknown) is unknown. Table 13, "Truth table for the AND boolean", Table 14, "Truth table for the OR boolean", and Table 15, "Truth table for the IS boolean" specify the semantics of AND, OR, and IS, respectively. ______________Table_13-Truth_table_for_the_AND_boolean_____________ ___________________________________________________________________ | true | true | false | unknown | | | | | | | false | false | false | false | | | | | | |__________|_________|__________|__________________________________| | | | | | ______________Table_14-Truth_table_for_the_OR_boolean______________ ___________________________________________________________________ | true | true | true | true | | | | | | | false | true | false | unknown | | | | | | |__________|_________|__________|__________________________________| | | | | | ______________Table_15-Truth_table_for_the_IS_boolean______________ _IS_________TRUE______FALSE______UNKNOWN___________________________ | true | true | false | false | | | | | | | false | false | true | false | | | | | | |__________|_________|__________|__________________________________| | | | | | 3) When a S is evaluated against a row of a table, each reference to a column of that table by a directly contained in S is a reference to the value of that column in that row. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) A shall not specify a . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. 230 Database Language SQL X3H2-92-154/DBL CBR-002 9 Data assignment rules 9.1 Retrieval assignment Function Specify rules for value assignments that retrieve SQL-data. Syntax Rules 1) Let T and V be a TARGET and VALUE specified in an application of this Subclause. 2) If the data type of T is character string, bit string, numeric, datetime, or interval, then the data type of V shall be a mu- tually assignable character string type, a bit string type, a numeric type, the same datetime type, or a comparable interval type, respectively. General Rules 1) If V is the null value, then Case: a) If an indicator is specified for T, then that indicator is set to -1. b) If no indicator is specified for T, then an exception con- dition is raised: data exception-null value, no indicator parameter. 2) If V is not the null value and T has an indicator, then Case: a) If the data type of T is character string or bit string and the length in characters or bits, respectively, M of V is greater than the length in characters or bits, respectively, of T, then the indicator is set to M. If M exceeds the maxi- mum value that the indicator can contain, then an exception condition is raised: data exception-indicator overflow. b) Otherwise, the indicator is set to 0. 3) If V is not the null value, then Case: a) If the data type of T is fixed-length character string with length in characters L and the length in characters of V is equal to L, then the value of T is set to V. Data assignment rules 231 X3H2-92-154/DBL CBR-002 9.1 Retrieval assignment b) If the data type of T is fixed-length character string with length in characters L, and the length in characters of V is greater than L, then the value of T is set to the first L characters of V and a completion condition is raised: warning-string data, right truncation. c) If the data type of T is fixed-length character string with length in characters L, and the length in characters M of V is smaller than L, then the first M characters of T are set to V, and the last L-M characters of T are set to s. d) If the data type of T is variable-length character string and the length in characters M of V is not greater than the maximum length in characters of T, then the value of T is set to V and the length in characters of T is set to M. e) If the data type of T is variable-length character string and the length in characters of V is greater than the maximum length in characters L of T, then the value of T is set to the first L characters of V, the length in characters of T becomes L, and a completion condition is raised: warning- string data, right truncation. f) If the data type of T is fixed-length bit string with length in bits L and the length in bits of V is equal to L, then the value of T is set to V. g) If the data type of T is fixed-length bit string with length in bits L and the length in bits of V is greater than L, then the value of T is set to the first L bits of V and a completion condition is raised: warning-string data, right truncation. h) If the data type of T is fixed-length bit string with length in bits L and the length in bits M of V is smaller than L, then the first M bits of T are set to V, the remaining bits of T are set to bits each with the value of 0, and a comple- tion condition is raised: warning-implicit zero-bit padding. i) If the data type of T is variable-length bit string and the length in bits M of V is not greater than the maximum length in bits of T, then the value of T is set to V and the length in bits of T is set to M. j) If the data type of T is variable-length bit string, and the length in bits of V is greater than the maximum length in bits L of T, then the value of T is set to the first L bits of V, the length in bits of T is set to L, and a completion condition is raised: warning-string data, right truncation. 232 Database Language SQL X3H2-92-154/DBL CBR-002 9.1 Retrieval assignment k) If the data type of T is numeric and there is an approxi- mation obtained by rounding or truncation of the numerical value of V for the data type of T, then the value of T is set to such an approximation. If there is no such approximation, then an exception condi- tion is raised: data exception-numeric value out of range. If the data type of T is exact numeric, then it is implementation- defined whether the approximation is obtained by rounding or by truncation. l) If the data type of T is datetime and there is a representa- tion of the value of V in the data type of T, then the value of T is set to that representation. m) If the data type of T is interval and there is a representa- tion of the value of V in the data type of T, then the value of T is set to that representation. Otherwise, an exception condition is raised: data exception-interval field overflow. Data assignment rules 233 X3H2-92-154/DBL CBR-002 9.2 Store assignment 9.2 Store assignment Function Specify rules for value assignments that store SQL-data. Syntax Rules 1) Let T and V be a TARGET and VALUE specified in an application of this Subclause. 2) If the data type of T is character string, bit string, numeric, datetime, or interval, then the data type of V shall be char- acter string, bit string, numeric, the same datetime type, or a comparable interval type, respectively. General Rules 1) Let T be an object column. 2) If the value of V is the null value, then the value of T is set to the null value. 3) Otherwise, let V denote a non-null value of T. Case: a) If the data type of T is fixed-length character string with length in characters L and the length in characters of V is equal to L, then the value of T is set to V. b) If the data type of T is fixed-length character string with length in characters L and the length in characters M of V is larger than L, then Case: i) If the rightmost M-L characters of V are all s, then the value of T is set to the first L characters of V. ii) If one or more of the rightmost M-L characters of V are not s, then an exception condition is raised: data exception-string data, right truncation. c) If the data type of T is fixed-length character string with length in characters L and the length in characters M of V is less than L, then the first M characters of T are set to V and the last L-M characters of T are set to s. d) If the data type of T is variable-length character string and the length in characters M of V is not greater than the maximum length in characters of T, then the value of T is set to V and the length in characters of T is set to M. e) If the data type of T is variable-length character string and the length in characters M of V is greater than the maximum length in characters L of T, then, 234 Database Language SQL X3H2-92-154/DBL CBR-002 9.2 Store assignment Case: i) If the rightmost M-L characters of V are all s, then the value of T is set to the first L characters of V and the length in characters of T is set to L. ii) If one or more of the rightmost M-L characters of V are not s, then an exception condition is raised: data exception-string data, right truncation. f) If the data type of T is fixed-length bit string with length in bits L and the length in bits of V is equal to L, then the value of T is set to V. g) If the data type of T is fixed-length bit string with length in bits L and the length in bits M of V is greater than L, then an exception condition is raised: data exception-string data, right truncation. h) If the data type of T is fixed-length bit string with length in bits L and the length in bits M of V is less than L, then an exception condition is raised: data exception-string data, length mismatch. i) If the data type of T is variable-length bit string and the length in bits M of V is not greater than the maximum length in bits of T, then the value of T is set to V and the length in bits of T is set to M. j) If the data type of T is variable-length bit string, and the length in bits M of V is greater than the maximum length in bits L of T, then an exception condition is raised: data exception-string data, right truncation. k) If the data type of T is numeric and there is an approxi- mation obtained by rounding or truncation of the numerical value of V for the data type of T, then the value of T is set to such an approximation. If there is no such approximation, then an exception condi- tion is raised: data exception-numeric value out of range. If the data type of T is exact numeric, then it is implementation- defined whether the approximation is obtained by rounding or by truncation. l) If the data type of T is datetime and there is a representa- tion of the value of V in the data type of T, then the value of T is set to that representation. m) If the data type of T is interval and there is a representa- tion of the value of V in the data type of T, then the value of T is set to that representation. Otherwise, an exception condition is raised: data exception-interval field overflow. Data assignment rules 235 X3H2-92-154/DBL CBR-002 9.2 Store assignment 4) If the column definition of T includes the name of a domain whose domain descriptor includes a domain constraint D, then D is effectively checked. If D is not satisfied, then an exception condition is raised: integrity constraint violation. 236 Database Language SQL X3H2-92-154/DBL CBR-002 9.3 Set operation result data types 9.3 Set operation result data types Function Specify the Syntax Rules and result data types for s and s having set operators. Syntax Rules 1) Let DTS be a set of data types specified in an application of this Subclause. 2) All of the data types in DTS shall be comparable. 3) Case: a) If any of the data types in DTS is character string, then all data types in DTS shall be character string, and all of them shall have the same character repertoire. That charac- ter repertoire is the character repertoire of the result. The character set of the result is the character set of one of the data types in DTS. The specific character set chosen is implementation-dependent. The collating sequence and the co- ercibility attribute are determined as specified in Table 2, "Collating coercibility rules for dyadic operators". Case: i) If any of the data types in DTS is variable-length char- acter string, then the result data type is variable-length character string with maximum length in characters equal to the maximum of the lengths in characters and maximum lengths in characters of the data types in DTS. ii) Otherwise, the result data type is fixed-length character string with length in characters equal to the maximum of the lengths in characters of the data types in DTS. b) If any of the data types in DTS is bit string, then all data types in DTS shall be bit string. Case: i) If any of the data types in DTS is variable-length bit string, then the result data type is variable-length bit string with maximum length in bits equal to the maximum of the lengths in bits and maximum lengths in bits of the data types in DTS. ii) Otherwise, the result data type is fixed-length bit string with length in bits equal to the maximum of the lengths in bits of the data types in DTS. Data assignment rules 237 X3H2-92-154/DBL CBR-002 9.3 Set operation result data types c) If all of the data types in DTS are exact numeric, then the result data type is exact numeric with implementation-defined precision and with scale equal to the maximum of the scales of the data types in DTS. d) If any data type in DTS is approximate numeric, then each data type in DTS shall be numeric and the result data type is approximate numeric with implementation-defined precision. e) If any data type in DTS is a datetime data type, then each data type in DTS shall be the same datetime data type. The result data type is the same datetime data type. f) If any data type in DTS is interval, then each data type in DTS shall be interval. If the precision of any data type in DTS specifies YEAR or MONTH, then the precision of each data type shall specify only YEAR or MONTH. If the preci- sion of any data type in DTS specifies DAY, HOUR, MINUTE, or SECOND(N), then the precision of no data type of DTS shall specify the s YEAR and MONTH. The result data type is interval with precision "S TO E", where S and E are the most significant of the s and the least significant of the s of the data types in DTS, respectively. General Rules None. 238 Database Language SQL X3H2-92-154/DBL CBR-002 10 Additional common elements 10.1 Function Specify the precision of an interval data type. Format ::= TO | ::= [ ] ::= | SECOND [ ] ::= [ ] | SECOND [ [ ] ] ::= | SECOND ::= YEAR | MONTH | DAY | HOUR | MINUTE ::= ::= Additional common elements 239 X3H2-92-154/DBL CBR-002 10.1 Syntax rules 1) There is a significance of ordering of s. In order from most significant to least significant, the ordering is: YEAR, MONTH, DAY, HOUR, MINUTE, and SECOND. A or with an i is more significant than a or with an j if i>j. An or with an i is more significant than an or with an j if i>j. 2) If TO is specified, then: a) shall be more significant than , b) shall not specify MONTH, and c) if specified YEAR, then shall spec- ify MONTH. 3) The maximum value of is implementation-defined, but shall not be less than 2. 4) The maximum value of is implementation-defined, but shall not be less than 6. 5) An , if specified, shall be greater than 0 and shall not be greater than the implementation- defined maximum. If is not specified, then an of 2 is implicit. 6) An , if specified, shall be greater than or equal to 0 and shall not be greater than the implementation-defined maximum. If SECOND is specified and is not specified, then an of 6 is implicit. Access Rules None. General Rules 1) An item qualified by an contains the date- time fields identified by the . Case: a) If the specifies a , then the identifies a single . Any reference to the most significant or 240 Database Language SQL X3H2-92-154/DBL CBR-002 10.1 least significant of the item refers to that . b) Otherwise, the identifies those datetime fields from to , inclusive. 2) An specifies Case: a) If the is SECOND, then the number of decimal digits of precision before the specified or implied decimal point of the seconds . b) Otherwise, the number of decimal digits of precision of the first . 3) An specifies the num- ber of decimal digits of precision following the specified or implied decimal point in the SECOND. 4) If is not specified and and are the same , then the is equivalent to a that is that . 5) The length in positions of an item of type interval is computed as follows. Case: a) If the item is a year-month interval, then Case: i) If the is a , then the length in positions of the item is the implicit or explicit of the . ii) Otherwise, the length in positions of the item is the im- plicit or explicit of the plus 2 (the length of the that is the ) plus 1 (the length of the between the and the in a ). b) Otherwise, Case: i) If the is a that does not specify SECOND, then the length in positions of the item is the implicit or explicit of the . Additional common elements 241 X3H2-92-154/DBL CBR-002 10.1 ii) If the is a that specifies SECOND, then the length in positions of the item is the implicit or explicit of the plus the implicit or explicit . If is greater than zero, then the length in positions of the item is increased by 1 (the length in positions of the between the and the ). iii) Otherwise, let participating datetime fields mean the date- time fields that are less significant than the and more significant than the of the . The length in positions of each par- ticipating datetime field is 2. Case: 1) If is SECOND, then the length in positions of the item is the implicit or explicit , plus 3 times the number of par- ticipating datetime fields (each participating datetime field has length 2 positions , plus the s or s that precede them have length 1 position), plus the implicit or explicit , plus 1 (the length in positions of the preceding the ). If is greater than zero, then the length in positions of the item is increased by 1 (the length in positions of the within the field identified by the ). 2) Otherwise, the length in positions of the item is the implicit or explicit , plus 3 times the number of participating datetime fields (each participating datetime field has length 2 positions, plus the s or s that pre- cede them have length 1 position), plus 2 (the length in positions of the ), plus 1 (the length in positions of the preceding the ). Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain any . 242 Database Language SQL X3H2-92-154/DBL CBR-002 10.2 10.2 Function Specify a standard programming language. Format ::= LANGUAGE ::= ADA | C | COBOL | FORTRAN | MUMPS | PASCAL | PLI Syntax Rules None. Access Rules None. General Rules 1) The standard programming language specified by the clause is defined in the International Standard identified by the keyword. Table 16, "Standard programming languages", specifies the relationship. ______________Table_16-Standard_programming_languages______________ Language _keyword______Relevant_standard____________________________________ | ADA | ISO/IEC 8652 | | | | | C | ISO/IEC 9899 | | | | | COBOL | ISO 1989 | | | | | FORTRAN | ISO/IEC 1539 | | | | | MUMPS | ISO/IEC 11756 | | | | | PASCAL | ISO 7185 and ISO/IEC 10206 | | | | |_PLI________|_ISO_6160____________________________________________| | | | Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. Additional common elements 243 X3H2-92-154/DBL CBR-002 10.2 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) A shall not specify MUMPS. 244 Database Language SQL X3H2-92-154/DBL CBR-002 10.3 10.3 Function Specify privileges. Format ::= ALL PRIVILEGES | ::= [ { }... ] ::= SELECT | DELETE | INSERT [ ] | UPDATE [ ] | REFERENCES [ ] | USAGE ::= ::= PUBLIC | Syntax Rules 1) If the of the or specifying specifies , then let T be the table identified by that . T shall not be a declared local temporary table. 2) If T is a temporary table, then shall specify ALL PRIVILEGES. 3) Each in a shall identify a column of T. 4) UPDATE () is equivalent to the spec- ification of UPDATE () for each in . INSERT () is equivalent to the specification of INSERT () for each in . REFERENCES () is equivalent to the specification of Additional common elements 245 X3H2-92-154/DBL CBR-002 10.3 REFERENCES () for each in . 5) ALL PRIVILEGES is equivalent to the specification of all of the privileges on for which the current has grantable privilege descriptors. Access Rules None. General Rules 1) A of PUBLIC denotes at all times a list of s containing all of the s in the SQL environment. 2) The set of applicable privileges for an includes those privileges defined by privilege descriptors associated with that , together with those defined by privilege descriptors associated with PUBLIC. 3) UPDATE () specifies the UPDATE privilege on the in- dicated column and implies one or more column privilege descrip- tors. If the is omitted, then UPDATE specifies the UPDATE privilege on all columns of T including any column subsequently added to T and implies a table privilege descriptor and one or more column privilege descriptors. 4) INSERT () specifies the INSERT privilege on the in- dicated column and implies one or more column privilege descrip- tors. If the is omitted, then INSERT specifies the INSERT privilege on all columns of T including any column subsequently added to T and implies a table privilege descriptor and one or more column privilege descriptors. 5) REFERENCES () specifies the REFERENCES privilege on the indicated column and implies one or more column privilege descriptors. If the is omitted, then REFERENCES specifies the REFERENCES privilege on all columns of T including any column subsequently added to T and implies a table privilege descriptor and one or more column privilege descriptors. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) An that specifies INSERT shall not contain a . 246 Database Language SQL X3H2-92-154/DBL CBR-002 10.3 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. Additional common elements 247 X3H2-92-154/DBL CBR-002 10.4 10.4 Function Identify a character set. Format ::= | | | | ::= ::= ::= ::= ::= Syntax Rules 1) The s, s, s, and s that are supported are implementation-defined. 2) A character set identified by a , by an , by a , or by an has associated with it a privilege descriptor that was effec- tively defined by the GRANT USAGE ON CHARACTER SET CS TO PUBLIC WITH GRANT OPTION where CS is the contained in the . The grantor of the privilege descriptor is set to the special grantor value "_SYSTEM". 3) The s shall include SQL_TEXT. 248 Database Language SQL X3H2-92-154/DBL CBR-002 10.4 Access Rules 1) Let C be the contained in the . The applicable privileges shall include USAGE on C. General Rules 1) A identifies a character set. Let the identified character set be CS. Note: A character set comprises the characters in the character set's repertoire together with a form-of-use that specifies the convention for arranging those characters into character strings. 2) A specifies the name of a character repertoire that is defined by a national or interna- tional standard. The character repertoire and form-of-use of CS, implied by the , are defined by the standard that defined that . The default collating sequence of the character reper- toire is defined by the order of the characters in the standard and has the PAD SPACE attribute. 3) An speci- fies the name of a character repertoire that is implementation- defined. The character repertoire and form-of-use of CS, implied by the , are implementation-defined. The default collating sequence of the character repertoire and whether the collating sequence has the NO PAD attribute or the PAD SPACE attribute is implementation- defined. 4) A identifies a charac- ter set whose descriptor is in some schema whose is not INFORMATION_SCHEMA. Note: The default collating sequence and form-of-use of CS are as defined in Subclause 11.28, "". 5) A identifies form-of-use that is defined by some national or international standard. That form-of-use is the form-of-use of CS. The charac- ter repertoire of CS is as defined in that standard. The default collating sequence of the character repertoire is defined by the order of the characters in ISO/IEC 10646 and has the PAD SPACE attribute. Note: Specific forms-of-use implied by this rule include ISO 2022 code extension techniques. 6) An identifies an implementation-defined form-of-use that shall be the form-of-use of CS. The implied character repertoire and Additional common elements 249 X3H2-92-154/DBL CBR-002 10.4 default collating sequence of CS and whether the collating se- quence has the NO PAD attribute or the PAD SPACE attribute are implementation-defined. Note: Specific forms-of-use implied by this rule include implementation-defined techniques such as mixed one-octet/two- octet Latin/Kanji or Compound String. 7) There is a character set descriptor for every character set that can be specified by a . Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain a . 250 Database Language SQL X3H2-92-154/DBL CBR-002 10.5 10.5 Function Specify a collating sequence. Format ::= COLLATE Syntax Rules None. Access Rules 1) Let C be the contained in the . The applicable privileges shall include USAGE on C. General Rules None. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall not contain any . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. Additional common elements 251 X3H2-92-154/DBL CBR-002 10.6 and 10.6 and Function Specify the name of a constraint and its attributes. Format ::= CONSTRAINT ::= [ [ NOT ] DEFERRABLE ] | [ NOT ] DEFERRABLE [ ] ::= INITIALLY DEFERRED | INITIALLY IMMEDIATE Syntax Rules 1) If a is contained in a , and if the contains a , then that shall be the same as the specified or implicit of the containing . 2) The of shall be differ- ent from the of the of any other constraint defined in the same schema. 3) If is not specified, then INITIALLY IMMEDIATE is implicit. 4) Case: a) If INITIALLY DEFERRED is specified, then: i) NOT DEFERRABLE shall not be specified. ii) If DEFERRABLE is not specified, then DEFERRABLE is im- plicit. b) If INITIALLY IMMEDIATE is specified or implicit and nei- ther DEFERRABLE nor NOT DEFERRABLE is specified, then NOT DEFERRABLE is implicit. Access Rules None. 252 Database Language SQL X3H2-92-154/DBL CBR-002 10.6 and General Rules 1) If NOT DEFERRABLE is specified, then the constraint is not de- ferrable; otherwise it is deferrable. 2) If is INITIALLY DEFERRED, then the ini- tial constraint mode for the constraint is deferred; otherwise, the initial constraint mode for the constraint is immediate. 3) If, on completion of any SQL-statement, the constraint mode of any constraint is immediate, then that constraint is effectively checked. Note: This includes the cases where is a , a , or the statement that causes a constraint with a constraint mode of initially immediate to be created. 4) When a constraint is effectively checked, if the constraint is not satisfied, then an exception condition is raised: integrity constraint violation. If this exception condition is raised as a result of executing a , then SQLSTATE is not set to integrity constraint violation, but is set to transaction rollback-integrity constraint violation (see the General Rules of Subclause 14.3, ""). Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall contain no ex- plicit . Note: This means that INITIALLY IMMEDIATE NOT DEFERRABLE is implicit. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Intermediate SQL language shall contain no . Additional common elements 253 X3H2-92-154/DBL CBR-002 254 Database Language SQL X3H2-92-154/DBL CBR-002 11 Schema definition and manipulation 11.1 Function Define a schema. Format ::= CREATE SCHEMA [ ] [ ... ] ::= | AUTHORIZATION | AUTHORIZATION ::= ::= DEFAULT CHARACTER SET ::= | | | | | | | Syntax Rules 1) If is not specified, then a equal to is implicit. 2) If AUTHORIZATION is not speci- fied, then Schema definition and manipulation 255 X3H2-92-154/DBL CBR-002 11.1 Case: a) If the is contained in a that has a specified, then an equal to that is implicit for the . b) Otherwise, an equal to the SQL- session is implicit. 3) The of the explicit or implicit shall be different from the of the of any other schema in the catalog identified by the of . 4) If a appears in a in a , then the effective and during processing of the is the and specified or implicit in the . Other SQL-statements executed in s in the have the and specified or implicit for the . 5) If is not specified, then a containing an implementation-defined is im- plicit. Access Rules 1) The privileges necessary to execute the are implementation-defined. General Rules 1) A schema S is created with a name equal to the explicit or im- plicit and a default character set name equal to the of the explicit or implicit . 2) The is the current for privilege determination for S. 3) Those objects defined by s (base tables, views, constraints, domains, assertions, character sets, translations, collations, privileges) and their associated descriptors are effectively created. 4) The explicit or implicit is used as the default character set used for all s and s that do not specify an explicit charac- ter set. 256 Database Language SQL X3H2-92-154/DBL CBR-002 11.1 Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall not contain any . b) Conforming Intermediate SQL language shall not contain any . c) Conforming Intermediate SQL language shall not contain any . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Intermediate SQL language shall not contain any . b) A shall specify AUTHORIZATION and shall not specify a . c) A shall not be speci- fied. d) Conforming Entry SQL language shall not contain any . Schema definition and manipulation 257 X3H2-92-154/DBL CBR-002 11.2 11.2 Function Destroy a schema. Format ::= DROP SCHEMA ::= CASCADE | RESTRICT Syntax Rules 1) Let S be the schema identified by . 2) S shall identify a schema in the catalog identified by the ex- plicit or implicit . 3) If RESTRICT is specified, then S shall not contain any per- sistent base tables, global temporary tables, created local temporary tables, views, domains, assertions, character sets, collations, or translations. Note: If CASCADE is specified, then such objects will be dropped by the effective execution of the SQL schema manipulation state- ments specified in the General Rules of this Subclause. Access Rules 1) The current shall be equal to the that owns the schema identified by the . General Rules 1) Let T be the of any base table or temporary ta- ble contained in S. The following is effectively executed: DROP TABLE T CASCADE 2) Let V be the of any view contained in S. The fol- lowing is effectively executed: DROP VIEW V CASCADE 3) Let D be the of any domain contained in S. The following is effectively executed: DROP DOMAIN D CASCADE 258 Database Language SQL X3H2-92-154/DBL CBR-002 11.2 4) Let A be the of any assertion contained in S. The following is effectively exe- cuted: DROP ASSERTION A 5) Let CD be the of any collation definition contained in S. The following is effectively executed: DROP COLLATION CD 6) Let TD be the of any translation contained in S. The following is effectively executed: DROP TRANSLATION TD 7) Let RD be the of any character set con- tained in S. The following is effectively executed: DROP CHARACTER SET RD 8) The identified schema and its description are destroyed. Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain a . Schema definition and manipulation 259 X3H2-92-154/DBL CBR-002 11.3 11.3 Function Define a persistent base table, a created local temporary table, or a global temporary table. Format ::= CREATE [ { GLOBAL | LOCAL } TEMPORARY ] TABLE [ ON COMMIT { DELETE | PRESERVE } ROWS ] ::= [ { }... ] ::= | Syntax Rules 1) If a is contained in a , and if the contains a , then that shall be the same as the specified or implicit of the containing . 2) The schema identified by the explicit or implicit schema name of the shall not include a table descriptor whose table name is . 3) If ON COMMIT is specified, then TEMPORARY shall be specified. 4) If TEMPORARY is specified and ON COMMIT is not specified, then ON COMMIT DELETE ROWS is implicit. 5) A shall contain at least one . 6) The scope of the is the . Access Rules 1) If a is contained in a , then the current shall be equal to the that owns the schema identified by the implicit or explicit of the . 260 Database Language SQL X3H2-92-154/DBL CBR-002 11.3 General Rules 1) A defines either a persistent base table, a global temporary table or a created local temporary table. If GLOBAL is specified, then a global temporary table is defined. If LOCAL is specified, then a created local temporary table is defined. Otherwise, a persistent base table is defined. 2) The degree of the table being created is initially set to 0; the General Rules of Subclause 11.4, "" specify the degree of the table being created during the definition of columns in that table. 3) A table descriptor is created that describes the table being defined. a) The name included in the table descriptor is . b) The table descriptor includes the degree of the table, which is the number of s in the that are s. 4) A set of privilege descriptors is created that define the priv- ileges INSERT, SELECT, UPDATE, DELETE, and REFERENCES on this table and INSERT, SELECT, UPDATE, and REFERENCES for every in the table definition to the of the or in which the appears. These privileges are grantable. The grantor for each of these privilege descriptors is set to the special grantor value "_SYSTEM". Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall not specify TEMPORARY and shall not reference any global or local tem- porary table. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. Schema definition and manipulation 261 X3H2-92-154/DBL CBR-002 11.4 11.4 Function Define a column of a table. Format ::= { | } [ ] [ ... ] [ ] ::= [ ] [ ] ::= NOT NULL | | | Syntax Rules 1) Case: a) If the is contained in a , then let T be the table defined by that . b) If the is contained in a , then let T be the table declared by that . c) If the is contained in an , then let T be the table identified in the containing . The in the shall be different from the of any column of T. 2) The i-th column of the table is described by the i-th in the . The name and the data type or domain of the column are specified by the and or , respectively. 3) Let C be the of a . 4) If is specified, then let D be the domain identi- fied by the . 262 Database Language SQL X3H2-92-154/DBL CBR-002 11.4 5) The data type of the column is Case: a) If is specified, then that data type. b) Otherwise, the data type of D. 6) If the data type of the column is character string, then the collation of the column is Case: a) If is specified, then the collation speci- fied by that . b) If is specified, then the collation of D, if any. c) Otherwise, the default collation of the character set of the column. Note: The character set of a column is determined by its data type. 7) If a is specified, then: a) Let DT be the . b) The data type of the column is DT. c) If DT is CHARACTER or CHARACTER VARYING and does not spec- ify a , then the specified or implicit in the of the that created the schema identified by the immedi- ately contained in the of the containing or is implicit. d) If DT is a that identifies a char- acter set that specifies a and the does not contain a , then the of the is implicit in the . 8) If is specified, then data type shall be a character string type. 9) If a is specified, then let CND be the if one is specified and let CND be a zero-length string otherwise; let CA be the if specified and let CA be a zero-length string otherwise. The is equivalent to a as follows: Schema definition and manipulation 263 X3H2-92-154/DBL CBR-002 11.4 Case: a) If a is specified that con- tains the NOT NULL, then it is equivalent to a that contains the follow- ing : CND CHECK ( C IS NOT NULL ) CA b) If a is specified that con- tains a , then it is equivalent to a that contains the following : CND (C) CA Note: The is defined in Subclause 11.7, "". c) If a is specified that con- tains a , then it is equivalent to a that contains the following : CND FOREIGN KEY (C) CA Note: The is defined in Subclause 11.8, "". d) If a is specified that con- tains a , then it is equivalent to a that contains the follow- ing : CND CHECK ( ) CA Each directly contained in the shall reference column C. Access Rules 1) If is specified, then the applicable privileges shall include USAGE on D.. General Rules 1) A defines a column in a table. 2) The specifies the default collating sequence for the column. If is not specified, then the default collating sequence is that used for comparisons of Coercible coercibility attribute, as defined in Subclause 8.2, "". 3) If the specifies , then a data type descriptor is created that describes the data type of the column being defined. 264 Database Language SQL X3H2-92-154/DBL CBR-002 11.4 4) The degree of the table T being defined in the containing or or altered by the containing is increased by 1. 5) A column descriptor is created that describes the column being defined. The name included in the column descriptor is . If the specifies , then the column descriptor includes the data type descriptor of the column; otherwise, the column descriptor includes the name of the domain of the column. The ordinal position included in the column descriptor is equal to the degree of T. If the contains a , then the of the is included in the column descriptor. The column descriptor includes the nullability char- acteristic of the column, determined according to the rules in Subclause 4.8, "Columns". The column descriptor is included in the table descriptor for T. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall not contain any . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) A shall not contain a . b) A shall not contain a . c) Conforming Intermediate SQL language shall contain no . Schema definition and manipulation 265 X3H2-92-154/DBL CBR-002 11.5 11.5 Function Specify the default for a column or domain. Format ::= DEFAULT ::= | | USER | CURRENT_USER | SESSION_USER | SYSTEM_USER | NULL Syntax Rules 1) The subject data type of a is the data type specified in the descriptor identified by the containing , , , or . 2) If USER is specified, then CURRENT_USER is implicit. 3) Case: a) If a is specified, then: Case: i) If the subject data type is character string, then the shall be a . If the length of the subject data type is fixed, then the length in characters of the shall not be greater than the length of the subject data type. If the length of the subject data type is variable, then the length in characters of the shall not be greater than the maximum length of the subject data type. The shall have the same character repertoire as the subject data type. ii) If the subject data type is bit string, then the shall be a or a . If the length of the subject data type is fixed, then the length in bits of the or shall not be greater than the length of the sub- ject data type. If the length of the subject data type is 266 Database Language SQL X3H2-92-154/DBL CBR-002 11.5 variable, then the length in bits of the or shall not be greater than the maximum length of the subject data type. iii) If the subject data type is exact numeric, then the shall be a that simply contains an . There shall be a rep- resentation of the value of the in the subject data type that does not lose any significant digits. iv) If the subject data type is approximate numeric, then the shall be a . v) If the subject data type is datetime, then the shall be a and shall contain the same s as the subject data type. vi) If the subject data type is interval, then the shall be an and shall contain the same as the subject data type. b) If CURRENT_USER, SESSION_USER, or SYSTEM_USER is specified, then the subject data type shall be character string with character set SQL_TEXT. If the length of the subject data type is fixed, then its length shall not be less than 128 characters. If the length of the subject data type is vari- able, then its maximum length shall not be less than 128 characters. c) If is specified, then the subject data type shall be datetime with the same datetime type as the datetime data type of the . Access Rules None. General Rules 1) The default value inserted in the column descriptor, if the is to apply to a column, or in the domain de- scriptor, if the is to apply to a domain, is as follows: Case: a) If the contains NULL, then the null value. b) If the contains a , then Case: i) If the subject data type is numeric, then the numeric value of the . Schema definition and manipulation 267 X3H2-92-154/DBL CBR-002 11.5 ii) If the subject data type is character string with variable length, then the value of the . iii) If the subject data type is character string with fixed length, then the value of the , extended as neces- sary on the right with s to the length in characters of the subject data type. iv) If the subject data type is bit string with variable length, then the value of the . v) If the subject data type is bit string with fixed length, then the value of the extended as necessary on the right with 0-valued bits to the length of the subject data type and a completion condition is raised: warning- implicit zero-bit padding. vi) If the subject data type is datetime or interval, then the value of the . c) If the specifies CURRENT_USER, SESSION_USER, or SYSTEM_USER, then Case: i) If the subject data type is character string with variable length, then the value specified by CURRENT_USER, SESSION_ USER, or SYSTEM_USER. ii) If the subject data type is character string with fixed length, then the value specified by CURRENT_USER, SESSION_ USER, or SYSTEM_USER, extended as necessary on the right with s to the length in characters of the subject data type. d) If the contains a , then the value of an implicit reference to the . 2) The default value of a column is Case: a) If the column descriptor of a column includes a default value derived from a , then the value of that . b) If the column descriptor includes a domain name that iden- tifies a domain descriptor that includes a default value derived from a , then the value of that . c) Otherwise, the null value. 268 Database Language SQL X3H2-92-154/DBL CBR-002 11.5 Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) A shall not specify a , SYSTEM_USER, SESSION_USER, or CURRENT_USER. Schema definition and manipulation 269 X3H2-92-154/DBL CBR-002 11.6 11.6 Function Specify an integrity constraint. Format ::= [ ] [ ] ::= | | Syntax Rules 1) If is not specified, then INITIALLY IMMEDIATE NOT DEFERRABLE is implicit. 2) If is not specified, then a that contains an implementation- dependent is implicit. The assigned shall obey the Syntax Rules of an explicit . Access Rules None. General Rules 1) A defines a table constraint. 2) A table constraint descriptor is created that describes the table constraint being defined. The table constraint descriptor includes the contained in the explicit or implicit . The table constraint descriptor includes an indication of whether the constraint is deferrable or not deferrable and whether the initial constraint mode of the constraint is de- ferred or immediate. Case: a) If is specified, then the table constraint descriptor is a unique constraint descriptor that includes an indication of whether it was defined with PRIMARY KEY or UNIQUE, and the names of the unique columns specified in the . 270 Database Language SQL X3H2-92-154/DBL CBR-002 11.6 b) If is specified, then the table constraint descriptor is a referential constraint de- scriptor that includes the names of the referencing columns specified in the and the names of the referenced columns and referenced table specified in the , the value of the , if specified, and the , if specified. c) If is specified, then the table constraint descriptor is a table check constraint descriptor that includes the . 3) If the is a , then let SC be the immediately contained in the and let T be the table name included in the corresponding table constraint descriptor; the table constraint is not satisfied if and only if EXISTS ( SELECT * FROM T WHERE NOT ( SC ) ) is true. Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Intermediate SQL language shall contain no . Schema definition and manipulation 271 X3H2-92-154/DBL CBR-002 11.7 11.7 Function Specify a uniqueness constraint for a table. Format ::= ::= UNIQUE | PRIMARY KEY ::= Syntax Rules 1) Let T be the table identified by the containing or . Let TN be the of T. 2) Let UCL be the of the . 3) Case: a) If the specifies PRIMARY KEY, then let SC be the : UNIQUE ( SELECT UCL FROM TN ) AND ( UCL ) IS NOT NULL b) Otherwise, let SC be the : UNIQUE ( SELECT UCL FROM TN ) 4) Each in the shall identify a column of T, and the same column shall not be identified more than once. 5) A shall specify at most one implicit or ex- plicit that specifies PRIMARY KEY. 6) If a that specifies PRIMARY KEY is contained in an , then the table identified by the immediately contained in the containing shall not have a unique constraint that was defined by a that specified PRIMARY KEY. 272 Database Language SQL X3H2-92-154/DBL CBR-002 11.7 7) The set of columns in the shall be dis- tinct from the unique columns of any other unique constraint descriptor that is included in the base table descriptor of T. Access Rules None. General Rules 1) A defines a unique constraint. Note: Subclause 10.6, " and ", specifies when a constraint is effectively checked. 2) The unique constraint is not satisfied if and only if EXISTS ( SELECT * FROM TN WHERE NOT ( SC ) ) is true. Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) If PRIMARY KEY or UNIQUE is specified, then the for each column whose is in the shall specify NOT NULL. Schema definition and manipulation 273 X3H2-92-154/DBL CBR-002 11.8 11.8 Function Specify a referential constraint. Format ::= FOREIGN KEY ::= REFERENCES [ MATCH ] [ ] ::= FULL | PARTIAL ::= ::= [ ] ::= ::= [ ] | [ ] ::= ON UPDATE ::= ON DELETE ::= CASCADE | SET NULL | SET DEFAULT | NO ACTION Syntax Rules 1) Let referencing table be the table identified by the containing or . Let referenced table be the table identified by the in the . Let referencing columns be the column or columns identified by the in the 274 Database Language SQL X3H2-92-154/DBL CBR-002 11.8 and let referencing column be one such column. 2) Case: a) If the specifies a , then the set of column names of that shall be equal to the set of column names in the unique columns of a unique constraint of the refer- enced table. Let referenced columns be the column or columns identified by that and let refer- enced column be one such column. Each referenced column shall identify a column of the referenced table and the same column shall not be identified more than once. b) If the does not specify a , then the table descriptor of the referenced table shall include a unique constraint that spec- ifies PRIMARY KEY. Let referenced columns be the column or columns identified by the unique columns in that unique con- straint and let referenced column be one such column. The shall be considered to implic- itly specify a that is identical to that . 3) The table constraint descriptor describing the whose identifies the referenced columns shall indicate that the unique constraint is not deferrable. 4) The referenced table shall be a base table. Case: a) If the referencing table is a persistent base table, then the referenced table shall be a persistent base table. b) If the referencing table is a global temporary table, then the referenced table shall be a global temporary table. c) If the referencing table is a created local temporary table, then the referenced table shall be either a global temporary table or a created local temporary table. d) If the referencing table is a declared local temporary table, then the referenced table shall be either a global temporary table, a created local temporary table or a declared local temporary table. 5) If the referenced table is a temporary table with ON COMMIT DELETE ROWS specified, then the referencing table shall specify ON COMMIT DELETE ROWS. Schema definition and manipulation 275 X3H2-92-154/DBL CBR-002 11.8 6) Each referencing column shall identify a column of the referenc- ing table, and the same column shall not be identified more than once. 7) The shall contain the same number of s as the . The i-th col- umn identified in the corresponds to the i-th column identified in the . The data type of each referencing column shall be the same as the data type of the corresponding referenced column. 8) If a does not specify any , then an with a of NO ACTION is implicit. 9) If a does not specify any , then a with a of NO ACTION is implicit. Access Rules 1) The applicable privileges shall include REFERENCES for each referenced column. General Rules 1) A defines a referential constraint. Note: Subclause 10.6, " and ", specifies when a constraint is effectively checked. 2) Let Rf be the referencing columns and let Rt be the referenced columns in the referenced table T. The referencing table and the referenced table satisfy the referential constraint if and only if: Case: a) A is not specified and for each row of the ref- erencing table, the Rf MATCH (SELECT Rt FROM T) is true. b) PARTIAL is specified and for each row of the referencing table, the Rf MATCH PARTIAL (SELECT Rt FROM T) is true. c) FULL is specified and for each row of the referencing table, the Rf MATCH FULL (SELECT Rt FROM T) 276 Database Language SQL X3H2-92-154/DBL CBR-002 11.8 is true. 3) Case: a) If is not specified or if FULL is specified, then for a given row in the referenced table, let matching rows be all rows in the referencing table whose referenc- ing column values equal the corresponding referenced column values for the referential constraint. b) If PARTIAL is specified, then: i) For a given row in the referenced table, let matching rows be all rows in the referencing table that have at least one non-null referencing column value and whose non-null ref- erencing column values equal the corresponding referenced column values for the referential constraint. ii) For a given row in the referenced table, let unique match- ing rows be all matching rows for that given row that are matching rows only to the given row in the referenced table for the referential constraint. For a given row in the ref- erenced table, let non-unique matching rows be all matching rows for that given row that are not unique matching rows for that given row for the referential constraint. 4) For every row of the referenced table, its matching rows, unique matching rows, and non-unique matching rows are determined imme- diately before the execution of any SQL-statement. No new match- ing rows are added during the execution of that SQL-statement. The association between a referenced row and a non-unique match- ing row is dropped during the execution of that SQL-statement if the referenced row is either marked for deletion or updated to a distinct value on any referenced column that corresponds to a non-null referencing column. This occurs immediately after such a mark for deletion or update of the referenced row. Unique matching rows and non-unique matching rows for a referenced row are evaluated immediately after dropping the association between that referenced row and a non-unique matching row. 5) If a is specified and a row of the referenced table that has not previously been marked for deletion is marked for deletion, then Case: a) If is not specified or if FULL is specified, then Case: i) If the specifies CASCADE, then all matching rows are marked for deletion. Schema definition and manipulation 277 X3H2-92-154/DBL CBR-002 11.8 ii) If the specifies SET NULL, then in all match- ing rows each referencing column is set to the null value. iii) If the specifies SET DEFAULT, then in all matching rows each referencing column is set to the default value specified in the General Rules of Subclause 11.5, "". b) If PARTIAL is specified, then Case: i) If the specifies CASCADE, then all unique matching rows are marked for deletion. ii) If the specifies SET NULL, then in all unique matching rows each referencing column is set to the null value. iii) If the specifies SET DEFAULT, then in all unique matching rows each referencing column is set to the default value specified in the General Rules of Subclause 11.5, "". Note: Otherwise, the is not performed. 6) If an is specified and a non-null value of a ref- erenced column in the referenced table is updated to a value that is distinct from the current value of that column, then Case: a) If is not specified or if FULL is specified, then Case: i) If the specifies CASCADE, then in all match- ing rows the referencing column that corresponds with the referenced column is updated to the new value of the refer- enced column. ii) If the specifies SET NULL, then Case: 1) If is not specified, then in all matching rows the referencing column that corresponds with the referenced column is set to the null value. 2) If specifies FULL, then in all matching rows each referencing column is set to the null value. iii) If the specifies SET DEFAULT, then in all matching rows the referencing column that corresponds with the referenced column is set to the default value specified 278 Database Language SQL X3H2-92-154/DBL CBR-002 11.8 in the General Rules of Subclause 11.5, "". b) If PARTIAL is specified, then Case: i) If the specifies CASCADE, then for each unique matching row that contains a non-null value in the referencing column C1 that corresponds with the updated referenced column C2, C1 is updated to the new value V of C2, provided that, in all updated rows in the referenced table that formerly had, in the same SQL-statement, that unique matching row as a matching row, the values in C2 have all been updated to a value that is not distinct from V. Otherwise, an exception condition is raised: triggered data change violation. Note: Because of the Rules of Subclause 8.2, "", on which the definition of "distinct" re- lies, the values in C2 may have been updated to values that are not distinct, yet are not identical. Which of these non-distinct values is used for the cascade operation is implementation-dependent. ii) If the specifies SET NULL, then in all unique matching rows that contain a non-null value in the ref- erencing column that corresponds with the updated column, that referencing column is set to the null value. iii) If the specifies SET DEFAULT, then in all unique matching rows that contain a non-null value in the referencing column that corresponds with the updated col- umn, that referencing column is set to the default value specified in the General Rules of Subclause 11.5, "". Note: Otherwise, the is not performed. 7) If any attempt is made within an SQL-statement to update some data item to a value that is distinct from the value to which that data item was previously updated within the same SQL- statement, then an exception condition is raised: triggered data change violation. 8) If an attempts to update a row that has been deleted by any that identifies some cursor CR that is still open or updated by any that identifies some cursor CR that is still open or if a attempts to mark for deletion such a row, then a completion condition is raised: warning- cursor operation conflict. Schema definition and manipulation 279 X3H2-92-154/DBL CBR-002 11.8 9) All rows that are marked for deletion are effectively deleted at the end of the SQL-statement, prior to the checking of any integrity constraints. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) A shall not specify MATCH. b) A shall not contain an . c) The order of the column names in a shall be the same as the order of column names of the corre- sponding unique constraint of the referenced table. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) A shall not contain a . 280 Database Language SQL X3H2-92-154/DBL CBR-002 11.9 11.9 Function Specify a condition for the SQL-data. Format ::= CHECK Syntax Rules 1) The shall not contain a or a . 2) The shall not contain a that is not contained in a . 3) If is contained in a or , then let T be the table identified by the containing or . Case: a) If T is a persistent base table, or if the is contained in a or , then no generally con- tained in the shall reference a temporary table. b) If T is a global temporary table, then no generally contained in the shall reference a table other than a global temporary table. c) If T is a created local temporary table, then no generally contained in the shall reference a table other than either a global temporary table or a created local temporary table. d) If T is a declared local temporary table, then no generally contained in the shall reference a persistent base table. 4) If the is contained in a that defines a temporary table and specifies ON COMMIT PRESERVE ROWS or a that specifies ON COMMIT PRESERVE ROWS, then no in the shall reference a temporary table defined by a or a that specifies ON COMMIT DELETE ROWS. Schema definition and manipulation 281 X3H2-92-154/DBL CBR-002 11.9 5) The shall not generally contain a or a that is CURRENT_USER, SESSION_USER, or SYSTEM_USER. 6) The shall not generally contain a or a that is possibly non- deterministic. Access Rules 1) Let TN be any referenced in the . Case: a) If a is contained in the , then the applicable privileges shall include REFERENCES for each of the table identified by TN contained in the . b) Otherwise, the applicable privileges shall include REFERENCES for at least one column of the table identified by TN. General Rules 1) A defines a check constraint. Note: Subclause 10.6, " and ", specifies when a constraint is effectively checked. The General Rules that control the evaluation of a check constraint can be found in either Subclause 11.6, " ", or Subclause 11.21, "", depending on whether it forms part of a table constraint or a domain constraint. 2) If the character representation of the cannot be represented in the Information Schema without truncation, then a completion condition is raised: warning-search condition too long for information schema. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) The contained in a shall not contain a . b) The REFERENCES privilege is not required for access. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. 282 Database Language SQL X3H2-92-154/DBL CBR-002 11.10 11.10 Function Change the definition of a table. Format ::= ALTER TABLE ::= | | | | Syntax Rules 1) Let T be the table identified by the . 2) The schema identified by the explicit or implicit schema name of the shall include the descriptor of T. 3) The scope of the is the entire . 4) T shall be a base table. 5) T shall not be a declared local temporary table. Access Rules 1) The current shall be equal to the that owns the schema identified by the of the table identified by . General Rules 1) The base table descriptor of T is modified as specified by . Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain an . Schema definition and manipulation 283 X3H2-92-154/DBL CBR-002 11.11 11.11 Function Add a column to a table. Format ::= ADD [ COLUMN ] Syntax Rules None. Access Rules None. General Rules 1) The column defined by the is added to T. 2) Let C be the column added to T. Every value in C is the default value for C. Note: The default value of a column is defined in Subclause 11.5, "". Note: The addition of a column to a table has no effect on any existing included in a view descriptor or included in constraint descriptor because any implicit s in these clauses are replaced by explicit s when the clause is originally evaluated. See the Syntax Rules of Subclause 7.10, "". 3) For every table privilege descriptor that specifies T and a privilege of SELECT, UPDATE, INSERT or REFERENCES, a new col- umn privilege descriptor is created that specifies T, the same action, grantor, and grantee, and the same grantability, and specifies the of the . 4) In all other respects, the specification of a in an has the same effect as specification of the in the for T would have had. In particular, the degree of T is increased by 1 and the ordinal position of that column is equal to the new degree of T as specified in the General Rules of Subclause 11.4, "". 284 Database Language SQL X3H2-92-154/DBL CBR-002 11.11 Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain an . Schema definition and manipulation 285 X3H2-92-154/DBL CBR-002 11.12 11.12 Function Change a column and its definition. Format ::= ALTER [ COLUMN ] ::= | Syntax Rules 1) Let T be the table identified in the containing . 2) Let C be the column identified by the . 3) C shall be a column of T. Access Rules None. General Rules 1) The column descriptor of C is modified as specified by . Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain an . 286 Database Language SQL X3H2-92-154/DBL CBR-002 11.13 11.13 Function Set the default clause for a column. Format ::= SET Syntax Rules None. Access Rules None. General Rules 1) Let C be the column identified by the in the con- taining . 2) The default value specified by the is placed in the column descriptor of C. Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain a . Schema definition and manipulation 287 X3H2-92-154/DBL CBR-002 11.14 11.14 Function Drop the default clause from a column. Format ::= DROP DEFAULT Syntax Rules 1) Let C be the column identified by the in the con- taining . 2) The descriptor of C shall include a default value. Access Rules None. General Rules 1) The default value is removed from the column descriptor of C. Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain a . 288 Database Language SQL X3H2-92-154/DBL CBR-002 11.15 11.15 Function Destroy a column. Format ::= DROP [ COLUMN ] Syntax Rules 1) Let T be the table identified by the in the con- taining and let TN be the name of T. 2) Let C be the column identified by the CN. 3) C shall be a column of T and C shall not be the only column of T. 4) If RESTRICT is specified, then C shall not be referenced in the of any view descriptor or in the of any constraint descriptor other than a table con- straint descriptor that contains references to no other column and that is included in the table descriptor of T. Note: A that does not specify CASCADE will fail if there are any references to that column resulting from the use of CORRESPONDING, NATURAL, SELECT * (except where contained in an exists predicate>), or REFERENCES without a in its . Note: If CASCADE is specified, then any such dependent object will be dropped by the execution of the spec- ified in the General Rules of this Subclause. Access Rules None. General Rules 1) Let A be the current . The following is effectively executed with a current of "_SYSTEM" and without further Access Rule checking: REVOKE INSERT(CN), UPDATE(CN), REFERENCES(CN) ON TABLE TN FROM A CASCADE Schema definition and manipulation 289 X3H2-92-154/DBL CBR-002 11.15 2) Let VN be the name of any view that contains a reference to column C of table T. The following is effectively executed with a current of "_SYSTEM" and without further Access Rule checking: DROP VIEW VN CASCADE 3) If the column is not based on a domain, then its data type de- scriptor is destroyed. 4) The data associated with C is destroyed and the descriptor of C is removed from the descriptor of T. 5) The identified column and its descriptor are destroyed. 6) The degree of T is reduced by 1. The ordinal position of all columns having an ordinal position greater than the ordinal position of C is reduced by 1. Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain a . 290 Database Language SQL X3H2-92-154/DBL CBR-002 11.16 11.16 Function Add a constraint to a table. Format ::= ADD Syntax Rules None. Access Rules None. General Rules 1) Let T be the table identified by the in the con- taining . 2) The table constraint descriptor for the is included in the table descriptor for T. Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain an . Schema definition and manipulation 291 X3H2-92-154/DBL CBR-002 11.17 11.17 Function Destroy a constraint on a table. Format ::= DROP CONSTRAINT Syntax Rules 1) Let T be the table identified by the in the con- taining . The schema identified by the explicit or implicit schema name of the shall in- clude the descriptor of T. 2) The shall identify a table constraint TC of T. 3) If TC is a unique constraint and there exists a referential constraint RC whose referenced table is T and whose referenced columns are the unique columns of TC, then RC is said to be dependent on TC. 4) If RESTRICT is specified, then no table constraint shall be dependent on TC. Note: If CASCADE is specified, then any such dependent object will be dropped by the effective execution of the specified in the General Rules of this Subclause. Access Rules None. General Rules 1) Let TCN2 be the of any table constraint that is dependent on TC and let T2 be the of the ta- ble descriptor that includes TCN2. The following is effectively executed without further Access Rule checking: ALTER TABLE T2 DROP CONSTRAINT TCN2 CASCADE 2) The descriptor of TC is removed from the descriptor of T. 3) The identified table constraint and its descriptor are de- stroyed. 292 Database Language SQL X3H2-92-154/DBL CBR-002 11.17 Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain a . Schema definition and manipulation 293 X3H2-92-154/DBL CBR-002 11.18 11.18 Function Destroy a table. Format ::= DROP TABLE Syntax Rules 1) Let T be the table identified by the and let TN be that . The schema identified by the explicit or implicit schema name of TN shall include the descriptor of T. 2) T shall be a base table. 3) T shall not be a declared local temporary table. 4) If RESTRICT is specified, then T shall not be referenced in the of any view descriptor or the of any constraint descriptor. Note: If CASCADE is specified, then such referencing objects will be dropped by the execution of the spec- ified in the General Rules of this Subclause. Access Rules 1) The current shall be equal to the that owns the schema identified by the of the table identified by TN. General Rules 1) Let A be the current . The following is effectively executed with a current of "_SYSTEM" and without further Access Rule checking: REVOKE ALL PRIVILEGES ON TN FROM A CASCADE 2) The identified base table and its descriptor are destroyed. Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 294 Database Language SQL X3H2-92-154/DBL CBR-002 11.18 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain any . Schema definition and manipulation 295 X3H2-92-154/DBL CBR-002 11.19 11.19 Function Define a viewed table. Format ::= CREATE VIEW [ ] AS [ WITH [ ] CHECK OPTION ] ::= CASCADED | LOCAL ::= Syntax Rules 1) The shall not contain a or a . 2) If a is contained in a and the contains a , then that shall be the same as the specified or implicit of the containing . 3) The schema identified by the explicit or implicit schema name of the shall not include a table descriptor whose table name is . 4) The viewed table defined by shall not be identified by any generally contained in the . 5) Any that is specified in the shall be different from the of any . 6) If the is updatable, then the viewed table is an updatable table. Otherwise, it is a read-only table. 7) If the is a that con- tains a or a that is not con- tained in a , then the viewed table defined by the is a grouped view. 8) If any two columns in the table specified by the have the same , or if any column of that table has an implementation-dependent name, then a shall be specified. 296 Database Language SQL X3H2-92-154/DBL CBR-002 11.19 9) The same shall not be specified more than once in the . 10)The number of s in the shall be the same as the degree of the table specified by the . 11)No column in the table specified by shall have a coercibility attribute of No collating sequence. Note: The coercibility attribute is described in Subclause 4.2.3, "Rules determining collating sequence usage". Note: The coercibility attribute for references to the column is defined in Subclause 6.4, "". 12)If WITH CHECK OPTION is specified, then the viewed table shall be updatable. 13)If WITH CHECK OPTION is specified with no , then a of CASCADED is implicit. 14)Let V be the view defined by the . The un- derlying columns of every i-th column of V are the underlying columns of the i-th column of the and the underlying columns of V are the underlying columns of the . Access Rules 1) If a is contained in a , then the current shall be equal to the that owns the schema identified by the implicit or explicit of the . General Rules 1) A view descriptor VD is created that describes V. The view de- scriptor includes the , the , column descriptors taken from the table specified by the , and an indication of whether WITH CHECK OPTION was specified. If a is specified, then the of the i-th column of the view is the i-th in that . Otherwise, the s of the view are the s of the table specified by the . 2) Let VN be the . Let QE be the . If a is specified, then let VCL be the preceded by a and followed by a ; otherwise, let VCL be the empty string. Case: a) When VN is immediately contained in some SQL-schema state- ment, it identifies the view descriptor VD. Schema definition and manipulation 297 X3H2-92-154/DBL CBR-002 11.19 b) Otherwise, VN references the same table as the : ( QE ) AS VN VCL 3) Let A be the that owns V. 4) A set of privilege descriptors is created that defines the priv- ilege SELECT on this table to A and SELECT for each column of V to A. This privilege is grantable if and only if the appli- cable SELECT privileges on all s contained in the are grantable. The grantor of this privilege descriptor is set to the special grantor value "_SYSTEM". 5) If V is updatable, then let T be the leaf underlying table of the . 6) For i ranging from 1 to the number of distinct leaf underlying tables of the of V, let RTi be the s of those tables. For every column CV of V: a) Let CRij, for j ranging from 1 to the number of columns of RTi that are underlying columns of CV, be the s of those columns. b) If A has REFERENCES(CRij) for all i and for all j, and A has REFERENCES on some column of RTi for all i, then a privilege descriptor is created that defines the privilege REFERENCES (CV) on V to A. That privilege is grantable if and only if the REFERENCES privileges on all of the columns CRTij are grantable. The grantor of that privilege descriptor is set to the special grantor value "_SYSTEM". 7) If V is updatable, then: a) A set of privilege descriptors is created that defines the privileges INSERT, UPDATE, and DELETE on V that are appli- cable privileges on T to A. A privilege on V is grantable if and only if the corresponding privilege on T is grantable. b) For every column in V: i) There is a corresponding column in T from which the column of V is derived. Let CV and CT be the s of the corresponding columns of V and T respectively. ii) A set of privilege descriptors is created that defines the privileges INSERT(CV) and UPDATE(CV) on V, where the privileges INSERT(CT) and UPDATE(CT) on T are the appli- cable privileges to A, respectively. A privilege on V is grantable if and only if the corresponding privilege on T is grantable. The grantor of these privilege descriptors is set to the special grantor value "_SYSTEM". 298 Database Language SQL X3H2-92-154/DBL CBR-002 11.19 8) If V is updatable, then let TLEAF be the leaf generally under- lying table of V. For every row in V there is a corresponding row in TLEAF from which the row of V is derived and for each column in V there is a corresponding column in TLEAF from which the column of V is derived. The insertion of a row into V is an insertion of a corresponding row into TLEAF. The deletion of a row from V is a deletion of the corresponding row in TLEAF. The updating of a column of a row in V is an updating of the corresponding column of the corresponding row in TLEAF. 9) Let V1 be a view. V1 spans V1. V1 spans a view V2 if V2 is a generally underlying table of V1. 10)An update operation is an , , , , or . An update operation on a view V is an update operation whose identifies V. 11)If a view V1 spans a view VA described by a view descriptor that includes WITH CHECK OPTION and an update operation on V1 would result in a row that would not appear in the result of VA, then a) If the view descriptor of VA includes CASCADED, then an ex- ception condition is raised: with check option violation. b) If the view descriptor of VA includes LOCAL and the update operation would result in a row that would appear in the simply underlying table of the simply underlying table of the contained in VA, then an exception condition is raised: with check option violation. 12)Validation of a WITH CHECK OPTION constraint is effectively performed at the end of each update operation. 13)If the character representation of the cannot be represented in the Information Schema without truncation, then a completion condition is raised: warning-query expression too long for information schema. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall not contain any , but the effect shall be that defined for a of CASCADED. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) The in a shall be a . Schema definition and manipulation 299 X3H2-92-154/DBL CBR-002 11.20 11.20 Function Destroy a view. Format ::= DROP VIEW Syntax Rules 1) Let V be the table identified by the and let VN be that . The schema identified by the explicit or implicit schema name of VN shall include the descriptor of V. 2) V shall be a viewed table. 3) If RESTRICT is specified, then V shall not be referenced in the of any view descriptor or the of any assertion descriptor or constraint descriptor. Note: If CASCADE is specified, then any such dependent object will be dropped by the execution of the spec- ified in the General Rules of this Subclause. Access Rules 1) The current shall be equal to the that owns the schema identified by the of the table identified by VN. General Rules 1) Let A be the current . The following is effectively executed with a current of "_SYSTEM" and without further Access Rule checking: REVOKE ALL PRIVILEGES ON VN FROM A CASCADE 2) The identified view and its descriptor are destroyed. Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain a . 300 Database Language SQL X3H2-92-154/DBL CBR-002 11.21 11.21 Function Define a domain. Format ::= CREATE DOMAIN [ AS ] [ ] [ ... ] [ ] ::= [ ] [ ] Syntax Rules 1) If a is contained in a , and if the contains a , then that shall be the same as the specified or implicit of the containing . The schema identified by the explicit or implicit schema name of the shall not include a domain descriptor whose domain name is . 2) If specifies CHARACTER or CHARACTER VARYING and does not specify , then the character set name of the default character set of the schema identified by the implicit or explicit of is implicit. 3) If specifies a that identi- fies a character set that has a default collation and the does not directly contain a , then the collation of the is the im- plicit collation of the domain. 4) Let D1 be some domain. D1 is in usage by a domain constraint DC if and only if the of DC generally contains the either of D1 or of some domain D2 such that D1 is in usage by some domain constraint of D2. No domain shall be in usage by any of its own constraints. 5) If is specified, then shall be a character string type. 6) for every is specified: a) If is not specified, then INITIALLY IMMEDIATE NOT DEFERRABLE is implicit. Schema definition and manipulation 301 X3H2-92-154/DBL CBR-002 11.21 b) If is not specified, then a that contains an implementation- dependent is implicit. The assigned shall obey the Syntax Rules of an explicit . Access Rules 1) If a is contained in a , then the current shall be equal to the that owns the schema identified by the implicit or explicit of the . General Rules 1) A defines a domain. Note: Subclause 10.6, " and ", specifies when a constraint is effectively checked. 2) A data type descriptor is created that describes the data type of the domain being created. 3) A domain descriptor is created that describes the domain being created. The domain descriptor contains the name of the domain, the data type descriptor of the data type, the of the if the contains a , the value of the if the immediately contains , and a domain constraint descriptor for every immediately contained . 4) A privilege descriptor is created that defines the USAGE priv- ilege on this domain to the of the or in which the appears. This privilege is grantable if and only if the applicable privi- leges include a grantable REFERENCES privilege for each included in the domain descriptor and a grantable USAGE privilege for each , , , and contained in the of any domain constraint descriptor included in the domain descriptor, and a grantable USAGE privilege for the contained in the included in the domain descriptor. The grantor of the privilege descriptor is set to the special grantor value "_SYSTEM". 5) Let DSC be the included in some domain con- straint descriptor DCD. Let D be the name of the domain whose descriptor includes DCD. Let T be the name of some table whose descriptor includes some column descriptor with column name C whose domain name is D. Let CSC be a copy of DSC in which every instance of the VALUE is replaced by C. 302 Database Language SQL X3H2-92-154/DBL CBR-002 11.21 6) The domain constraint specified by DCD for C is not satisfied if and only if EXISTS ( SELECT * FROM T WHERE NOT ( CSC ) ) is true. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall not contain any . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain any . Schema definition and manipulation 303 X3H2-92-154/DBL CBR-002 11.22 11.22 Function Change a domain and its definition. Format ::= ALTER DOMAIN ::= | | | Syntax Rules 1) Let D be the domain identified by . The schema identified by the explicit or implicit schema name of the shall include the descriptor of D. Access Rules 1) The current shall be equal to the that owns the schema identified by the implicit or explicit of . General Rules 1) The domain descriptor of D is modified as specified by . Note: The changed domain descriptor of D is applicable to every column that is dependent on D. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall contain no . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. 304 Database Language SQL X3H2-92-154/DBL CBR-002 11.23 11.23 Function Set the default value in a domain. Format ::= SET Syntax Rules None. Access Rules None. General Rules 1) Let D be the domain identified by the in the con- taining . 2) The default value specified by the is placed in the domain descriptor of D. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall contain no . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. Schema definition and manipulation 305 X3H2-92-154/DBL CBR-002 11.24 11.24 Function Remove the default clause of a domain. Format ::= DROP DEFAULT Syntax Rules 1) Let D be the domain identified by the in the con- taining . 2) The descriptor of D shall contain a default value. Access Rules None. General Rules 1) Let C be the set of columns whose column descriptors contain the domain descriptor of D. 2) For every column belonging to C, if the column descriptor does not already contain a default value, then the default value from the domain descriptor of D is placed in that column descriptor. 3) The default value is removed from the domain descriptor of D. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall contain no . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. 306 Database Language SQL X3H2-92-154/DBL CBR-002 11.25 11.25 Function Add a constraint to a domain. Format ::= ADD Syntax Rules 1) Let D be the domain identified by the in the con- taining . 2) Let D1 be some domain. D1 is in usage by a domain constraint DC if and only if the of DC generally contains the either of D1 or of some domain D2 such that D1 is in usage by some domain constraint of D2. No domain shall be in usage by any of its own constraints. Access Rules None. General Rules 1) The constraint descriptor of the is added to the domain descriptor of D. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall contain no . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. Schema definition and manipulation 307 X3H2-92-154/DBL CBR-002 11.26 11.26 Function Destroy a constraint on a domain. Format ::= DROP CONSTRAINT Syntax Rules 1) Let D be the domain identified by the in the con- taining . 2) Let DC be the descriptor of the constraint identified by . 3) DC shall be included in the domain descriptor of D. Access Rules None. General Rules 1) The constraint descriptor of DC is removed from the domain de- scriptor of D. 2) The constraint DC and its descriptor are destroyed. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall contain no . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. 308 Database Language SQL X3H2-92-154/DBL CBR-002 11.27 11.27 Function Destroy a domain. Format ::= DROP DOMAIN Syntax Rules 1) Let D be the domain identified by and let DN be that . The schema identified by the explicit or implicit schema name of DN shall include the descriptor of D. 2) If RESTRICT is specified, then D shall not be referenced by any column descriptor, in the of any view descriptor, or in the of any constraint de- scriptor. Access Rules 1) The current shall be equal to the that owns the schema identified by the of the domain identified by DN. Let UA be the of the current SQL-session. General Rules 1) Let C be any column descriptor that includes DN, let T be the table described by the table descriptor that includes C, and let TN be the column name of T. C is modified as follows: a) DN is removed from C. A copy of the data type descriptor of D is included in C. b) If C does not include a and the domain de- scriptor of D includes a , then a copy of the of D is included in C. c) For every domain constraint descriptor included in the domain descriptor of D: i) Let TCD be a consisting of a whose is implementation-dependent, whose is derived from the of the do- main constraint descriptor by replacing every instance of VALUE by the of C, and whose are the of the domain constraint descriptor. Schema definition and manipulation 309 X3H2-92-154/DBL CBR-002 11.27 ii) If the applicable privileges of UA include all of the priv- ileges necessary for UA to successfully execute the ALTER TABLE TN ADD TCD then the following is effec- tively executed with a current of UA: ALTER TABLE TN ADD TCD d) If C does not include a collation and the of D includes a collation, then i) Let CCN be the of the collation. ii) If the applicable privileges for UA contain USAGE on CCN, then CCN is added to C as the . 2) Let A be the current . The following is effectively executed with a current of "_SYSTEM" and without further Access Rule checking: REVOKE USAGE ON DOMAIN DN FROM A CASCADE 3) The identified domain is destroyed by destroying its descriptor and its data type descriptor. Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain a . 310 Database Language SQL X3H2-92-154/DBL CBR-002 11.28 11.28 Function Define a character set. Format ::= CREATE CHARACTER SET [ AS ] [ | ] ::= GET ::= | | ::= ::= COLLATION FROM Syntax Rules 1) If a is contained in a and if the immediately contained in the contains a , then that shall be the same as the specified or implicit of the . 2) The schema identified by the explicit or implicit schema name of the shall not include a character set descriptor whose character set name is . 3) A shall identify some character set descriptor. 4) If neither nor is specified, then the following is implicit: COLLATION FROM DEFAULT Schema definition and manipulation 311 X3H2-92-154/DBL CBR-002 11.28 Access Rules 1) If a is contained in a , then the current shall be equal to the that owns the schema identified by the implicit or explicit of the . 2) The applicable privileges for the shall include USAGE. General Rules 1) A defines a character set. 2) A character set descriptor is created for the defined character set. 3) The character set has the same character repertoire as the char- acter set identified by the . 4) A privilege descriptor is created that defines the USAGE privi- lege on this character set to the of the schema or in which the appears. The grantor of the privilege descriptor is set to the special grantor value "_SYSTEM". This privilege is grantable. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) In conforming Intermediate SQL language, shall specify DEFAULT. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not specify a . 312 Database Language SQL X3H2-92-154/DBL CBR-002 11.29 11.29 Function Destroy a character set. Format ::= DROP CHARACTER SET Syntax Rules 1) Let C be the character set identified by the and let CN be the name of C. 2) The schema identified by the explicit or implicit schema name of CN shall include the descriptor of C. 3) C shall not be referenced in the of any view descriptor or in the of any constraint de- scriptor, or be included in any collation descriptor or transla- tion descriptor. Access Rules 1) The current shall be equal to the that owns the schema identified by the of the character set identified by C. General Rules 1) Let A be the current . The following is effectively executed with a current of "_SYSTEM" and without further Access Rule checking: REVOKE USAGE ON CHARACTER SET CN FROM A CASCADE 2) The descriptor of C is destroyed. Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall contain no . Schema definition and manipulation 313 X3H2-92-154/DBL CBR-002 11.30 11.30 Function Define a collating sequence. Format ::= CREATE COLLATION FOR FROM [ ] ::= NO PAD | PAD SPACE ::= | ::= | | DESC | DEFAULT ::= TRANSLATION [ THEN COLLATION ] ::= EXTERNAL ::= ::= | ::= ::= 314 Database Language SQL X3H2-92-154/DBL CBR-002 11.30 Syntax Rules 1) If a is contained in a and if the immediately contained in the contains a , then that shall be the same as the specified or implicit of the . 2) The schema identified by the explicit or implicit schema name of the shall not include a collation descriptor whose collation name is . 3) A shall be the name of a colla- tion defined by a national or international standard. An shall be the name of a collation that is implementation-defined. 4) The s and s that are supported are implementation-defined. Each collation identified by a or by a shall have associated with it a privilege descriptor that was effectively defined by the GRANT USAGE ON COLLATION COLL TO PUBLIC where COLL is the or . 5) A collating sequence specified by or shall be a collating sequence that is defined for the character repertoire of the character set with which the is associated. 6) A shall be the name of a collating se- quence that is defined in the schema identified by the explicit or implicit . 7) If a does not specify , then Case: a) If a is specified that con- tains a that identifies a collation for which the specifies NO PAD, then NO PAD is implicit. b) Otherwise, PAD SPACE is implicit. 8) If NO PAD is specified, then the collation is said to have the NO PAD attribute. If PAD SPACE is specified, then the collation is said to have the PAD SPACE attribute. Schema definition and manipulation 315 X3H2-92-154/DBL CBR-002 11.30 9) If is specified, then let T be the translation named by . Let C1 be the colla- tion being defined by the . The source character set of T shall be the same as the character set of C1. 10)If THEN COLLATION is specified, then let C2 be the collation named by in THEN COLLATION . The target character set of T shall be identical to the character set of C2. Access Rules 1) If a is contained in a , then the current shall be equal to the that owns the schema identified by the implicit or explicit of the . 2) Let C be a collation identified by any con- tained in . The applicable privileges shall include USAGE on C. 3) If is specified, then the applicable privi- leges shall include USAGE. General Rules 1) A defines a collating sequence. 2) DEFAULT specifies that the collation is to be performed us- ing the order of characters as they appear in the character repertoire. 3) If DESC is specified, then the collation is the reverse of that specified by . 4) A privilege descriptor is created that defines the USAGE priv- ilege on this collation to the current . The grantor of the privilege descriptor is set to the special grantor value "_SYSTEM". 5) This privilege descriptor is grantable if and only if the USAGE privilege for the current on the contained in the is also grantable and if the USAGE privilege for the current on the contained in the , if present, is also grantable. 6) If is specified, then Case: a) If THEN COLLATION is specified, then let C2 be the collating sequence named by the in THEN COLLATION . The collating sequence defined is obtained by effectively translating a character 316 Database Language SQL X3H2-92-154/DBL CBR-002 11.30 string using T, then applying the collating sequence of C2 to the result. b) Otherwise, the collating sequence defined is obtained by ef- fectively translating a character string using T, then apply- ing the default collating sequence for the target character set of T. 7) If is specified, then the collating se- quence defined is that given by: a) If is specified, then the national or international standard collation. b) Otherwise, the implementation-defined collation. 8) A collation descriptor is created for the defined collation. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall not contain any . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. Schema definition and manipulation 317 X3H2-92-154/DBL CBR-002 11.31 11.31 Function Destroy a collating sequence. Format ::= DROP COLLATION Syntax Rules 1) Let C be the collating sequence identified by the and let CN be the name of C. 2) The schema identified by the explicit or implicit schema name of CN shall include the descriptor of C. Access Rules 1) The current shall be equal to the that owns the schema identified by the of the collating sequence identified by C. General Rules 1) Let A be the current . The following is effectively executed with a current of "_SYSTEM" and without further Access Rule checking: REVOKE USAGE ON COLLATION CN FROM A CASCADE 2) Let CD be any collation descriptor that includes CN. CD is modi- fied by deleting any occurrences of "THEN COLLATION CN" or "DESC (CN)" 3) Let CSD be any character set descriptor that includes CN. CSD is modified by deleting any occurrences of "COLLATION FROM CN" or "DESC (CN)". 4) Let DD be any column descriptor or domain descriptor that includes CN. DD is modified by deleting any occurrences of "COLLATE CN". 5) Let VD be any view descriptor whose includes "COLLATE CN" or any constraint descriptor whose includes "COLLATE CN". VD is modified by deleting any occurrences of "COLLATE CN". 6) The descriptor of C is destroyed. 318 Database Language SQL X3H2-92-154/DBL CBR-002 11.31 Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall not contain any . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. Schema definition and manipulation 319 X3H2-92-154/DBL CBR-002 11.32 11.32 Function Define a character translation. Format ::= CREATE TRANSLATION FOR TO FROM ::= ::= ::= ::= | IDENTITY | ::= EXTERNAL ::= | ::= ::= ::= Syntax Rules 1) If a is contained in a and if the immediately contained in the contains a , then that shall be the same as the specified or implicit of the . 320 Database Language SQL X3H2-92-154/DBL CBR-002 11.32 2) The schema identified by the explicit or implicit schema name of the shall not include a translation descrip- tor whose translation name is . 3) A shall be the name of a trans- lation defined by a national or international standard. An shall be the name of a translation that is implementation-defined. 4) The s and s that are supported are implementation- defined. Each translation identified by a or by a shall have associated with it a privilege descriptor that was effectively defined by the GRANT USAGE ON TRANSLATION TRANS TO PUBLIC where TRANS is the or . 5) A shall identify a translation de- scriptor. Access Rules 1) If a is contained in a , then the current shall be equal to the that owns the schema identified by the implicit or explicit of the . 2) If is specified, then the applicable privileges shall include USAGE. General Rules 1) A defines a translation. 2) IDENTITY specifies a translation that makes no changes to the characters. 3) A translation descriptor is created for the defined translation. 4) A privilege descriptor PD is created that defines the USAGE privilege on this translation to the of the schema or in which the appears. The grantor of the privilege descriptor is set to the special grantor value "_SYSTEM". 5) PD is grantable if and only if the USAGE privilege for the of the schema or in which the appears is also grantable on every contained in the . Schema definition and manipulation 321 X3H2-92-154/DBL CBR-002 11.32 Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall contain no . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. 322 Database Language SQL X3H2-92-154/DBL CBR-002 11.33 11.33 Function Destroy a character translation. Format ::= DROP TRANSLATION Syntax Rules 1) Let T be the translation identified by the and let TN be the name of T. 2) The schema identified by the explicit or implicit schema name of TN shall include the descriptor of T. 3) T shall not be referenced in the included in any view descriptor or in the included in any constraint descriptor or be included in any collation descriptor. Access Rules 1) The current shall be equal to the that owns the schema identified by the of the translation identified by T. General Rules 1) Let CD be any collation descriptor that includes a TRANSLATION TN. CD is modified by deleting that . 2) Let CSD be any that references T. CSD is modified by deleting any occurrences of a that contains TN. 3) Let A be the current . The following is effectively executed with a current of "_SYSTEM" and without further Access Rule checking: REVOKE USAGE ON TRANSLATION TN FROM A CASCADE 4) The descriptor of T is destroyed. Schema definition and manipulation 323 X3H2-92-154/DBL CBR-002 11.33 Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall contain no . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. 324 Database Language SQL X3H2-92-154/DBL CBR-002 11.34 11.34 Function Specify an integrity constraint by means of an assertion and spec- ify the initial default time for checking the assertion. Format ::= CREATE ASSERTION [ ] ::= CHECK Syntax Rules 1) If an is contained in a and if the contains a , then that shall be the same as the explicit or implicit of the containing . 2) The schema identified by the explicit or implicit schema name of the shall not include a constraint descriptor whose constraint name is . 3) If is not specified, then INITIALLY IMMEDIATE NOT DEFERRABLE is implicit. 4) The shall not contain a or a . 5) No in the shall reference a temporary table. 6) The shall not generally contain a or a that is CURRENT_USER, SESSION_USER, or SYSTEM_USER. 7) The of shall be differ- ent from the of the of any other constraint defined in the same schema. 8) The shall not generally contain a or a that is possibly non- deterministic. Schema definition and manipulation 325 X3H2-92-154/DBL CBR-002 11.34 Access Rules 1) If an is contained in a , then the current shall be equal to the that owns the schema identified by the implicit or explicit of the of the . 2) Let TN be any referenced in the of the . If TN identifies a table described by a base table descriptor or a view descriptor, then Case: a) If a is contained in the , then the applicable privileges shall include REFERENCES for each CN of the table identified by TN, where CN is contained in the . b) Otherwise, the applicable privileges shall include REFERENCES for at least one column of the table identified by TN. General Rules 1) An defines an assertion. Note: Subclause 10.6, " and ", specifies when a constraint is effectively checked. 2) The assertion is not satisfied if and only if the result of evaluating the is false. 3) An assertion descriptor is created that describes the assertion being defined. The name included in the assertion descriptor is . The assertion descriptor includes an indication of whether the constraint is deferrable or not deferrable and whether the ini- tial constraint mode is deferred or immediate. The assertion descriptor includes the of the . 4) If the character representation of the cannot be represented in the Information Schema without truncation, then a completion condition is raised: warning-search condition too long for information schema. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall not contain any . 326 Database Language SQL X3H2-92-154/DBL CBR-002 11.34 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. Schema definition and manipulation 327 X3H2-92-154/DBL CBR-002 11.35 11.35 Function Destroy an assertion. Format ::= DROP ASSERTION Syntax Rules 1) Let A be the assertion identified by and let AN be the name of A. 2) The schema identified by the explicit or implicit schema name of AN shall include the descriptor of A. Access Rules 1) The current shall be equal to the that owns the schema identified by the of the assertion identified by AN. General Rules 1) The descriptor of A is destroyed. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall not contain any . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. 328 Database Language SQL X3H2-92-154/DBL CBR-002 11.36 11.36 Function Define privileges. Format ::= GRANT ON TO [ { }... ] [ WITH GRANT OPTION ] ::= [ TABLE ] | DOMAIN | COLLATION | CHARACTER SET | TRANSLATION Syntax Rules 1) If specifies a , , , or , then shall specify USAGE; otherwise, USAGE shall not be specified. 2) Let O be the object identified by the . 3) Let A be the current . For each specified, a set of privilege descriptors is iden- tified. The privilege descriptors identified are those defining, for each explicitly or implicitly in , that on O held by A with grant option. Access Rules 1) The applicable privileges shall include a privilege identifying O. General Rules 1) The specify one or more privileges on the object identified by the . 2) For every identified privilege descriptor, a privilege descrip- tor is created that specifies the identical , , object O, and grantor A. Let CPD be the set of privilege de- scriptors created. 3) For every identified privilege descriptor whose action is SELECT, INSERT, UPDATE, or REFERENCES without a column name, privilege descriptors are also created for each column C in O Schema definition and manipulation 329 X3H2-92-154/DBL CBR-002 11.36 for which A holds the corresponding privilege with grant op- tion. For each such column, a privilege descriptor is created that specifies the identical , the identical , object C, and grantor A. 4) If WITH GRANT OPTION was specified, each privilege descriptor also indicates that the privilege is grantable. 5) If is specified, then let T be the table identified by the . 6) For every updatable view V owned by some grantee G such that T is some leaf underlying table of the of V: a) Let VN be the of V. b) If WITH GRANT OPTION is specified, then let WGO be "WITH GRANT OPTION"; otherwise, let WGO be a zero-length string. c) For every privilege descriptor PD in CPD, let PA be the ac- tion included in PD. i) If PA is INSERT, UPDATE, or DELETE, then the following is effectively executed as though the current were "_SYSTEM" and with- out further Access Rule checking: GRANT PA ON VN TO G WGO ii) If PA is A(CT), where A is INSERT or UPDATE and CT is the name of some column of T such that there is a correspond- ing column in V, named CVN, that is derived from CT, then the following is effectively executed as though the current were "_ SYSTEM" and without further Access Rule checking: GRANT A(CVN) ON VN TO G WGO 7) For every G and for every view V1 owned by G, if G has been granted SELECT privilege WITH GRANT OPTION on all tables identified by a contained in the of V1, then for every privilege descriptor with a P that contains SELECT, a of "_SYSTEM", of V1, and G that is not grantable, the following is effectively executed with a current of "_SYSTEM" and without further Access Rule checking: GRANT P ON V1 TO G WITH GRANT OPTION 8) For every G and for every domain D1 owned by G, if G has been granted REFERENCES privilege WITH GRANT OPTION on every column referenced in the included in a domain constraint descriptor included in the domain descriptor of D1 and a grantable USAGE privilege on all domains, character sets, collations, and translations whose s, set name>s, s, and s, respec- tively, are included in the domain descriptor, and a grantable USAGE privilege for the contained in the included in the domain descriptor, then for every privilege descriptor with USAGE, a of "_ SYSTEM", D1, and G that is not grantable, the following is effectively executed with a cur- rent of "_SYSTEM" and without further Access Rule checking: GRANT USAGE ON DOMAIN D1 TO G WITH GRANT OPTION 9) For every G and for every collation C1 owned by G, if the USAGE privilege of G for the character set identified by a contained in the of C1 is grantable, then for every privilege descriptor with a P, a of "_SYSTEM", of C1, and G that is not grantable, the following is effectively executed with a current of "_SYSTEM" and without further Access Rule checking: GRANT P ON COLLATION C1 TO G WITH GRANT OPTION 10)For every G and for every translation T1 owned by G, if the USAGE privilege of G for every character set identified by a contained in the of T1 is grantable, then for every privilege descriptor with a P, a of "_SYSTEM", of T1, and G that is not grantable, the fol- lowing is effectively executed as though the current were "_SYSTEM" and without further Access Rule checking: GRANT P ON TRANSLATION T1 TO G WITH GRANT OPTION 11)If is specified, then for each view V owned by some G such that T or some column CT of T, let RTi, for i ranging from 1 to the number of tables identified by the s contained in the of V, be the s of those tables. For every column CV of V: a) Let CRij, for j ranging from 1 to the number of columns of RTi that are underlying columns of CV, be the s of those columns. b) If WITH GRANT OPTION was specified, then let WGO be "WITH GRANT OPTION"; otherwise, let WGO be a zero-length string. c) If, following successful execution of the , G will have REFERENCES(CRTij) for all i and for all j, and A has REFERENCES on some column of RTi for all i, the the following is effectively executed as though Schema definition and manipulation 331 X3H2-92-154/DBL CBR-002 11.36 the current were "_SYSTEM" and without further Access Rule checking: GRANT REFERENCES CV ON V TO G WGO 12)If two privilege descriptors are identical except that one in- dicates that the privilege is grantable and the other indicates that the privilege is not grantable, then both privilege de- scriptors are set to indicate that the privilege is grantable. 13)Redundant duplicate privilege descriptors are removed from the multiset of all privilege descriptors. 14)For every combination of and on O specified in , if there is no corresponding privilege de- scriptor in the set of identified privilege descriptors, then a completion condition is raised: warning-privilege not granted. 15)If ALL PRIVILEGES was specified, then for each grantee, if no privilege descriptors were identified, then a completion condi- tion is raised: warning-privilege not granted. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) In Conforming Intermediate SQL language, an shall not specify COLLATION or TRANSLATION. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) In Conforming Entry SQL language, an shall not specify TABLE. b) In Conforming Entry SQL language, an shall not specify CHARACTER SET or DOMAIN. 332 Database Language SQL X3H2-92-154/DBL CBR-002 11.37 11.37 Function Destroy privileges. Format ::= REVOKE [ GRANT OPTION FOR ] ON FROM [ { }... ] Syntax Rules 1) If specifies a , , , or , then shall specify USAGE; otherwise, USAGE shall not be specified. 2) INSERT is equivalent to specifying both the INSERT table priv- ilege and INSERT () for all columns of . 3) UPDATE is equivalent to specifying both the UPDATE table priv- ilege and UPDATE () for all columns of . 4) REFERENCES is equivalent to specifying both the REFERENCES ta- ble privilege and REFERENCES () for all columns of . 5) Let O be the object identified by the . 6) Let A be the current . For every specified, a set of privilege descriptors is iden- tified. A privilege descriptor is said to be identified if it belongs to the set of privilege descriptors that define, for any explicitly or implicitly in , that on O granted by A to . Note: Column privilege descriptors become identified when explicitly or implicitly contains a . 7) A privilege descriptor D is allowed to be created by a grant permitted by P if either: a) The following conditions hold: i) P indicates that the privilege that it represents is grantable, and Schema definition and manipulation 333 X3H2-92-154/DBL CBR-002 11.37 ii) The grantee of P is the same as the grantor of D or the grantee of P is PUBLIC, and iii) Case: 1) P and D are both column privilege descriptors. The ac- tion and the identified column of P are the same as the action and identified column of D, respectively. 2) P is a table privilege descriptor and D is a column privilege descriptor. The identified table of P is the same as the identified table of D and the action of P is the same as the action of D and the action of P is SELECT. 3) Neither P nor D are column privilege descriptors. The action and the identified table, domain, character set, collation, or translation of P are the same as the ac- tion and the identified table, domain, character set, collation, or translation of D, respectively. b) The following conditions hold: i) The privilege descriptor for D indicates that its grantor is the special grantor value "_SYSTEM", and ii) The action of P is the same as the action of D, and iii) The grantee of P is the owner of the table, collation, or translation identified by D, or the grantee of P is PUBLIC, and iv) One of the following conditions hold: 1) P and D are both table privilege descriptors, the priv- ilege descriptor for D identifies the of a V and either: A) The action of P is SELECT and the identified table of P is contained in the of V, or B) V is an updatable view and the identified table of P is the underlying table of the . 2) P and D are both column privilege descriptors, the priv- ilege descriptor D identifies a CVN ex- plicitly or implicitly contained in the of a V and V is an updatable view. For every column CV identified by a CVN, there is a corresponding column in the underly- ing table of the TN. Let CTN be the of the column of the from which CV is derived. The action for P is UPDATE or INSERT and the identified column of P is TN.CTN. 334 Database Language SQL X3H2-92-154/DBL CBR-002 11.37 3) P is a table privilege descriptor and the column privi- lege descriptor D identifies a CV explic- itly or implicitly contained in the of a V. Let TN be a con- tained in the of the view. The action for P is SELECT and the identified table of P is TN. 4) The privilege descriptor D identifies the of a CO and the identified character set name of P is contained in the immediately contained in CO. 5) The privilege descriptor D identifies the of a TD and the identi- fied character set name of P is contained in the or the immediately contained in TD. 8) A privilege descriptor D is said to be directly dependent on an- other privilege descriptor P if D represents a privilege allowed to be created by a grant permitted by P. 9) The privilege dependency graph is a directed graph such that: a) Each node represents a privilege descriptor, and b) Each arc from node P1 to node P2 represents the fact that P2 directly depends on P1. An independent node is one that has no incoming arcs. 10)A privilege descriptor P is said to be modified if either P is a SELECT column privilege descriptor and a SELECT table privilege descriptor with the same grantee, grantor, catalog name, schema name, and table name is a modified privilege descriptor, or: a) P indicates that the privilege that it represents is grantable, and b) P directly depends on an identified privilege descriptor or a modified privilege descriptor, and c) Let XO and XA respectively be the identifier of the object identified by a privilege descriptor X and the action of X. Within the set of privilege descriptors upon which P directly depends, there exists some XO and XA for which the set of identified privilege descriptors unioned with the set of mod- ified privilege descriptors include all privilege descriptors specifying the grant of XA on XO with grant option, and d) At least one of the following is true: i) GRANT OPTION FOR is specified and the grantor of P is the special grantor value "_SYSTEM". Schema definition and manipulation 335 X3H2-92-154/DBL CBR-002 11.37 ii) There exists a path to P from an independent node that includes no identified or modified privilege descriptors. P is said to be a marked modified privilege descriptor. iii) P directly depends on a marked modified privilege descrip- tor, and the grantor of P is the special grantor value "_SYSTEM". P is said to be a marked modified privilege descriptor. 11)A privilege descriptor P is abandoned if: a) It is not an independent node, and Case: i) GRANT OPTION FOR is not specified, P is not itself a mod- ified privilege descriptor, and there exists no path to P from any independent node other than paths that include an identified privilege descriptor or a modified privilege descriptor. ii) GRANT OPTION FOR is specified, P is not itself a modi- fied privilege descriptor, and there exists no path to P from any independent node other than paths that include a modified privilege descriptor. b) P is a SELECT column privilege descriptor and there exists a SELECT table privilege descriptor X with the same grantee, grantor, catalog name, schema name, and table name and Case: i) GRANT OPTION FOR is not specified and X is an identified privilege descriptor or an abandoned privilege descriptor. ii) GRANT OPTION FOR is specified and X is an abandoned privi- lege descriptor. 12)Let S1 be the name of any schema and let A1 be the that owns the schema identified by S1. 13)Let V be any view descriptor included in S1. V is said to be abandoned if the destruction of all abandoned privilege descrip- tors and, if GRANT OPTION FOR is not specified, all identified privilege descriptors would result in A1 no longer having SELECT privilege on one or more tables or USAGE privilege on one or more domains, collations, character sets, or translations whose names are contained in the of V. 14)Let TC be any table constraint descriptor included in S1. TC is said to be abandoned if the destruction of all abandoned privi- lege descriptors and, if GRANT OPTION FOR is not specified, all identified privilege descriptors would result in A1 no longer having REFERENCES privilege on one or more referenced columns of TC or USAGE privilege on one or more domains, collations, 336 Database Language SQL X3H2-92-154/DBL CBR-002 11.37 character sets, or translations whose names are contained in any of TC. 15)Let AX be any assertion descriptor included in S1. AX is said to be abandoned if the destruction of all abandoned privilege descriptors and, if GRANT OPTION FOR is not specified, all iden- tified privilege descriptors would result in A1 no longer having REFERENCES privilege on one or more referenced columns of AX or USAGE privilege on one or more domains, collations, character sets, or translations whose names are contained in any of AX. 16)Let DC be any domain constraint descriptor included in S1. DC is said to be abandoned if the destruction of all abandoned privi- lege descriptors and, if GRANT OPTION FOR is not specified, all identified privilege descriptors would result in A1 no longer having REFERENCES privilege on one or more referenced columns of DC or USAGE privilege on one or more domains, collations, character sets, or translations whose names are contained in any of DC. 17)Let DO be any domain descriptor included in S1. DO is said to be abandoned if the destruction of all abandoned privilege descrip- tors and, if GRANT OPTION FOR is not specified, all identified privilege descriptors would result in A1 no longer having USAGE privilege on the collation whose name is contained in the of DO, if any. 18)If RESTRICT is specified, then there shall be no abandoned priv- ilege descriptors, abandoned views, abandoned table constraints, abandoned assertions, abandoned domain constraints, or abandoned domains. Access Rules 1) The applicable privileges shall include a privilege identifying O. General Rules 1) If GRANT OPTION FOR is not specified, then: a) All abandoned privilege descriptors are destroyed, and b) The identified privilege descriptors are destroyed, and c) The modified privilege descriptors are set to indicate that they are not grantable. 2) If GRANT OPTION FOR is specified, then Case: a) If CASCADE is specified, then all abandoned privilege de- scriptors are destroyed. Schema definition and manipulation 337 X3H2-92-154/DBL CBR-002 11.37 b) Otherwise, if there are any privilege descriptors directly dependent on an identified privilege descriptor that are not modified privilege descriptors, then an exception condition is raised: dependent privilege descriptors still exist. The identified privilege descriptors and the modified privilege descriptors are set to indicate that they are not grantable. 3) For every abandoned view descriptor V, let S1.VN be the of V. The following is effectively executed without further Access Rule checking: DROP VIEW S1.VN CASCADE 4) For every abandoned table constraint descriptor TC, let S1.TCN be the of TC and let S2.T2 be the of the table that contains TC (S1 and S2 not necessarily dif- ferent). The following is effectively executed without further Access Rule checking: ALTER TABLE S2.T2 DROP CONSTRAINT S1.TCN CASCADE 5) For every abandoned assertion descriptor AX, let S1.AXN be the of AX. The following is effectively executed without further Access Rule check- ing: DROP ASSERTION S1.AXN 6) For every abandoned domain constraint descriptor DC, let S1.DCN be the of DC and let S2.DN be the of the domain that contains DC. The following is effectively executed without further Access Rule checking: ALTER DOMAIN S2.DN DROP CONSTRAINT S1.DCN 7) For every abandoned domain descriptor DO, let S1.DN be the of DO. The following is effectively executed without further Access Rule checking: DROP DOMAIN S1.DN CASCADE 8) For every combination of and on O specified in , if there is no corresponding privilege de- scriptor in the set of identified privilege descriptors, then a completion condition is raised: warning-privilege not revoked. 9) If ALL PRIVILEGES was specified, then for each , if no privilege descriptors were identified, then a completion condition is raised: warning-privilege not revoked. 338 Database Language SQL X3H2-92-154/DBL CBR-002 11.37 Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain a . Schema definition and manipulation 339 X3H2-92-154/DBL CBR-002 340 Database Language SQL X3H2-92-154/DBL CBR-002 12 Module 12.1 Function Define a module. Format ::= [ ... ] ... ::= SCHEMA | AUTHORIZATION | SCHEMA AUTHORIZATION ::= ::= | | Syntax Rules 1) If SCHEMA is not specified, then a equal to is implicit. 2) If the explicit or implicit does not specify a , then an implementation-defined is implicit. 3) The implicit or explicit is the implicit for all unqualified s in the . 4) A or shall precede in the text of the any that references the of the or . Module 341 X3H2-92-154/DBL CBR-002 12.1 5) For every in a , there shall be exactly one in that that contains an that specifies the declared in the . Note: See the Syntax Rules of Subclause 13.1, "". Access Rules None. General Rules 1) If the SQL-agent that performs a call of a in a is not a program that conforms to the programming language standard specified by the of that , then the effect is implementation-dependent. 2) If the SQL-agent performs calls of s from more than one Ada task, then the results are implementation-dependent. 3) Case: a) If a is specified, then it is the current for privilege determination for the execution of each in the . b) Otherwise, the current for privi- lege determination for the execution of each in the is the SQL-session . 4) After the last time that an SQL-agent performs a call of a : a) A or a is effec- tively executed. If an unrecoverable error has occurred, or if the SQL-agent terminated unexpectedly, or if any con- straint is not satisfied, then a is performed. Otherwise, the choice of which of these SQL- statements to perform is implementation-dependent. The deter- mination of whether an SQL-agent has terminated unexpectedly is implementation-dependent. b) Let D be the of any system descriptor area that is currently allocated within an SQL-session associated with the SQL-agent. A that specifies DEALLOCATE DESCRIPTOR D is effectively executed. 342 Database Language SQL X3H2-92-154/DBL CBR-002 12.1 c) All SQL-sessions associated with the SQL-agent are termi- nated. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) A shall not contain a . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) A shall be associated with an SQL-agent during its execution. An SQL-agent shall be associated with at most one . b) A shall not be a . c) A shall specify AUTHORIZATION and shall not specify SCHEMA. Module 343 X3H2-92-154/DBL CBR-002 12.2 12.2 Function Name a . Format ::= MODULE [ ] [ ] ::= NAMES ARE Syntax Rules 1) If a does not specify a , then the is unnamed. 2) The shall be different from the of any other in the same SQL-environment. Note: An SQL-environment may have multiple s that are unnamed. 3) If the of the containing specifies ADA, then a shall be specified, and that shall be a valid Ada library unit name. 4) If a is not specified, then a that specifies an implementation-defined character set that contains at least every character that is in is implicit. Access Rules None. General Rules 1) If a is specified, then in the SQL-environment the containing has the name given by . Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 344 Database Language SQL X3H2-92-154/DBL CBR-002 12.2 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) A shall not be speci- fied. Module 345 X3H2-92-154/DBL CBR-002 12.3 12.3 Function Define a procedure. Format ::= PROCEDURE ::= [ { }... ] | ... ::= | ::= SQLCODE | SQLSTATE Syntax Rules 1) The shall be different from the of any other in the containing . Note: The should be a standard-conforming pro- cedure, function, or routine name of the language specified by the subject . Failure to observe this recommen- dation will have implementation-dependent effects. 2) The of each in a shall be different from the of any other in that . 3) Any contained in the of a shall be specified in a in that . Note: s in a without enclos- ing parentheses and without commas separating multiple s is a deprecated feature that is supported for compatibility with earlier versions of this International Standard. See Annex D, "Deprecated features". 4) A call of a shall supply n parameters, where n is the number of s in the . 346 Database Language SQL X3H2-92-154/DBL CBR-002 12.3 5) A shall contain at least one , at most one that specifies SQLCODE, and at most one that specifies SQLSTATE. A parameter that corresponds with SQLCODE is referred to as an SQLCODE parameter. A parameter that corresponds with SQLSTATE is referred to as an SQLSTATE parameter. The SQLCODE and SQLSTATE parameters are referred to as status parameters. Note: SQLSTATE is the preferred status parameter. The SQLCODE status parameter is a deprecated feature that is supported for compatibility with earlier versions of this International Standard. See Annex D, "Deprecated features". 6) Whether a is for an input parameter, an output parameter, or both is determined as follows: Case: a) A is an output parameter. b) For every that is not a , Case: i) If the of a parameter is contained in a or a that is contained in , but it is not contained in a or a that is contained in , then the parameter is an input parameter. ii) If the of a parameter is contained in a or a that is contained in , but it is not contained in a or a that is contained in , then the parameter is an output parameter. iii) If the of a parameter is contained in a or a that is contained in and it is contained in a or a that is contained in , then the parameter is both an input parameter and an output parameter. iv) Otherwise, the parameter is neither an input parameter nor an output parameter. 7) The Syntax Rules of Subclause 12.4, "Calls to a ", shall be true. Module 347 X3H2-92-154/DBL CBR-002 12.3 Access Rules None. General Rules 1) A defines a procedure that may be called by an SQL- agent. 2) If the that contains the is associated with an SQL-agent that is associated with another that contains a with the same , then the effect is implementation-defined. 3) If the that contains the has an explicit MAI that is different from the SQL-session SAI, then: a) Whether or not SAI can invoke s in a with explicit MAI is implementation-defined, as are any restrictions pertaining to such invocation. b) If SAI is restricted from invoking a in a with explicit MAI, then an exception condition is raised: invalid authorization specification. 4) If the value of any input parameter provided by the SQL-agent falls outside the set of allowed values of the data type of the parameter, or if the value of any output parameter resulting from the execution of the falls outside the set of values supported by the SQL-agent for that parameter, then the effect is implementation-defined. If the implementation- defined effect is the raising of an exception condition, then an exception condition is raised: data exception-invalid parameter value. 5) Let S be the of the . 6) When the is called by an SQL-agent: Case: a) If S is an , then: i) The that contains S is associated with the SQL- agent. ii) The diagnostics area is emptied. iii) S is executed. 348 Database Language SQL X3H2-92-154/DBL CBR-002 12.3 iv) If S successfully initiated or resumed an SQL-session, then subsequent calls to a by the SQL-agent are associated with that SQL-session until the SQL-agent terminates the SQL-session or makes it dormant. b) If S is an , then: i) The that contains S is associated with the SQL- agent. ii) S is executed. c) Otherwise: i) If no SQL-session is current for the SQL-agent, then Case: 1) If the SQL-agent has not executed an and there is no default SQL-session asso- ciated with the SQL-agent, then the following is effectively executed: CONNECT TO DEFAULT 2) If the SQL-agent has not executed an and there is a default SQL-session associated with the SQL-agent, then the following is effectively executed: SET CONNECTION DEFAULT 3) Otherwise, an exception condition is raised: connection exception-connection does not exist. Subsequent calls to a or invocations of s by the SQL-agent are associated with the SQL-session until the SQL-agent terminates the SQL-session or makes it dormant. ii) If an SQL-transaction is active for the SQL-agent, then S is associated with that SQL-transaction. iii) If no SQL-transaction is active for the SQL-agent and S is a transaction-initiating SQL-statement, then 1) An SQL-transaction is effectively initiated and asso- ciated with this call and with subsequent calls of any or invocations of s by that SQL-agent until the SQL-agent terminates that SQL-transaction. 2) Case: A) If a has been executed since the termination of the last SQL-transaction in the SQL-session, then the access mode, constraint Module 349 X3H2-92-154/DBL CBR-002 12.3 mode, and isolation level of the SQL-transaction are set as specified by the . B) Otherwise, the access mode of that SQL-transaction is read-write, the constraint mode for all constraints in that SQL-transaction is immediate, and the isolation level of that SQL-transaction is SERIALIZABLE. 3) The SQL-transaction is associated with the SQL-session. 4) The that contains S is associated with the SQL-transaction. iv) The that contains S is associated with the SQL- agent. v) If S contains an and the access mode of the current SQL-transaction is read-only, then an exception condition is raised: invalid transaction state. vi) The diagnostics area is emptied. vii) The values of all input parameters to the are established. viii)S is executed. 7) If the non-dynamic or dynamic execution of an or the execution of an , , or occurs within the same SQL-transaction as the non-dynamic or dynamic execution of an SQL-schema statement and this is not allowed by the SQL-implementation, then an exception condition is raised: invalid transaction state. 8) When a is called by an SQL-agent, let PDi be the of the i-th parameter and let DTi and PNi be the and the specified in PDi, respectively. Let PIi be the i-th parameter in the proce- dure call. 9) If S is a | | | | | | | ::= | | | | | | | | | | ::= | | | 13.5 ::= SELECT [ ] shall be the same as the number of elements in the corresponds with the i-th element of the and are those specified in the . S shall be a valid . Access Rules None. General Rules 1) Let Q be the result of S. 2) Case: a) If the cardinality of Q is greater than 1, then an ex- ception condition is raised: cardinality violation. It is implementation-dependent whether or not SQL-data values are assigned to the targets identified by the , and a comple- tion condition is raised: no data. 382 Database Language SQL X3H2-92-154/DBL CBR-002 13.5 is in an implementation-dependent order. 5) If an exception condition is raised during the assignment of a value to a target, then the values of targets are implementation- dependent. 6) The target identified by the i-th of the is an exact numeric type, then the data type of the i-th column of the table T shall be an exact numeric type. b) The shall not include a or a and shall not identify a grouped view. Data manipulation 383 X3H2-92-154/DBL CBR-002 13.6 13.6 Function Delete a row of a table. Format ::= DELETE FROM WHERE CURRENT OF Syntax Rules 1) The containing shall contain a whose is the same as the in the . Let CR be the cursor specified by . 2) CR shall be an updatable cursor. Note: updatable cursor is defined in Subclause 13.1, "". 3) Let T be the table identified by the . Let QS be the that is the simply underlying table of the simply underlying table of CR. The simply underlying table of QS shall be T. Note: The simply underlying table of a is defined in Subclause 13.1, "". Access Rules 1) The applicable privileges shall include DELETE for the . Note: The applicable privileges for a are defined in Subclause 10.3, "". General Rules 1) If the access mode of the current SQL-transaction is read-only and T is not a temporary table, then an exception condition is raised: invalid transaction state. 2) If cursor CR is not positioned on a row, then an exception con- dition is raised: invalid cursor state. 3) The row from which the current row of CR is derived is marked for deletion. 4) If, while CR is open, the row from which the current row of CR is derived has been marked for deletion by any , marked for deletion by any that identifies any cursor other than CR, updated 384 Database Language SQL X3H2-92-154/DBL CBR-002 13.6 by any , or updated by any that identifies any cursor other than CR, then a completion condition is raised: warning-cursor operation conflict. 5) All rows that are marked for deletion are effectively deleted at the end of the prior to the checking of any integrity constraint. 6) If the deleted the last row of CR, then the position of CR is after the last row; otherwise, the position of CR is before the next row. Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. Data manipulation 385 X3H2-92-154/DBL CBR-002 13.7 13.7 Function Delete rows of a table. Format ::= DELETE FROM [ WHERE ] Syntax Rules 1) Let T be the table identified by the . T shall not be a read-only table. 2) The scope of the is the entire . Access Rules 1) The applicable privileges shall include DELETE for the . Note: The applicable privileges for a are defined in Subclause 10.3, "". General Rules 1) If the access mode of the current SQL-transaction is read-only and T is not a temporary table, then an exception condition is raised: invalid transaction state. 2) Case: a) If is not specified, then all rows of T are marked for deletion. b) If is specified, then it is applied to each row of T with the bound to that row, and all rows for which the result of the is true are marked for deletion. The is effectively evaluated for each row of T before marking for deletion any row of T. Each in the is effectively executed for each row of T and the results used in the ap- plication of the to the given row of T. If any executed contains an outer reference to a column of T, the reference is to the value of that column in the given row of T. Note: Outer reference is defined in Subclause 6.4, "". 386 Database Language SQL X3H2-92-154/DBL CBR-002 13.7 3) If any row that is marked for deletion by the has been marked for deletion by any that identifies some cursor CR that is still open or updated by any that identifies some cursor CR that is still open, then a completion condition is raised: warning-cursor operation conflict. 4) All rows that are marked for deletion are effectively deleted at the end of the prior to the checking of any integrity constraint. 5) If no row is deleted, then a completion condition is raised: no data. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) No leaf generally underlying table of T shall be an under- lying table of any generally contained in the . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. Data manipulation 387 X3H2-92-154/DBL CBR-002 13.8 13.8 Function Create new rows in a table. Format ::= INSERT INTO ::= [ ] | DEFAULT VALUES ::= Syntax Rules 1) The table T identified by the shall not be a read- only table. 2) An that specifies DEFAULT VALUES is equivalent to an that specifies a of the form VALUES (DEFAULT, . . . ) where the number of "DEFAULT" entries is equal to the number of columns of T. 3) No of T shall be identified more than once. If the is omitted, then an that identifies all columns of T in the ascending sequence of their ordinal positions within T is implicit. 4) A column identified by the is an object column. 5) Let QT be the table specified by the . The degree of QT shall be equal to the number of s in the . The column of table T identified by the i-th in the corresponds with the i-th column of QT. 6) The Syntax Rules of Subclause 9.2, "Store assignment", apply to corresponding columns of T and QT as TARGET and VALUE, respec- tively. 388 Database Language SQL X3H2-92-154/DBL CBR-002 13.8 Access Rules 1) Case: a) If an is specified, then the applicable shall include INSERT for each in the . b) Otherwise, the applicable privileges shall include INSERT for each in T. Note: The applicable privileges for a are defined in Subclause 10.3, "". 2) Each in the shall identify a column of T. General Rules 1) If the access mode of the current SQL-transaction is read-only and T is not a temporary table, then an exception condition is raised: invalid transaction state. 2) Let B be the leaf generally underlying table of T. 3) The is effectively evaluated before inserting any rows into B. 4) Let Q be the result of that . Case: a) If Q is empty, then no row is inserted and a completion con- dition is raised: no data. b) Otherwise, for each row R of Q: i) A candidate row of B is effectively created in which the value of each column is its default value, as specified in the General Rules of Subclause 11.5, "". The candidate row includes every column of B. ii) For every object column in the candidate row, the value of the object column identified by the i-th in the is replaced by the i-th value of R. iii) Let C be a column that is represented in the candidate row and let SV be its value in the candidate row. The General Rules of Subclause 9.2, "Store assignment", are applied to C and SV as TARGET and VALUE, respectively. iv) The candidate row is inserted into B. Note: The data values allowable in the candidate row may be constrained by a WITH CHECK OPTION constraint. The effect of a WITH CHECK OPTION constraint is defined in the General Rules of Subclause 11.19, "". Data manipulation 389 X3H2-92-154/DBL CBR-002 13.8 Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) The leaf generally underlying table of T shall not be gen- erally contained in the immediately contained in the except as the of a . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) The that is contained in an shall be a or it shall be a that contains exactly one of the form " ", and each of that shall be a . b) If the data type of the target identified by the i-th is an exact numeric type, then the data type of the i- th item of the shall be an exact numeric type. c) If the data type of the target C identified by the i-th is character string, then the length in characters of the i-th item of the shall be less than or equal to the length of C. d) The shall immediately contain a . 390 Database Language SQL X3H2-92-154/DBL CBR-002 13.9 13.9 Function Update a row of a table. Format ::= UPDATE SET WHERE CURRENT OF ::= [ { }... ] ::= ::= | | DEFAULT ::= Syntax Rules 1) The containing shall contain a for a cursor whose is the same as the in the . Let CR be the cursor specified by . 2) CR shall be an updatable cursor. Note: updatable cursor is defined in Subclause 13.1, "". 3) Let T be the table identified by the . Let QS be the that is the simply underlying table of the simply underlying table of CR. The simply underlying table of QS shall be T. Note: The simply underlying table of a is defined in Subclause 13.1, "". 4) If CR is an ordered cursor, then for each OC, the column of T identified by OC shall not be directly or in- directly referenced in the of the defining for CR. Data manipulation 391 X3H2-92-154/DBL CBR-002 13.9 5) No leaf generally underlying table of T shall be an underly- ing table of any generally contained in any immediately contained in any contained in the . 6) A in a shall not directly con- tain a . 7) The same shall not appear more than once in a . 8) If the cursor identified by was specified using an explicit or implicit of FOR UPDATE, then each specified as an shall identify a column in the explicit or implicit associated with the . 9) The scope of the is the entire . 10)For every , the Syntax Rules of Subclause 9.2, "Store assignment", apply to the column of T identified by the and the of the as TARGET and VALUE, respectively. Access Rules 1) The applicable privileges shall include UPDATE for each . Note: The applicable privileges for a are defined in Subclause 10.3, "". 2) Each specified as an shall iden- tify a column of T. General Rules 1) If the access mode of the current SQL-transaction is read-only and T is not a temporary table, then an exception condition is raised: invalid transaction state. 2) If cursor CR is not positioned on a row, then an exception con- dition is raised: invalid cursor state. 3) The object row is that row from which the current row of CR is derived. 4) If, while CR is open, the object row has been marked for dele- tion by any , marked for deletion by any that identifies any cursor other than CR, updated by any , or updated by any that identifies any cursor other than CR, then a completion condition is raised: warning-cursor operation conflict. 392 Database Language SQL X3H2-92-154/DBL CBR-002 13.9 5) The value of DEFAULT is the default value indicated in the col- umn descriptor for the in the containing . 6) The s are effectively evaluated before updat- ing the object row. If a contains a reference to a column of T, then the reference is to the value of that column in the object row before any value of the object row is updated. 7) CR remains positioned on its current row, even if an exception condition is raised during derivation of any associated with the object row. 8) A specifies an object column and an update value of that column. The object column is the column identified by the in the . The update value is the value specified by the . Note: The data values allowable in the object row may be con- strained by a WITH CHECK OPTION constraint. The effect of a WITH CHECK OPTION constraint is defined in the General Rules of Subclause 11.19, "". 9) The object row is updated as specified by each . For each , the value of the specified object column, denoted by C, is replaced by the specified update value, denoted by SV. The General Rules of Subclause 9.2, "Store assignment", are applied to C and SV as TARGET and VALUE, respectively. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) CR shall not be an ordered cursor. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) If the data type of the column identified by the i-th is an exact numeric type, then the data type of the i-th in the shall be an exact numeric type. b) If the data type of the column identified by the i-th C is character string, then the length in characters of the i-th in the shall be less than or equal to the length of C. c) An shall not specify DEFAULT. Data manipulation 393 X3H2-92-154/DBL CBR-002 13.10 13.10 Function Update rows of a table. Format ::= UPDATE SET [ WHERE ] Syntax Rules 1) Let T be the table identified by the . T shall be an updatable table. 2) A in a shall not directly con- tain a . 3) The same shall not appear more than once in a . 4) The scope of the is the entire . 5) For every , the Syntax Rules of Subclause 9.2, "Store assignment", apply to the column of T identified by the and the of the as TARGET and VALUE, respectively. Access Rules 1) The applicable privileges shall include UPDATE for each . Note: The applicable privileges for a are defined in Subclause 10.3, "". 2) Each specified as an shall iden- tify a column of T. General Rules 1) If the access mode of the current SQL-transaction is read-only and T is not a temporary table, then an exception condition is raised: invalid transaction state. 2) Case: a) If a is not specified, then all rows of T are the object rows. 394 Database Language SQL X3H2-92-154/DBL CBR-002 13.10 b) If a is specified, then it is applied to each row of T with the bound to that row, and the object rows are those rows for which the result of the is true. The is effectively evaluated for each row of T before updating any row of T. Each in the is effectively executed for each row of T and the results used in the ap- plication of the to the given row of T. If any executed contains an outer reference to a column of T, the reference is to the value of that column in the given row of T. Note: Outer reference is defined in Subclause 6.4, "". 3) If any row in the set of object rows has been marked for dele- tion by any that identifies some cursor CR that is still open or updated by any that identifies some cursor CR that is still open, then a completion condition is raised: warning-cursor operation conflict. 4) If the set of object rows is empty, then a completion condition is raised: no data. 5) If a completion condition no data has been raised, then no fur- ther General Rules of this Subclause are applied. 6) The s are effectively evaluated for each row of T before updating any row of T. 7) A specifies an object column and an update value of that column. The object column is the column identified by the in the . The update value is the value specified by the . Note: The data values allowable in the object row may be con- strained by a WITH CHECK OPTION constraint. The effect of a WITH CHECK OPTION constraint is defined in the General Rules of Subclause 11.19, "". 8) Each object row is updated as specified by each . For each , the value of the specified object column, denoted by C, is replaced by the specified update value, denoted by SV. The General Rules of Subclause 9.2, "Store assignment", are applied to C and SV as TARGET and VALUE, respectively. Data manipulation 395 X3H2-92-154/DBL CBR-002 13.10 Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) No leaf generally underlying table of T shall be an under- lying table of any generally contained in the or in any immedi- ately contained in any contained in the . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) If the data type of the column identified by the i-th is an exact numeric type, then the data type of the i-th in the shall be an exact numeric type. b) If the data type of the column identified by the i-th C is character string, then the length in characters of the i-th in the shall be less than or equal to the length of C. 396 Database Language SQL X3H2-92-154/DBL CBR-002 13.11 13.11 Function Declare a declared local temporary table that will be effectively materialized the first time that any in the that contains the is executed and whose scope is all the s of that executed within the same SQL-session. Format ::= DECLARE LOCAL TEMPORARY TABLE [ ON COMMIT { PRESERVE | DELETE } ROWS ] Syntax Rules 1) Let T be the of . T shall be different from the of any other contained within the . 2) Let A be the current . 3) The descriptor of the table defined by a includes the name of T and the column descriptor speci- fied by each . The i-th column descriptor is given by the i-th . 4) A shall contain at least one . 5) If ON COMMIT is not specified, then ON COMMIT DELETE ROWS is implicit. Access Rules None. General Rules 1) Let U be the implementation-dependent that is effectively derived from the implementation-dependent SQL- session identifier associated with the SQL-session and an implementation-dependent name associated with the that contains the . 2) The definition of T within a is effectively equivalent to the definition of a persistent base table U.T. Within the module, any reference to MODULE.T is equivalent to a reference to U.T. Data manipulation 397 X3H2-92-154/DBL CBR-002 13.11 3) A set of privilege descriptors is created that define the priv- ileges INSERT, SELECT, UPDATE, DELETE, and REFERENCES on this table and INSERT (), UPDATE (), and REFERENCES () for every in the table definition to A. These privileges are not grantable. The grantor for each of these privilege descriptors is set to the special grantor value "_SYSTEM". 4) The definition of a temporary table persists for the duration of the SQL-session. The termination of the SQL-session is effec- tively followed by the execution of the following with the current and current U without further Access Rule checking: DROP TABLE T 5) The definition of a declared local temporary table does not appear in any view of the Information Schema. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall not contain any . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. 398 Database Language SQL X3H2-92-154/DBL CBR-002 14 Transaction management 14.1 Function Set the attributes of the next SQL-transaction for the SQL-agent. Format ::= SET TRANSACTION [ { }... ] ::= | | ::= READ ONLY | READ WRITE ::= ISOLATION LEVEL ::= READ UNCOMMITTED | READ COMMITTED | REPEATABLE READ | SERIALIZABLE ::= DIAGNOSTICS SIZE ::= Syntax Rules 1) No shall be specified more than once. 2) If an is not specified, then a of ISOLATION LEVEL SERIALIZABLE is implicit. 3) If READ WRITE is specified, then the shall not be READ UNCOMMITTED. Transaction management 399 X3H2-92-154/DBL CBR-002 14.1 4) If a is not specified and a of READ UNCOMMITTED is specified, then READ ONLY is implicit. Otherwise, READ WRITE is implicit. 5) The data type of shall be exact numeric with scale 0. Access Rules None. General Rules 1) If a statement is executed when an SQL-transaction is currently active, then an exception condition is raised: invalid transaction state. 2) If is specified and is less than 1, then an exception condition is raised: invalid condition number. 3) Let TXN be the next SQL-transaction for the SQL-agent. 4) If READ ONLY is specified, then the access mode of TXN is set to read-only. If READ WRITE is specified, then the access mode of TXN is set to read-write. 5) The isolation level of TXN is set to an implementation-defined isolation level that will not exhibit any of the phenomena that the explicit or implicit would not exhibit, as specified in Table 9, "SQL-transaction isolation levels and the three phenomena". 6) If is specified, then the diagnostics area limit of TXN is set to . 7) If is not specified, then the diagnostics area limit of TXN is set to an implementation-dependent value not less than 1. Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain any . 400 Database Language SQL X3H2-92-154/DBL CBR-002 14.2 14.2 Function If an SQL-transaction is currently active, then set the constraint mode for that SQL-transaction in the current SQL-session. If no SQL-transaction is currently active, then set the constraint mode for the next SQL-transaction in the current SQL-session for the SQL-agent. Format ::= SET CONSTRAINTS { DEFERRED | IMMEDIATE } ::= ALL | [ { }... ] Syntax Rules 1) If a is specified, then it shall identify a constraint. 2) The constraint identified by shall be DEFERRABLE. Access Rules None. General Rules 1) If an SQL-transaction is currently active, then let TXN be the currently active SQL-transaction. Otherwise, let TXN be the next SQL-transaction for the SQL-agent. 2) If IMMEDIATE is specified, then Case: a) If ALL is specified, then the constraint mode in TXN of all constraints that are DEFERRABLE is set to immediate. b) Otherwise, the constraint mode in TXN for the constraints identified by the s in the is set to immediate. 3) If DEFERRED is specified, then Transaction management 401 X3H2-92-154/DBL CBR-002 14.2 Case: a) If ALL is specified, then the constraint mode in TXN of all constraints that are DEFERRABLE is set to deferred. b) Otherwise, the constraint mode in TXN for the constraints identified by the s in the is set to deferred. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall not contain any . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. 402 Database Language SQL X3H2-92-154/DBL CBR-002 14.3 14.3 Function Terminate the current SQL-transaction with commit. Format ::= COMMIT [ WORK ] Syntax Rules None. Access Rules None. General Rules 1) If the current SQL-transaction is part of an encompassing trans- action that is controlled by an agent other than the SQL-agent, then an exception condition is raised: invalid transaction ter- mination. 2) For every open cursor CR in any associated with the current SQL-transaction, the following statement is implicitly executed: CLOSE CR 3) For every temporary table in any associated with the current SQL-transaction that specifies the ON COMMIT DELETE option and that was updated by the current SQL-transaction, the execution of the is effectively preceded by the execution of a that specifies DELETE FROM T, where T is the of that temporary table. 4) The effects specified in the General Rules of Subclause 14.2, "" occur as if the statement SET CONSTRAINTS ALL IMMEDIATE were executed. 5) Case: a) If any constraint is not satisfied, then any changes to SQL- data or schemas that were made by the current SQL-transaction are canceled and an exception condition is raised: transac- tion rollback-integrity constraint violation. Transaction management 403 X3H2-92-154/DBL CBR-002 14.3 b) If any other error preventing commitment of the SQL- transaction has occurred, then any changes to SQL-data or schemas that were made by the current SQL-transaction are canceled and an exception condition is raised: transaction rollback with an implementation-defined subclass value. c) Otherwise, any changes to SQL-data or schemas that were made by the current SQL-transaction are made accessible to all concurrent and subsequent SQL-transactions. 6) The current SQL-transaction is terminated. Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) In conforming Entry SQL language, WORK shall be specified. 404 Database Language SQL X3H2-92-154/DBL CBR-002 14.4 14.4 Function Terminate the current SQL-transaction with rollback. Format ::= ROLLBACK [ WORK ] Syntax Rules None. Access Rules None. General Rules 1) If the current SQL-transaction is part of an encompassing trans- action that is controlled by an agent other than the SQL-agent and the is not being implicitly executed, then an exception condition is raised: invalid transaction ter- mination. 2) For every open cursor CR in any associated with the current SQL-transaction, the following statement is implicitly executed: CLOSE CR 3) Any changes to SQL-data or schemas that were made by the current SQL-transaction are canceled. 4) The current SQL-transaction is terminated. Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) In conforming Entry SQL language, WORK shall be specified. Transaction management 405 X3H2-92-154/DBL CBR-002 406 Database Language SQL X3H2-92-154/DBL CBR-002 15 Connection management 15.1 Function Establish an SQL-connection. Format ::= CONNECT TO ::= [ AS ] [ USER ] | DEFAULT Syntax Rules 1) If is not specified, then an implementation-defined for the SQL-connection is implicit. Access Rules None. General Rules 1) If a is executed after the first transaction- initiating SQL-statement executed by the current SQL-transaction and the implementation does not support transactions that affect more than one SQL-server, then an exception condition is raised: feature not supported-multiple server transactions 2) If is specified, then let S be the character string that is the value of and let V be the character string that is the value of TRIM ( BOTH ' ' FROM CV ) 3) If V does not conform to the Format and Syntax Rules of an , then an exception condition is raised: invalid authorization specification. Connection management 407 X3H2-92-154/DBL CBR-002 15.1 4) If the that contains the that contains the specifies a , then whether or not must be identical to that is implementation-defined, as are any other restrictions on the value of . Otherwise, any restrictions on the value of are implementation-defined. 5) If the value of violates the implementation-defined restrictions, then an exception condition is raised: invalid authorization specification. 6) If was specified, then let CV be the value of the immediately contained in . If neither DEFAULT nor were specified, then let CV be the value of . Let CN be the result of TRIM ( BOTH ' ' FROM CV ) If CN does not conform to the Format and Syntax Rules of an , then an exception condition is raised: invalid connection name. 7) If an SQL-connection with name CN has already been established by the current SQL-agent and has not been disconnected, or if DEFAULT is specified and a default SQL-connection has already been established by the current SQL-agent and has not been dis- connected, then an exception condition is raised: connection exception-connection name in use. 8) Case: a) If DEFAULT is specified, then the default SQL-session is initiated and associated with the default SQL-server. The method by which the default SQL-server is determined is implementation-defined. b) Otherwise, an SQL-session is initiated and associated with the SQL-server identified by . The method by which is used to determine the appropriate SQL-server is implementation-defined. 9) If the successfully initiates an SQL- session, then: a) The current SQL-connection and current SQL-session, if any, become a dormant SQL-connection and a dormant SQL-session, respectively. The SQL-server context information is preserved and is not affected in any way by operations performed over the initiated SQL-connection. Note: The SQL-session context information is defined in Subclause 4.30, "SQL-sessions". 408 Database Language SQL X3H2-92-154/DBL CBR-002 15.1 b) The SQL-session initiated by the becomes the current SQL-session and the SQL-connection established to that SQL-session becomes the current SQL-connection. Note: If the fails to initiate an SQL- session, then the current SQL-connection and current SQL- session, if any, remain unchanged. 10)If the SQL-client cannot establish the SQL-connection, then an exception condition is raised: connection exception- SQL-client unable to establish SQL-connection. 11)If the SQL-server rejects the establishment of the SQL- connection, then an exception condition is raised: connection exception- SQL-server rejected establishment of SQL-connection. 12)The SQL-server for the subsequent execution of s in any s associated with the SQL-agent is set to the SQL-server identified by . 13)The SQL-session is set to . Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall not contain any . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. Connection management 409 X3H2-92-154/DBL CBR-002 15.2 15.2 Function Select an SQL-connection from the available SQL-connections. Format ::= SET CONNECTION ::= DEFAULT | Syntax Rules None. Access Rules None. General Rules 1) If a is executed after the first transaction-initiating SQL-statement executed by the current SQL-transaction and the implementation does not support trans- actions that affect more than one SQL-server, then an excep- tion condition is raised: feature not supported-multiple server transactions. 2) Case: a) If DEFAULT is specified and there is no default SQL- connection that is current or dormant for the current SQL- agent, then an exception condition is raised: connection exception-connection does not exist. b) Otherwise, if does not identify an SQL- session that is current or dormant for the current SQL-agent, then an exception condition is raised: connection exception- connection does not exist. 3) If the SQL-connection identified by cannot be selected, then an exception condition is raised: connection exception-connection failure. 4) The current SQL-connection and current SQL-session become a dor- mant SQL-connection and a dormant SQL-session, respectively. 410 Database Language SQL X3H2-92-154/DBL CBR-002 15.2 The SQL-server context information is preserved and is not af- fected in any way by operations performed over the selected SQL-connection. Note: The SQL-session context information is defined in Subclause 4.30, "SQL-sessions". 5) The SQL-connection identified by becomes the current SQL-connection and the SQL-session associated with that SQL-connection becomes the current SQL-session. All SQL-session context information is restored to the same state as at the time the SQL-connection became dormant. Note: The SQL-session context information is defined in Subclause 4.30, "SQL-sessions". 6) The SQL-server for the subsequent execution of s in any s associated with the SQL-agent is set to that of the current SQL-connection. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall not contain any . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. Connection management 411 X3H2-92-154/DBL CBR-002 15.3 15.3 Function Terminate an SQL-connection. Format ::= DISCONNECT ::= | ALL | CURRENT Syntax Rules None. Access Rules None. General Rules 1) If is specified and does not identify an SQL-connection that is current or dormant for the current SQL-agent, then an exception condition is raised: connection exception-connection does not exist. 2) If DEFAULT is specified and there is no default SQL-connection that is current or dormant for the current SQL-agent, then an exception condition is raised: connection exception-connection does not exist. 3) If CURRENT is specified and there is no current SQL-connection for the current SQL-agent, then an exception condition is raised: connection exception-connection does not exist. 4) Let C be the current SQL-connection. 5) Let L be a list of SQL-connections. If a is specified, then L is that SQL-connection. If CURRENT is spec- ified, then L is the current SQL-connection, if any. If ALL is specified, then L is a list representing every SQL-connection that is current or dormant for the current SQL-agent, in an implementation-dependent order. If DEFAULT is specified, then L is the default SQL-connection. 6) If any SQL-connection in L is active, then an exception condi- tion is raised: invalid transaction state. 412 Database Language SQL X3H2-92-154/DBL CBR-002 15.3 7) For every SQL-connection C1 in L, treating the SQL-session S1 identified by C1 as the current SQL-session, all of the actions that are required after the last call of a by an SQL-agent, except for the execution of a or a , are performed. C1 is terminated, re- gardless of any exception condition that might occur during the disconnection process. Note: See the General Rules of Subclause 12.1, "", for the actions to be performed after the last call of a by an SQL-agent. 8) If any error is detected during execution of a , then a completion condition is raised: warning- disconnect error. 9) If C is contained in L, then there is no current SQL-connection following the execution of the . Otherwise, C remains the current SQL-connection. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall not contain any . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. Connection management 413 X3H2-92-154/DBL CBR-002 414 Database Language SQL X3H2-92-154/DBL CBR-002 16 Session management 16.1 Function Set the default catalog name for unqualified s in s that are prepared in the current SQL- session by an or a and in s that are invoked directly. Format ::= SET CATALOG Syntax Rules 1) The of the shall be an SQL character data type. Access Rules None. General Rules 1) Let S be the character string that is the value of the and let V be the character string that is the value of TRIM ( BOTH ' ' FROM S ) 2) If V does not conform to the Format and Syntax Rules of a , then an exception condition is raised: invalid catalog name. 3) The default catalog name of the current SQL-session is set to V. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall not contain any . Session management 415 X3H2-92-154/DBL CBR-002 16.1 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions None. 416 Database Language SQL X3H2-92-154/DBL CBR-002 16.2 16.2 Function Set the default schema name for unqualified s in s that are prepared in the current SQL- session by an or a and in s that are invoked directly. Format ::= SET SCHEMA Syntax Rules 1) The data type of the shall be an SQL char- acter data type. Access Rules None. General Rules 1) Let S be the character string that is the value of the and let V be the character string that is the value of TRIM ( BOTH ' ' FROM S ) 2) If V does not conform to the Format and Syntax Rules of a , then an exception condition is raised: invalid schema name. 3) Case: a) If V conforms to the Format and Syntax Rules for a that contains a , then let X be the part and let Y be the part of V. The following statement is implicitly executed: SET CATALOG 'X' and the is effectively replaced by: SET SCHEMA 'Y' b) Otherwise, the default unqualified schema name of the current SQL-session is set to V. Session management 417 X3H2-92-154/DBL CBR-002 16.2 Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall not contain any . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. 418 Database Language SQL X3H2-92-154/DBL CBR-002 16.3 16.3 Function Set the default character set name for s and s in s that are prepared in the current SQL-session by an or a and in s that are invoked directly. Format ::= SET NAMES Syntax Rules 1) The of the shall be an SQL character data type. Access Rules None. General Rules 1) Let S be the character string that is the value of the and let V be the character string that is the value of TRIM ( BOTH ' ' FROM S ) 2) If V does not conform to the Format and Syntax Rules of a , then an exception condition is raised: invalid character set name. 3) The default character set name of the current SQL-session is set to V. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall not contain any . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. Session management 419 X3H2-92-154/DBL CBR-002 16.4 16.4 Function Set the of the current SQL-session. Format ::= SET SESSION AUTHORIZATION Syntax Rules 1) The of the shall be an SQL character data type. Access Rules None. General Rules 1) If a is ex- ecuted and an SQL-transaction is currently active, then an exception condition is raised: invalid transaction state. 2) Let S be the character string that is the value of the and let V be the character string that is the value of TRIM ( BOTH ' 'FROM S ) 3) If V does not conform to the Format and Syntax Rules of an , then an exception condition is raised: invalid authorization specification. 4) Whether or not the for the SQL- session can be set to an other than the of the SQL-session when the SQL-session is started is implementation-defined, as are any restrictions pertaining to such changes. 5) If the current is restricted from setting the to the specified value, then an exception condition is raised: invalid authorization specification. 6) Let T be any temporary table defined in the currently active SQL-session. In all the privilege descriptors for T and for each of the columns of T, the is set to V. 420 Database Language SQL X3H2-92-154/DBL CBR-002 16.4 7) The of the current SQL-session is set to V. Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain any . Session management 421 X3H2-92-154/DBL CBR-002 16.5 16.5 Function Set the default local time zone displacement for the current SQL- session. Format ::= SET TIME ZONE ::= | LOCAL Syntax Rules 1) The of the immediately contained in the shall be INTERVAL HOUR TO MINUTE. Access Rules None. General Rules 1) Case: a) If LOCAL is specified, then the default local time zone dis- placement of the current SQL-session is set to the original implementation-defined default local time zone displace- ment that was established when the current SQL-session was started. b) Otherwise, Case: i) If the value of the is not the null value and is between INTERVAL -'12:59' and INTERVAL +'13:00', then the default local time zone displacement of the current SQL-session is set to the value of the . ii) Otherwise, an exception condition is raised: data exception- invalid time zone displacement value. 422 Database Language SQL X3H2-92-154/DBL CBR-002 16.5 Leveling Rules 1) The following restrictions apply for Intermediate SQL; None. 2) The following restrictions apply for Entry SQL; a) Conforming Entry SQL language shall not contain any . Session management 423 X3H2-92-154/DBL CBR-002 424 Database Language SQL X3H2-92-154/DBL CBR-002 17 Dynamic SQL 17.1 Description of SQL item descriptor areas Function Specify the identifiers, data types, and codes used in SQL item descriptor areas. Syntax Rules 1) An SQL item descriptor area comprises the items specified in Table 17, "Data types of s used in SQL item descriptor areas". 2) Let DT be a data type. The data type T of a or a SVT is said to match the data type specified by the item descriptor area if and only if one of the following conditions is true. Case: a) TYPE indicates NUMERIC and T is specified by NUMERIC(P,S), where P is the value of PRECISION and S is the value of SCALE. b) TYPE indicates DECIMAL and T is specified by DECIMAL(P,S), where P is the value of PRECISION and S is the value of SCALE. c) TYPE indicates INTEGER and T is specified by INTEGER. d) TYPE indicates SMALLINT and T is specified by SMALLINT. e) TYPE indicates FLOAT and T is specified by FLOAT(P), where P is the value of PRECISION. f) TYPE indicates REAL and T is specified by REAL. g) TYPE indicates DOUBLE PRECISION and T is specified by DOUBLE PRECISION. h) TYPE indicates BIT and T is specified by BIT(L), where L is the value of LENGTH. i) TYPE indicates BIT VARYING and T is specified by BIT VARYING(L), where Case: i) SVT is a and L is the value of LENGTH. Dynamic SQL 425 X3H2-92-154/DBL CBR-002 17.1 Description of SQL item descriptor areas ii) SVT is a and L is not less than the value of LENGTH. j) TYPE indicates CHARACTER and T is specified by CHARACTER(L), where L is the value of LENGTH and the formed by the values of CHARACTER_SET_CATALOG, CHARACTER_SET_SCHEMA, and CHARACTER_SET_NAME identifies the character set of SVT. k) TYPE indicates CHARACTER VARYING and T is specified by CHARACTER VARYING(L), where the formed by the values of CHARACTER_SET_CATALOG, CHARACTER_SET_ SCHEMA, and CHARACTER_SET_NAME identifies the character set of SVT and Case: i) SVT is a and L is the value of LENGTH. ii) SVT is a and L is not less than the value of LENGTH. 3) An item descriptor area is valid if and only if TYPE indicates a code defined in Table 18, "Codes used for SQL data types in Dynamic SQL", and one of the following is true: Case: a) TYPE indicates NUMERIC and PRECISION and SCALE are valid precision and scale values for the NUMERIC data type. b) TYPE indicates DECIMAL and PRECISION and SCALE are valid precision and scale values for the DECIMAL data type. c) TYPE indicates FLOAT and PRECISION is a valid precision value for the FLOAT data type. d) TYPE indicates INTEGER, SMALLINT, REAL, or DOUBLE PRECISION. e) TYPE indicates BIT or BIT VARYING and LENGTH is a valid length value for the BIT date type. f) TYPE indicates CHARACTER or CHARACTER VARYING, LENGTH is a valid length value for the CHARACTER data type, and CHARACTER_SET_CATALOG, CHARACTER_SET_SCHEMA, and CHARACTER_ SET_NAME are a valid qualified character set name for the CHARACTER data type. g) TYPE indicates a , DATETIME_INTERVAL_CODE is a code specified in Table 19, "Codes associated with datetime data types in Dynamic SQL", and PRECISION is a valid value for the in which the i-th in each is E1 and either: i) P is not an , or ii) P is an and the is not the simply contained in the . k) P contains an with an in which E1 is both the and the first of the . l) P contains a in which both immediately contained s are E1. m) P contains a or a whose immediately contained is E1. n) P contains a whose operand is E1. o) P contains a whose or is E1. p) P contains a whose is E1. q) P contains a . 444 Database Language SQL X3H2-92-154/DBL CBR-002 17.6 5) Case: a) If E1 is followed by an IQ, then the data type of E1 is assumed to be INTERVAL IQ. b) In OCTET_LENGTH(E1), CHARACTER_LENGTH(E1), and CHARACTER_ LENGTH(E1), the data type of E1 is assumed to be CHARACTER VARYING(L), where L is the implementation-defined maximum value of for CHARACTER VARYING. c) In POSITION(X1 IN X2), and SUBSTRING(X1 FROM X3 FOR X4), if X1 (X2) meets the criteria for E1, E2, E3, and E4, then the data type of X1 (X2) is assumed to be CHARACTER VARYING(L), where L is the implementation-defined maximum value of for CHARACTER VARYING. If X3 (X4) meets the cri- teria for E1, E2, E3, and E4, then the data type of X3 (X4) is assumed to be NUMERIC(P,0), where P is the implementation- defined maximum value of for NUMERIC. d) In a of the form "X1 X2", if X1 (X2) meets the criteria for E1, E2, E3, and E4, then the data type of X1 (X2) is assumed to be CHARACTER VARYING(L), where L is the implementation-defined maximum value of for CHARACTER VARYING. e) In BIT_LENGTH(E1), the data type of E1 is assumed to be BIT VARYING(L), where L is the implementation-defined maximum value of for BIT VARYING. f) In a of the form "E1 + ", " + E1" or " - E1", the data type of E1 is assumed to be Case: i) If the is a date data type, then the data type of E1 is assumed to be INTERVAL YEAR(P) TO MONTH, where P is the implementation-defined maximum . ii) Otherwise, the data type of E1 is assumed to be INTERVAL DAY(P) TO SECOND(F), where P and F are the implementation- defined maximum and max- imum , respectively. g) In a of the form " * E1" or " / E1", the data type of E1 is assumed to be NUMERIC(P,0), where P is the implementation-defined maximum value of for NUMERIC. h) In all other s of the form "E1+ F", "E1- F", "E1* F", "E1/ F", "F + E1", "F - E1", "F * E1", or "F / E1", the data type of E1 is assumed to be the data type of F. Dynamic SQL 445 X3H2-92-154/DBL CBR-002 17.6 i) In a of the form "CAST (E1 AS )", "CAST (E1 AS )", the data type of E1 is the of the specified domain or the explicitly- specified . j) If one or more operands of COALESCE are E1, then the data type of E1 is assumed to be the data type of the first operand. k) If one or more s in a are E1, then the data type of E1 is assumed to be the data type of the first . l) If one operand of NULLIF is E1, then the data type of E1 is assumed to be the data type of the other operand. m) In the first and second operands of a or , or the first and third operands of a , if the i-th value of one operand is E1, then the data type of E1 is the data type of the i-th value of the other operand. n) In the first and second operands of an , if either of the first s is E1, then the data type of E1 is the data type of the first of the other operand. If both of the first s are E1, then the data type of each E1 is assumed to be TIMESTAMP WITH TIME ZONE. o) In a in which the i-th of some is E1 that contains a whose i-th is not E1, the data type of E1 is the data type of the i-th of the first whose i-th is not E1. p) In a in which the i-th in each is E1 that is the simply contained in an , the data type of E1 is the data type of the corresponding column of the implicit or explicit con- tained in the . q) In an that specifies a , the data types of s E1 in the are assumed to be the same as the data types of the respective columns of the . r) In an that specifies an , if the is not E1, then let D be its data type. Otherwise, let D be the data type of the first of the . The data type of any E1 in the is assumed to be D. 446 Database Language SQL X3H2-92-154/DBL CBR-002 17.6 s) If E1 appears for , , or in , then the data type of E1 is assumed to be CHARACTER VARYING(L), where L is the implementation-defined maximum value of for CHARACTER VARYING. t) If any value in the of a or is E1, then the data type of E1 is assumed to be the same as the data type of the respective column of the . u) If in , , , or is E1, then the data type of E1 is assumed to be CHARACTER VARYING(L), where L is the implementation-defined maximum value of for CHARACTER VARYING. v) If in is E1, then the data type of E1 is assumed to be CHARACTER VARYING(L), where L is the implementation-defined maximum value of for CHARACTER VARYING. w) If a in a is E1, then the data type of E1 is assumed to be the same data type as the corresponding . 6) If the value of the identifies an existing prepared statement, then an implicit DEALLOCATE PREPARE SSN is executed, where SSN is the value of the . 7) P is prepared for execution. 8) If is specified for the , then let S be the character string that is the value of the and let V be the character string that is the result of TRIM ( BOTH ' ' FROM S ) If V does not conform to the Format and Syntax Rules of an , then an exception condition is raised: invalid SQL statement identifier. 9) Case: a) If is specified for the , then the value of the is associated with the prepared statement. This value and ex- plicit or implied shall be specified for each or that is to be associated with this prepared statement. Dynamic SQL 447 X3H2-92-154/DBL CBR-002 17.6 b) If is specified for the , then: i) If P is a and is associated with a cursor C through a , then an association is made between C and P. The association is preserved until the prepared statement is destroyed. ii) If P is not a and is associated with a cursor C through a , then an exception condition is raised: dynamic SQL error- prepared statement is not a cursor specification. iii) Otherwise, the same shall be specified for each that is to be associated with this prepared statement. 10)The validity of an value or a in an SQL-transaction different from the one in which the statement was prepared is implementation-dependent. Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain any Dynamic SQL language. 448 Database Language SQL X3H2-92-154/DBL CBR-002 17.7 17.7 Function Deallocate SQL-statements that have been prepared with a . Format ::= DEALLOCATE PREPARE Syntax Rules 1) If is a , then the that contains the shall also contain a that specifies the same . Access Rules None. General Rules 1) If the does not identify a statement pre- pared in the scope of the , then an excep- tion condition is raised: invalid SQL statement name. 2) If the value of identifies an existing prepared statement that is the of an open cursor, then an exception condition is raised: invalid cursor state. 3) The prepared statement identified by the is destroyed. Any cursor that was allocated with an that is associated with the prepared state- ment identified by the is destroyed. If the value of the identifies an existing prepared statement that is a , then any prepared statements that reference that cursor are destroyed. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall contain no . 2) The following restrictions apply for Entry SQL in addition to Intermediate SQL restrictions: None. Dynamic SQL 449 X3H2-92-154/DBL CBR-002 17.8 17.8 Function Obtain information about the columns for the prepared statement is stored in the specified SQL descriptor area as follows: a) Let N be the specified when the was allocated. Dynamic SQL 451 X3H2-92-154/DBL CBR-002 17.9 b) If the prepared statement that is being described is a or a , then let T be the table defined by the prepared statement and let D be the degree of T. Otherwise, let D be 0. c) COUNT is set to D. d) If D is greater than N, then a completion condition is raised: warning-insufficient item descriptor areas. e) If D is 0 or D is greater than N, then no item descriptor areas are set. Otherwise, the first D item descriptor areas are set so that the i-th item descriptor area contains the descriptor of the i-th column of T. The descriptor of a col- umn consists of values for TYPE, NULLABLE, NAME, UNNAMED, and other fields depending on the value of TYPE as described be- low. The DATA and INDICATOR fields are not relevant in this case. Those fields and fields that are not applicable for a particular value of TYPE are set to implementation-dependent values. i) TYPE is set to a code, as shown in Table 18, "Codes used for SQL data types in Dynamic SQL", indicating the data type of the column. ii) NULLABLE is set to 1 if the resulting column is possibly nullable and 0 otherwise. iii) If the column name is implementation-dependent, then NAME is set to the implementation-dependent name of the column, and UNNAMED is set to 1. Otherwise, NAME is set to the name for the column and UNNAMED is set to 0. iv) Case: 1) If TYPE indicates a , then: LENGTH is set to the length or maximum length in char- acters of the character string; OCTET_LENGTH is set to the maximum possible length in octets of the character string; CHARACTER_SET_CATALOG, CHARACTER_SET_SCHEMA and CHARACTER_SET_NAME are set to the of the character string's character set; and COLLATION_ CATALOG, COLLATION_SCHEMA and COLLATION_NAME are set to the of the character string's colla- tion. If the subject specifies C, then the lengths specified in LENGTH and OCTET_LENGTH do not include the implementation-defined null character that terminates a C character string. 452 Database Language SQL X3H2-92-154/DBL CBR-002 17.9 2) If TYPE indicates a , then LENGTH is set to the length or maximum length in bits of the bit string and OCTET_LENGTH is set to the maximum possible length in octets of the bit string. 3) If TYPE indicates an , then PRECISION and SCALE are set to the precision and scale of the exact numeric. 4) If TYPE indicates an , then PRECISION is set to the precision of the approximate numeric. 5) If TYPE indicates a , then LENGTH is set to the length in positions of the datetime type, DATETIME_INTERVAL_CODE is set to a code as specified in Table 19, "Codes associated with datetime data types in Dynamic SQL", to indicate the specific datetime data type, and PRECISION is set to the of a created or declared local temporary table and if the is not in the same as the that prepared the pre- pared statement, then an exception condition is raised: syntax rule or access rule violation in dynamic SQL statement. Dynamic SQL 459 X3H2-92-154/DBL CBR-002 17.10 4) If P contains s and a is not specified, then an exception condition is raised: dynamic SQL error- using clause required for dynamic parameters. 5) If P is a and a is not specified, then an exception condition is raised: dynamic SQL error-using clause required for result fields. 6) If a that is is specified, then the General Rules specified in Subclause 17.9, "", for a in an are applied. 7) If P is a , then the General Rules specified in Subclause 17.9, "", for a in an are applied. 8) P is executed. Case: a) If P is a , then all General Rules in Subclause 13.5, " of P. b) If the is a , then all General Rules in Subclause 17.19, "", apply to the . c) If the is a , then all General Rules in Subclause 17.20, "", apply to the . d) Otherwise, the results of the execution are the same as if the statement was contained in a and executed; these are described in Subclause 12.3, "". Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall contain no . 460 Database Language SQL X3H2-92-154/DBL CBR-002 17.10 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain any Dynamic SQL language. Dynamic SQL 461 X3H2-92-154/DBL CBR-002 17.11 17.11 Function Dynamically prepare and execute a preparable statement. Format ::= EXECUTE IMMEDIATE Syntax Rules 1) The data type of shall be character string. Access Rules None. General Rules 1) Let P be the contents of the . 2) If P is a or a , then P refers to either a dynamic cursor with the same or to an extended dynamic cursor whose value is the same as the . Case: a) If both an extended dynamic cursor and a dynamic cursor with the same name as the exist, then an exception condition is raised: ambiguous cursor name. b) If there is neither an extended dynamic cursor nor a dynamic cursor with the name of , then an exception condition is raised: invalid cursor name. 3) If one or more of the following are true, then an exception condition is raised: syntax error or access rule violation in dynamic SQL statement. a) P does not conform to the Format, Syntax Rules, and Access Rules for a or P is a or a . b) P contains a . c) P contains a . 4) The that is the value of the is prepared and executed. 462 Database Language SQL X3H2-92-154/DBL CBR-002 17.11 Case: a) If the is a , then all General Rules in Subclause 17.19, "", apply to the . b) If the is a , then all General Rules in Subclause 17.20, "", apply to the . c) Otherwise, the results of the execution are the same as if the statement was contained in a and executed; these are described in Subclause 12.3, "". Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain any Dynamic SQL language. Dynamic SQL 463 X3H2-92-154/DBL CBR-002 17.12 17.12 Function Declare a cursor to be associated with a , which may in turn be associated with a . Format ::= DECLARE [ INSENSITIVE ] [ SCROLL ] CURSOR FOR Syntax Rules 1) The shall not be identical to the specified in any other or in the same . 2) The containing shall contain a whose is the same as the of the . Access Rules None. General Rules 1) All General Rules of Subclause 13.1, "" apply to , replacing "" with "" and "" with "prepared statement". Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall contain no that specifies INSENSITIVE. b) If an of FOR UPDATE with or without a is specified, then neither SCROLL nor ORDER BY shall be specified. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain any Dynamic SQL language. 464 Database Language SQL X3H2-92-154/DBL CBR-002 17.13 17.13 Function Define a cursor based on a for a . Format ::= ALLOCATE [ INSENSITIVE ] [ SCROLL ] CURSOR FOR Syntax Rules None. Access Rules None. General Rules 1) When the is executed, if the value of the does not identify a statement previously prepared in the scope of the , then an exception condition is raised: invalid SQL state- ment name. 2) If the prepared statement associated with the is not a , then an exception condition is raised: dynamic SQL error-prepared statement not a cursor specification. 3) All General Rules of Subclause 13.1, "" apply to , replacing "" with "" and "" with "prepared statement". 4) Let S be the character string that is the value of the immediately contained in . Let V be the character string that is the result of TRIM ( BOTH ' ' FROM S ) If V does not conform to the Format and Syntax Rules of an , then an exception condition is raised: invalid cursor name. Dynamic SQL 465 X3H2-92-154/DBL CBR-002 17.13 5) If the value of the is identical to the value of the of any other cursor al- located in the scope of the , then an exception condition is raised: invalid cursor name. 6) An association is made between the value of the and the prepared statement in the scope of the . The association is preserved until the prepared statement is destroyed, at which time the cursor identified by is also destroyed. Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall not contain any . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. 466 Database Language SQL X3H2-92-154/DBL CBR-002 17.14 17.14 Function Associate input parameters with a and open the cursor. Format ::= OPEN [ ] Syntax Rules 1) If DCN is a CN, then the containing shall contain a whose is CN. 2) Let CR be the cursor identified by DCN. Access Rules 1) The Access Rules for the simply contained in the prepared statement associated with the are applied. General Rules 1) If is a and the of the associated is not associ- ated with a prepared statement, then an exception condition is raised: invalid SQL statement name. 2) If is an whose value does not identify a cursor allocated in the scope of the , then an exception condition is raised: invalid cursor name. 3) If the prepared statement associated with the contains s and a is not specified, then an exception condition is raised: dynamic SQL error-using clause required for dynamic parameters. 4) The cursor specified by is updatable if and only if the associated specified an updatable cursor. Note: updatable cursor is defined in Subclause 13.1, "". 5) If a is specified, then the General Rules spec- ified in Subclause 17.9, "", for are applied. Dynamic SQL 467 X3H2-92-154/DBL CBR-002 17.14 6) All General Rules of Subclause 13.2, "", apply to the . Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain any Dynamic SQL language. 468 Database Language SQL X3H2-92-154/DBL CBR-002 17.15 17.15 Function Fetch a row for a cursor declared with a . Format ::= FETCH [ [ ] FROM ] Syntax Rules 1) If is omitted, then NEXT is implicit. 2) The shall specify INTO. 3) If DCN is a CN, then the containing shall contain a whose is CN. 4) Let CR be the cursor identified by DCN and let T be the table defined by the of CR. 5) If the implicit or explicit is not NEXT, then the or associated with CR shall specify SCROLL. 6) The number of s in or the number of item descriptor areas in the SQL descriptor area referenced by , as appropriate, shall be the same as the degree of T. The i-th in or the i-th item descriptor area of the SQL de- scriptor area, as appropriate, corresponds with the i-th column of T. 7) The Syntax Rules of Subclause 9.1, "Retrieval assignment", apply to each corresponding in and each column of T as TARGET and VALUE, respectively. Access Rules None. General Rules 1) The General Rules specified in Subclause 17.9, "", for are applied. Dynamic SQL 469 X3H2-92-154/DBL CBR-002 17.15 2) All General Rules of Subclause 13.3, "", ap- ply to the , replacing "targets in the " and "targets identified by the " with "s in the or item descriptor areas of the SQL descriptor area, as appropriate". Leveling Rules 1) The following restrictions apply for Intermediate SQL: 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain any Dynamic SQL language. 470 Database Language SQL X3H2-92-154/DBL CBR-002 17.16 17.16 Function Close a cursor. Format ::= CLOSE Syntax Rules 1) If DCN is a CN, then the containing shall contain a whose is CN. 2) Let CR be the cursor identified by DCN. Access Rules None. General Rules 1) All General Rules of Subclause 13.4, "", apply to the . Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain any Dynamic SQL language. Dynamic SQL 471 X3H2-92-154/DBL CBR-002 17.17 17.17 Function Delete a row of a table. Format ::= DELETE FROM WHERE CURRENT OF Syntax Rules 1) If DCN is a CN, then the containing shall contain a whose is CN. 2) Let CR be the cursor identified by DCN. 3) CR shall be an updatable cursor. Note: updatable cursor is defined in Subclause 13.1, "". 4) Let T be the table identified by the . Let QS be the that is the simply underlying table of the simply underlying table of CR. The simply underlying table of QS shall be T. Note: The simply underlying table of a is defined in Subclause 13.1, "". Access Rules 1) All Access Rules of Subclause 13.6, "", apply to the . General Rules 1) All General Rules of Subclause 13.6, "", apply to the , replacing "" with "". Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 472 Database Language SQL X3H2-92-154/DBL CBR-002 17.17 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain any Dynamic SQL language. Dynamic SQL 473 X3H2-92-154/DBL CBR-002 17.18 17.18 Function Update a row of a table. Format ::= UPDATE SET [ { }... ] WHERE CURRENT OF Syntax Rules 1) If DCN is a CN, then the containing shall contain a whose is CN. 2) Let CR be the cursor identified by DCN. 3) CR shall be an updatable cursor. Note: updatable cursor is defined in Subclause 13.1, "". 4) Let T be the table identified by the . Let QS be the that is the simply underlying table of the simply underlying table of CR. The simply underlying table of QS shall be T. Note: The simply underlying table of a is defined in Subclause 13.1, "". 5) If CR is an ordered cursor, then for each OC, the column of T identified by OC shall not be directly or in- directly referenced in the of the defining for CR. 6) No leaf generally underlying table of T shall be an underly- ing table of any generally contained in any immediately contained in any contained in the . 7) A in a shall not directly con- tain a . 8) The same shall not appear more than once in a . 9) If CR was specified using an explicit or implicit of FOR UPDATE, then each specified as an shall identify a column in the explicit or implicit associated with the . 474 Database Language SQL X3H2-92-154/DBL CBR-002 17.18 10)The scope of the is the entire . 11)For every , the Syntax Rules of Subclause 9.2, "Store assignment", apply to the column of T identified by the and the of the as TARGET and VALUE, respectively. Access Rules 1) All Access Rules of Subclause 13.9, "", apply to the . General Rules 1) All General Rules of Subclause 13.9, "", apply to the , replacing "" with "" and "" with "". Leveling Rules 1) The following restrictions apply for Intermediate SQL: None. 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: a) Conforming Entry SQL language shall not contain any Dynamic SQL language. Dynamic SQL 475 X3H2-92-154/DBL CBR-002 17.19 17.19 Function Delete a row of a table through a dynamic cursor. Format ::= DELETE [ FROM ] WHERE CURRENT OF Syntax Rules 1) If is not specified, then the name of the under- lying table of the identified by is implicit. 2) All Syntax Rules of Subclause 13.6, "", apply to the , replacing "" with " or " and "" with "". Access Rules 1) All Access Rules of Subclause 13.6, "", apply to the . General Rules 1) All General Rules of Subclause 13.6, "", apply to the , replacing "" with "". Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall contain no . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. 476 Database Language SQL X3H2-92-154/DBL CBR-002 17.20 17.20 Function Update a row of a table through a dynamic cursor. Format ::= UPDATE [ ] SET WHERE CURRENT OF Syntax Rules 1) If is not specified, then the name of the under- lying table of the identified by is implicit. 2) All Syntax Rules of Subclause 13.9, "", apply to the , replacing "" with " or " and "" with "". Access Rules 1) All Access Rules of Subclause 13.9, "", apply to the . General Rules 1) All General Rules of Subclause 13.9, "", apply to the , replacing "" with "". Leveling Rules 1) The following restrictions apply for Intermediate SQL: a) Conforming Intermediate SQL language shall contain no . 2) The following restrictions apply for Entry SQL in addition to any Intermediate SQL restrictions: None. Dynamic SQL 477 X3H2-92-154/DBL CBR-002 478 Database Language SQL X3H2-92-154/DBL CBR-002 18 Diagnostics management 18.1 Function Get exception or completion condition information from the diagnos- tics area. Format ::= GET DIAGNOSTICS ::= | ::= [ { }... ] ::= ::= NUMBER | MORE | COMMAND_FUNCTION | DYNAMIC_FUNCTION | ROW_COUNT ::= EXCEPTION [ { }... ] ::= ::= CONDITION_NUMBER | RETURNED_SQLSTATE | CLASS_ORIGIN | SUBCLASS_ORIGIN | SERVER_NAME Diagnostics management 479 X3H2-92-154/DBL CBR-002 18.1 | CONNECTION_NAME | CONSTRAINT_CATALOG | CONSTRAINT_SCHEMA | CONSTRAINT_NAME | CATALOG_NAME | SCHEMA_NAME | TABLE_NAME | COLUMN_NAME | CURSOR_NAME | MESSAGE_TEXT | MESSAGE_LENGTH | MESSAGE_OCTET_LENGTH ::= Syntax Rules 1) The data type of a contained in a or shall be the data type specified in Table 21, "s for use with ", for the corresponding or . 2) The data type of shall be exact numeric with scale 0. 480 Database Language SQL X3H2-92-154/DBL CBR-002 18.1 __Table_21-s_for_use_with___ ____________Data_Type__________________________________ |_________________s_______________| | | | | NUMBER exact numeric with scale 0 | | | | MORE | character string (1) | | | | | COMMAND_FUNCTION | character varying (L) | | | | | DYNAMIC_FUNCTION | character varying (L) | | | | |_ROW_COUNT____________|_exact_numeric_with_scale_0________________| | | | |_________________s_______________| | | | | CONDITION_NUMBER exact numeric with scale 0 | | | | RETURNED_SQLSTATE | character string (5) | | | | | CLASS_ORIGIN | character varying (L) | | | | | SUBCLASS_ORIGIN | character varying (L) | | | | | SERVER_NAME | character varying (L) | | | | | CONNECTION_NAME | character varying (L) | | | | | CONSTRAINT_CATALOG | character varying (L) | | | | | CONSTRAINT_SCHEMA | character varying (L) | | | | | CONSTRAINT_NAME | character varying (L) | | | | | CATALOG_NAME | character varying (L) | | | | | SCHEMA_NAME | character varying (L) | | | | | TABLE_NAME | character varying (L) | | | | | COLUMN_NAME | character varying (L) | | | | | CURSOR_NAME | character varying (L) | | | | | MESSAGE_TEXT | character varying (L) | | | | | MESSAGE_LENGTH | exact numeric with scale 0 | | | | |_MESSAGE_OCTET_LENGTH_|_exact_numeric_with_scale_0________________| | | | | Where L is an impleme|tation-defined integer not less than 128. | |______________________|___________________________________________| | | Access Rules None. Diagnostics management 481 X3H2-92-154/DBL CBR-002 18.1 General Rules 1) Specification of retrieves informa- tion about the statement execution recorded in the diagnostics area into . a) The value of NUMBER is the number of exception or completion conditions that have been stored in the diagnostics area as a result of executing the previous SQL-statement other than a . Note: The itself may return infor- mation via the SQLCODE or SQLSTATE parameters, but does not modify the previous contents of the diagnostics area. b) The value of MORE is: Y More conditions were raised during execution of the SQL-statement than have been stored in the diagnostics area. N All of the conditions that were raised during execution of the SQL-statement have been stored in the diagnostics area. c) The value of COMMAND_FUNCTION is the identification of the SQL-statement executed. Table 22, "SQL-statement character codes for use in the diagnostics area" specifies the identi- fier of the SQL-statements. d) The value of DYNAMIC_FUNCTION is the identification of the prepared statement executed. Table 22, "SQL-statement char- acter codes for use in the diagnostics area", specifies the identifier of the SQL-statements. Table 22-SQL-statement character _______________codes_for_use_in_the_diagnostics_area_______________ _SQL-statement____________________Identifier_______________________ | | ALLOCATE CURSOR | | | | | | | ALTER DOMAIN | | | | | | ALTER TABLE | | | | | | CREATE ASSERTION | | | | | | CREATE CHARACTER SET | | | | | | CLOSE CURSOR | | | | | | CREATE COLLATION | | | | | | COMMIT WORK | | | | 482 Database Language SQL X3H2-92-154/DBL CBR-002 18.1 Table 22-SQL-statement character codes ______________for_use_in_the_diagnostics_area_(Cont.)______________ _SQL-statement____________________Identifier_______________________ | | CONNECT | | | | | | | | | | | | | DELETE CURSOR | | | | | | DELETE WHERE | | | | | | DESCRIBE | | | | | | | DISCONNECT | | | | | | CREATE DOMAIN | | | | | | DROP ASSERTION | | | | | | DROP CHARACTER SET | | | | | | DROP COLLATION | | | | | | DROP DOMAIN | | | | | | DROP SCHEMA | | | | | | DROP TABLE | | | | | | DROP TRANSLATION | | | | | | DROP VIEW | | | | | | DYNAMIC CLOSE | | | | | | | DYNAMIC FETCH | | | | | | DYNAMIC OPEN | | | | | | |
__label__pos
0.848712
add_custom_target ----------------- Add a target with no output so it will always be built. .. code-block:: cmake add_custom_target(Name [ALL] [command1 [args1...]] [COMMAND command2 [args2...] ...] [DEPENDS depend depend depend ... ] [BYPRODUCTS [files...]] [WORKING_DIRECTORY dir] [COMMENT comment] [VERBATIM] [USES_TERMINAL] [COMMAND_EXPAND_LISTS] [SOURCES src1 [src2...]]) Adds a target with the given name that executes the given commands. The target has no output file and is *always considered out of date* even if the commands try to create a file with the name of the target. Use the :command:`add_custom_command` command to generate a file with dependencies. By default nothing depends on the custom target. Use the :command:`add_dependencies` command to add dependencies to or from other targets. The options are: ``ALL`` Indicate that this target should be added to the default build target so that it will be run every time (the command cannot be called ``ALL``). ``BYPRODUCTS`` Specify the files the command is expected to produce but whose modification time may or may not be updated on subsequent builds. If a byproduct name is a relative path it will be interpreted relative to the build tree directory corresponding to the current source directory. Each byproduct file will be marked with the :prop_sf:`GENERATED` source file property automatically. Explicit specification of byproducts is supported by the :generator:`Ninja` generator to tell the ``ninja`` build tool how to regenerate byproducts when they are missing. It is also useful when other build rules (e.g. custom commands) depend on the byproducts. Ninja requires a build rule for any generated file on which another rule depends even if there are order-only dependencies to ensure the byproducts will be available before their dependents build. The ``BYPRODUCTS`` option is ignored on non-Ninja generators except to mark byproducts ``GENERATED``. ``COMMAND`` Specify the command-line(s) to execute at build time. If more than one ``COMMAND`` is specified they will be executed in order, but *not* necessarily composed into a stateful shell or batch script. (To run a full script, use the :command:`configure_file` command or the :command:`file(GENERATE)` command to create it, and then specify a ``COMMAND`` to launch it.) If ``COMMAND`` specifies an executable target name (created by the :command:`add_executable` command) it will automatically be replaced by the location of the executable created at build time. If set, the :prop_tgt:`CROSSCOMPILING_EMULATOR` executable target property will also be prepended to the command to allow the executable to run on the host. Additionally a target-level dependency will be added so that the executable target will be built before this custom target. Arguments to ``COMMAND`` may use :manual:`generator expressions `. References to target names in generator expressions imply target-level dependencies. The command and arguments are optional and if not specified an empty target will be created. ``COMMENT`` Display the given message before the commands are executed at build time. ``DEPENDS`` Reference files and outputs of custom commands created with :command:`add_custom_command` command calls in the same directory (``CMakeLists.txt`` file). They will be brought up to date when the target is built. Use the :command:`add_dependencies` command to add dependencies on other targets. ``COMMAND_EXPAND_LISTS`` Lists in ``COMMAND`` arguments will be expanded, including those created with :manual:`generator expressions `, allowing ``COMMAND`` arguments such as ``${CC} "-I$,;-I>" foo.cc`` to be properly expanded. ``SOURCES`` Specify additional source files to be included in the custom target. Specified source files will be added to IDE project files for convenience in editing even if they have no build rules. ``VERBATIM`` All arguments to the commands will be escaped properly for the build tool so that the invoked command receives each argument unchanged. Note that one level of escapes is still used by the CMake language processor before ``add_custom_target`` even sees the arguments. Use of ``VERBATIM`` is recommended as it enables correct behavior. When ``VERBATIM`` is not given the behavior is platform specific because there is no protection of tool-specific special characters. ``USES_TERMINAL`` The command will be given direct access to the terminal if possible. With the :generator:`Ninja` generator, this places the command in the ``console`` :prop_gbl:`pool `. ``WORKING_DIRECTORY`` Execute the command with the given current working directory. If it is a relative path it will be interpreted relative to the build tree directory corresponding to the current source directory. Arguments to ``WORKING_DIRECTORY`` may use :manual:`generator expressions `.
__label__pos
0.92424
Repeat a String challenge Tell us what’s happening: Hi everybody. Below is my code that doesn’t pass the FCC test. I’ve managed to solve the problem using a ‘while loop’ already and have passed the test. I understand my initial code below is way too complicated for what needs to be done. But it still does the job, I checked it on Codepen. Is it just an issue with an FCC test utility or is there something wrong with my code? Your code so far function repeatStringNumTimes(str, num) { // repeat after me let repeatString = ''; if (num > 0) { //make sure the number is positive, otherwise str = '' for (let j = 1; j <= num; j++) //repeat string 'num' number of times for (let i = 0; i < str.length; i++) { //iterate through all characters of the string repeatString = repeatString + str[i] } } else { str = '' }; str = repeatString; return str; } repeatStringNumTimes("abc", 3); Your browser information: User Agent is: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36. Link to the challenge: https://learn.freecodecamp.org/javascript-algorithms-and-data-structures/basic-algorithm-scripting/repeat-a-string-repeat-a-string Hey RomanB your code functionally correct. i have check with different input value and it’s output correctly. code is running fine. I think the FCC does’t specify the check this type algorithm. another why are you enter your code ; semicolon at the end of else block . Good Luck. 1 Like Hey Roman, You have not put {} for the first for loop. Otherwise your code runs perfect. 1 Like I’ve added some comments that might help you work through the logic. function repeatStringNumTimes(str, num) { let repeatString = ''; if (num > 0) { /* this condition is not necessary. For any string `str`, "" is zero repeats of `str`; and "" is already the value of your `repeatString` variable. */ for (let j = 1; j <= num; j++) { /* it's simpler and more standard to start from zero and use greater-than rather than greater-than-or-equal as the condition */ for (let i = 0; i < str.length; i++) { /*there's no reason to iterate through each character of the string. you can just concatenate the whole string on the end for the same effect. */ repeatString = repeatString + str[i] //consider using the `+=` operator instead. } } } else { str = '' }; /* if `num` is 0, `repeatString` is already "", so this is redundant. */ str = repeatString; // instead of these two lines, return str; // why not just directly return `repeatString`? } repeatStringNumTimes("abc", 3); 1 Like Thank you @lionel-rowe for your detailed comments. It took me a while to understand why “if (num>0)” condition is redundant. But I finally got it :slight_smile: I have finished all the challenges from Basic Algorithm Scripting section of FCC curriculum and I’m pretty sure many of my solutions are too lengthy and not optimal. But I’m not sure if I should find and examine the best solutions. Thank you once again.
__label__pos
0.998984
#!/usr/bin/env python # Copyright (c) 2013 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verifies that copying files preserves file attributes. """ import TestGyp import os import stat import sys def check_attribs(path, expected_exec_bit): out_path = test.built_file_path(path, chdir='src') in_stat = os.stat(os.path.join('src', path)) out_stat = os.stat(out_path) if out_stat.st_mode & stat.S_IXUSR != expected_exec_bit: test.fail_test() test = TestGyp.TestGyp() test.run_gyp('copies-attribs.gyp', chdir='src') test.build('copies-attribs.gyp', chdir='src') if sys.platform != 'win32': out_path = test.built_file_path('executable-file.sh', chdir='src') test.must_contain(out_path, '#!/bin/bash\n' '\n' 'echo echo echo echo cho ho o o\n') check_attribs('executable-file.sh', expected_exec_bit=stat.S_IXUSR) test.pass_test()
__label__pos
0.713964
Passing json objects into ipcRenderer.send #1 Any code example on how I can pass in a json object into send / recv function through electron ipc functions. #2 There are examples in the documentation on how to send and receive messages in both directions: http://electron.atom.io/docs/api/ipc-main/#sending-messages #3 All examples i have come across seem to pass just one string as argument Ex: win.webContents.send(‘ping’, ‘whoooooooh!’) Can I do something like win.webContents.send(‘ping’, {keys:[‘name’:‘whoooooooh!’, ‘value’:‘woooww’]}) #4 Yes, you can.
__label__pos
0.641541
OnbasiSoner OnbasiSoner - 10 months ago 47 JSON Question How can i create the json object on python? I tried to create a json object but I made a mistake somewhere. I'm getting some data on CSV file (center is string, lat and lng are float). My codes: data = [] data.append({ 'id': 'id', 'is_city': false, 'name': center, 'county': center, 'cluster': i, 'cluster2': i, 'avaible': true, 'is_deleted': false, 'coordinates': ('{%s,%s}' %(lat,lng)) }) json_data = json.dumps(data) print json_data It goes with this: [{ "county": "County", "is_city": false, "is_deleted": false, "name": "name", "cluster": 99, "cluster2": 99, "id": "id", "coordinates": "{41.0063945,28.9048234}", "avaible": true }] That's I want: { "id" : "id", "is_city" : false, "name" : "name", "county" : "county", "cluster" : 99, "cluster2" : 99, "coordinates" : [ 41.0870185, 29.0235126 ], "available" : true, "isDeleted" : false, } Answer You are defining coordinates to be a string of the specified format. There is no way json can encode that as a list; you are saying one thing when you want another. Similarly, if you don't want the top-level dictionary to be the only element in a list, don't define it to be an element in a list. data = { 'id': 'id', 'is_city': false, 'name': name, 'county': county, 'cluster': i, 'cluster2': i, 'available': true, 'is_deleted': false, 'coordinates': [lat, lng] } I don't know how you defined center, or how you expected it to have the value 'name' and the value 'county' at basically the same time. I have declared two new variables to hold these values; you will need to adapt your code to take care of this detail. I also fixed the typo in "available" where apparently you expected Python to somehow take care of this.
__label__pos
0.997826
Use a password protected Access database in C# [password protected Access database] This example shows how you can open and use a password protected Access database in C#. It also shows how you can use the database’s password protection to provide a login form for your program. Note that the security used by a password protected Access database may not be very secure. There are several program available for download that claim to recover lost passwords. Different versions of Access databases use laughably simple security so it may be possible to recover their passwords by simply looking in their headers. For more information, see the Stack Overflow post Access mdb password recovery. In any case, this program demonstrates a general technique that you can use with other kinds of databases. For example, you could use a similar approach to provide a login for an Oracle, SQL Server, MySQL, or other database. Before I explain the example’s code, I should tell you how you can make a password protected Access database. Making a Password Protected Access Database The details for making a password protected Access database varies depending on the version. In Access 2013, first open the database for exclusive use. To do that, press Ctrl+O and browse for the database file. After you select the file, click the dropdown arrow next to the Open button and select Open Exclusive. Next, open the File menu. Select the Info tab and click Set Database Password. Enter and verify the password and click OK. If you are using another version of Access, you may be able to find features similar to those described here. Otherwise search the internet for instructions for your version. The Example This example is similar to the example Display database pictures in a ListView control in C# except it uses a password protected Access database. Download that example to see details about how the program loads data from the database into a ListView control. When this example starts, the following Form_Load event handler executes to get the database password. // Get the password from the user. private void Form1_Load(object sender, EventArgs e) { // Get the password from the user. PasswordForm frm = new PasswordForm(); if (frm.ShowDialog() == DialogResult.Cancel) Close(); string password = frm.txtPassword.Text; // Select the first style. cboStyle.SelectedIndex = 0; // Initialize the ListView. lvwBooks.SmallImageList = imlSmallIcons; lvwBooks.LargeImageList = imlLargeIcons; // Make the column headers. lvwBooks.MakeColumnHeaders( "Title", 230, HorizontalAlignment.Left, "URL", 220, HorizontalAlignment.Left, "ISBN", 130, HorizontalAlignment.Left, "Picture", 230, HorizontalAlignment.Left, "Pages", 50, HorizontalAlignment.Right, "Year", 60, HorizontalAlignment.Right); // Compose the database file name. // This assumes it's in the executable's directory. string db_name = Application.StartupPath + "\\books_with_images.mdb"; // Compose the connect string. string connect_string = "Provider=Microsoft.ACE.OLEDB.12.0;" + "Data Source=" + db_name + ";" + "Mode=Share Deny None" + ";Jet OLEDB:Database Password=" + password; try { // Try to connect to the database. OleDbConnection conn = new OleDbConnection(connect_string); conn.Open(); // Load the data. LoadData(conn); conn.Close(); conn.Dispose(); } catch (Exception ex) { Console.WriteLine(ex.Message); MessageBox.Show("Invalid password"); Close(); } } The code first displays a PasswordForm. That form simply contains a password text box and OK and Cancel buttons. (See the picture at the top of this post.) The password form’s OK and Cancel buttons have DialogResult properties set to OK and Cancel, respectively. If the user clicks the OK button, then the Load event handler’s call to ShowDialog returns OK. If it returns anything else, for example if the user clicks Close or presses Alt+F4, ShowDialog returns Cancel and the Load closes the main program’s form and the program ends. If all goes well, the Load event handler stores the password entered by the user in the variable password. The code then performs a few tasks to prepare the main form’s ListView control. Next, the program composes a string holding the database’s name. In this example at design time, I opened Solution Explorer, right-clicked the project entry, and selected Add > Existing Item. I then selected the database and clicked Add. When you add a database to a project in this way, Visual Studio tries to create a connection for the database and add it to the project. This program doesn’t need that connection, so I closed the resulting dialogs. [password protected Access database] Because this example uses a password protected Access database, Visual Studio displays the dialog on the right. Whenever you see this dialog, click No to prevent Visual Studio from including a database password in a configuration string that any hacker could read. In this example, the program will get the database password from the user at run time. After it composes the database’s name, the program builds a connect string by filling in the database’s path and the password entered by the user. You may need to modify the string depending on the type of database and your runtime database engine. Next, the program starts a try catch block. It creates an OleConnection object passing its constructor the connect string. The code then tries to open the connection. This is where the program will fail if the password is incorrect. If there is an error, the catch block writes the exception message to the Console window, displays an error message, and closes the main for to end the program. In general, you should not give detailed error messages to the user at this point. For example, if you are using a more sophisticated database that requires a user ID and password, you should not tell the user that the user ID is invalid because that lets an attacker know that he should try a different user name. Similarly, you should not say that the user name is valid but the password is invalid. That lets an attacker know that he has a valid user name. Simply give a generic “Invalid user ID/password” message and leave it at that. If any other error occurs, for example if the code tries to use a database table that doesn’t exist, the user sees an incorrect error message. If you run the program in Visual Studio, you can read the true message from the Console window. If the database opens successfully, the program calls the LoadData method to load data into the program’s ListView control. Download the example and see the earlier post for information about how that works. Finally, the program closes and disposes the database connection. If you need to use the database again later, store the connection in a form-level variable so you can reuse it. Because the connect string is already filled in, you will only need to open, use, and close the connection. Download the example and give it a try. The password for the included database is Secret. Download Example   Follow me on Twitter   RSS feed   Donate About RodStephens Rod Stephens is a software consultant and author who has written more than 30 books and 250 magazine articles covering C#, Visual Basic, Visual Basic for Applications, Delphi, and Java. This entry was posted in database, Office and tagged , , , , , , , , , . Bookmark the permalink.
__label__pos
0.749982
top of page How to create a Client for your API — .NET Core Nowadays, the creation of APIs is a very common solution when we want to make some features of our system available for other components. Since that, we can put an API as a public component and allow you to use it. The thing is that if you want to make some requests in your backend you must create an HTTP client in order to do the necessary requests. Why is it good to provide a client library to your API? That’s the main question that you want to have the right answer. It helps you to ignore some operations when you are developing a service because it avoids you from reinventing the wheel every time. Regarding this, it will increase your time to focus on your own system and not in creating the code to make HTTP requests. The client already gives you a response object ready to use instead of giving you a raw response which you must serialize for an object. You also don’t need to know which version of an API are you using. The client abstracts you from this. If the API logic or rules change, you don’t need necessarily to update the client version if the interfaces not change, meaning this the input, output, or route of an endpoint didn’t change. Let’s talk about code… For this case let’s say that we have a database that contains all users and their groups from a company. There is an API that exposes that information. We have two controllers, one for users and another for groups. [ApiController] [Route("api/users/")] public class UserController : ControllerBase { private List<User> _inMemoryUsers; public UserController() { _inMemoryUsers = new List<User>() { new User(1, "John", "[email protected]", true), new User(2, "Lisa", "[email protected]", true), new User(3, "Bernard", "[email protected]", false) }; } [HttpGet("all")] public IEnumerable<User> GetAll() { return _inMemoryUsers; } [HttpGet("{id}")] public User GetById(int id) { return _inMemoryUsers.SingleOrDefault(u => u.Id == id); } } As you can see my “database” it’s just a list of User and Group. [Route("api/groups")] [ApiController] public class GroupController : ControllerBase { private List<Group> _inMemoryGroups; public GroupController() { _inMemoryGroups = new List<Group>() { new Group(1, "Manager", new []{ 1 }), new Group(2, "Developers", new []{ 2, 3 }) }; } [HttpGet("all")] public IEnumerable<Group> GetAll() { return _inMemoryGroups; } [HttpGet("{id}")] public Group GetById(int id) { return _inMemoryGroups.SingleOrDefault(u => u.Id == id); } } The challenge is to provide a client interface that gives you access to all the available endpoints. Client Architecture We have the main interface IApiClient that contains the following definition: public interface IApiClient { IUserClient UserClient { get; } IGroupClient GroupClient { get; } } Its implementation is also very simple: public class MyApiClient : IApiClient { public MyApiClient(IUserClient userClient, IGroupClient groupClient) { UserClient = userClient; GroupClient = groupClient; } public IUserClient UserClient { get; } public IGroupClient GroupClient { get; } } With this interface, we will have access to all of the interfaces to interact with the existing controllers. The next code block shows the interfaces from IUserClient , which knows how to interact with the /users endpoint and IGroupClient which knows something similar but regarding the /groups endpoint. public interface IUserClient { Task<IEnumerable<User>> GetAll(); Task<User> GetById(int id); }public interface IGroupClient { Task<IEnumerable<Group>> GetAll(); Task<Group> GetById(int id); } As we can see, these interface methods are very similar to the existing ones on respective controllers. What if I add a new endpoint on my API? For each new endpoint for any controller, we must create a new interface method on the client library side and implement it. After that, we just need to generate a new version of the client to be distributed. So at that moment, we have: • API implemented with two controllers • A client library with two interfaces, one for each controller • The main interface which contains all the available interfaces to interact with the existing endpoints What is the purpose of the MyHttpClient class? The goal of this class is to simplify the work that must be done in order to make HTTP requests to our API. Instead of doing the same code for each client implementation, we create those features here and extend from it. public class MyHttpClient { private readonly HttpClient _httpClient; private const string BASE_URL = "http://localhost:5000/api/"; public MyHttpClient() { _httpClient = new HttpClient(); } public async Task<string> GetRequest(string url) { string endpointPath = BASE_URL + url; // Make the request HttpResponseMessage response = _httpClient. GetAsync(endpointPath). Result; if (!response.IsSuccessStatusCode) throw new ArgumentException($"The path {endpointPath} gets the following status code: " + response.StatusCode); return await response.Content.ReadAsStringAsync(); } } To make the objective of this class clear let’s see the implementation of IUserClient : public class UserClient : MyHttpClient, IUserClient { public async Task<IEnumerable<User>> GetAll() { string result = await GetRequest("users/all"); return JsonConvert. DeserializeObject<IEnumerable<User>>(result); } public async Task<User> GetById(int id) { string result = await GetRequest($"users/{id}"); return JsonConvert.DeserializeObject<User>(result); } } How to configure the dependency injection registry? I recommend you create a ServiceCollection extension method to configure all the necessary dependencies. With this, the users of your client just need to add this method to the initial configuration class. public static void AddApiClient(this IServiceCollection services) { services.AddSingleton<IUserClient, UserClient>(); services.AddSingleton<IGroupClient, GroupClient>(); services.AddSingleton<IApiClient, MyApiClient>(); } Note: Take care of the order of this registration because if you register the IApiClient in the first place, the application will fail because it won't know how to inject IUserClient and IGroupClient which are necessary to use IApiClient interface. How is it to use the client library? In this example, I will use a WorkerService project template. In the perspective of who will using your client library just need to: public static IHostBuilder CreateHostBuilder(string[] args) => Host.CreateDefaultBuilder(args). ConfigureServices((hostContext, services) => { services.AddApiClient(); services.AddHostedService<Worker>(); }); In your Worker class you can use the IApiClient to get data from both existing clients. Let’s take a look at the ExecuteAsync method. protected override async Task ExecuteAsync(CancellationToken stoppingToken) { IEnumerable<User> users = await _apiClient.UserClient.GetAll(); Group group = await _apiClient.GroupClient.GetById(2); (...) } Conclusion In my opinion, this is a simple and efficient way to create clients for APIs. The fact of having multiple controllers doesn’t create any issue here, you just need to add and implement a new interface and declare it on the main interface of the library ( IApiClient in this example). This post was made based on .NET Core but the architecture design can be the same for other languages. With this type of client library, you will able to provide to your users a very practical way to interact with your API, without any necessity to give direct access to it. Source: Medium The Tech Platform www.thetechplatform.com 0 comments bottom of page
__label__pos
0.896681
[FrontPage] [TitleIndex] [WordIndex 1. The notification system MoinMoin 1.7 includes a new notification system. Supplemental to the basic email based notification you can use a separate process running a Jabber/XMPP notification bot. See http://www.jabber.org/ and http://www.xmpp.org/ for more information on this protocol. The bot can be used to send notifications about various events occuring in your Wiki, or to work with the Wiki interactively. As it's a separate process, it doesn't block waiting for all notifications to be sent, so this solution should be suitable for large sites that have many users subscribed to particular changes. 1.1. Features 1.2. Notification options When the notification_bot_uri option is present in your wiki's config, new options become available in your user preferences. Make sure to set your JID in your user preferences. A multi-selection list allows you to select which events you want to be notified about, and two new check-boxes let you specify if notifications should be sent via email, jabber or both. 1.3. Known main issues with the Jabber bot 1.3.1. You need a recent version of pyxmpp, 1.0 won't work Either use a recent snapshot or get it directly from svn repository with: svn checkout http://pyxmpp.jajcus.net/svn/pyxmpp/trunk pyxmpp Add the resulting pyxmpp directory to your PYTHONPATH or perform a "full installation" as described here: To build the package just invoke: python setup.py build To install it: python setup.py install If you had some older version of PyXMPP it is better to uninstall it first (delete pyxmpp subdirectory of your site-packages directory) before installing the new version or things might not work correctly. You may also try: make and: make install 1.3.2. Traffic limits Jabber servers usually have rather tight data rate limits, so if your site generates a lot of traffic, the notification bot may become unstable and/or unusable. If such a condition occurs, you should consider running your own Jabber/XMPP server with relaxed limits. 1.3.3. Internationalization For the i18n to work, your wiki must be accessible when the notification bot is being started. 1.4. Setting up a Jabber bot These are step-by-step instructions you have to perform in order to get the Jabber notification bot up and running: 1. Install a post-1.0 version of pyxmpp. 2. The Jabber bot lives in a jabberbot/ directory in MoinMoin's distribution archive. Copy it to a suitable place. Make sure that PYTHONPATH environment variable contains the directory that contains the jabberbot package. 3. Create an account for the bot to use using any Jabber/XMPP client. Currently it can't register the account by itself. 4. Edit the bot's configuration file, config.py, and set its account details and a secret used to communicate with the wiki. 5. Edit your wikiconfig and add notification_bot_uri and secret options to it. You will find an example in file wiki/config/more_samples/jabber_wikiconfig_snippet, distributed along with MoinMoin. Make sure to set the secret to exactly the same string as you used in the bot's configuration! 6. Launch / restart your wiki. 7. Launch the bot: python main.py 2013-07-17 10:42
__label__pos
0.713986
summaryrefslogtreecommitdiff path: root/src/include/taler_amount_lib.h blob: c1f42e473d313d4a70d0802364f274c1ebf9be26 (plain) 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 /* This file is part of TALER Copyright (C) 2014, 2015, 2020 Taler Systems SA TALER is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. TALER is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with TALER; see the file COPYING. If not, see <http://www.gnu.org/licenses/> */ /** * @file include/taler_amount_lib.h * @brief amount-representation utility functions * @author Sree Harsha Totakura <[email protected]> */ #ifndef TALER_AMOUNT_LIB_H #define TALER_AMOUNT_LIB_H #ifdef __cplusplus extern "C" { #if 0 /* keep Emacsens' auto-indent happy */ } #endif #endif /** * @brief Number of characters (plus 1 for 0-termination) we use to * represent currency names (i.e. EUR, USD, etc.). We use 8+4 for * alignment in the `struct TALER_Amount`. The amount is typically an * ISO 4217 currency code when an alphanumeric 3-digit code is used. * For regional currencies, the first character should be a "*" followed * by a region-specific name (i.e. "*BRETAGNEFR"). */ #define TALER_CURRENCY_LEN 12 /** * Taler currency length as a string. */ #define TALER_CURRENCY_LEN_STR "12" /** * @brief The "fraction" value in a `struct TALER_Amount` represents which * fraction of the "main" value? * * Note that we need sub-cent precision here as transaction fees might * be that low, and as we want to support microdonations. * * An actual `struct Amount a` thus represents * "a.value + (a.fraction / #TALER_AMOUNT_FRAC_BASE)" units of "a.currency". */ #define TALER_AMOUNT_FRAC_BASE 100000000 /** * @brief How many digits behind the comma are required to represent the * fractional value in human readable decimal format? Must match * lg(#TALER_AMOUNT_FRAC_BASE). */ #define TALER_AMOUNT_FRAC_LEN 8 /** * Maximum legal 'value' for an amount, based on IEEE double (for JavaScript compatibility). */ #define TALER_AMOUNT_MAX_VALUE (1LLU << 52) GNUNET_NETWORK_STRUCT_BEGIN /** * @brief Amount, encoded for network transmission. */ struct TALER_AmountNBO { /** * Value in the main currency, in NBO. */ uint64_t value GNUNET_PACKED; /** * Fraction (integer multiples of #TALER_AMOUNT_FRAC_BASE), in NBO. */ uint32_t fraction GNUNET_PACKED; /** * Type of the currency being represented. */ char currency[TALER_CURRENCY_LEN]; }; GNUNET_NETWORK_STRUCT_END /** * @brief Representation of monetary value in a given currency. */ struct TALER_Amount { /** * Value (numerator of fraction) */ uint64_t value; /** * Fraction (integer multiples of #TALER_AMOUNT_FRAC_BASE). */ uint32_t fraction; /** * Currency string, left adjusted and padded with zeros. All zeros * for "invalid" values. */ char currency[TALER_CURRENCY_LEN]; }; /** * Parse monetary amount, in the format "T:V.F". * * @param str amount string * @param[out] amount amount to write the result to * @return #GNUNET_OK if the string is a valid monetary amount specification, * #GNUNET_SYSERR if it is invalid. */ enum GNUNET_GenericReturnValue TALER_string_to_amount (const char *str, struct TALER_Amount *amount); /** * Parse monetary amount, in the format "T:V.F". * The result is stored in network byte order (NBO). * * @param str amount string * @param[out] amount_nbo amount to write the result to * @return #GNUNET_OK if the string is a valid amount specification, * #GNUNET_SYSERR if it is invalid. */ enum GNUNET_GenericReturnValue TALER_string_to_amount_nbo (const char *str, struct TALER_AmountNBO *amount_nbo); /** * Get the value of "zero" in a particular currency. * * @param cur currency description * @param[out] amount amount to write the result to * @return #GNUNET_OK if @a cur is a valid currency specification, * #GNUNET_SYSERR if it is invalid. */ enum GNUNET_GenericReturnValue TALER_amount_set_zero (const char *cur, struct TALER_Amount *amount); /** * Test if the given @a amount is zero. * * @param amount amount to compare to zero * @return true if the amount is zero, * false if it is non-zero or invalid */ bool TALER_amount_is_zero (const struct TALER_Amount *amount); /** * Test if the given amount is valid. * * @param amount amount to check * @return #GNUNET_OK if @a amount is valid */ enum GNUNET_GenericReturnValue TALER_amount_is_valid (const struct TALER_Amount *amount); /** * Convert amount from host to network representation. * * @param[out] res where to store amount in network representation * @param d amount in host representation */ void TALER_amount_hton (struct TALER_AmountNBO *res, const struct TALER_Amount *d); /** * Convert amount from network to host representation. * * @param[out] res where to store amount in host representation * @param dn amount in network representation */ void TALER_amount_ntoh (struct TALER_Amount *res, const struct TALER_AmountNBO *dn); /** * Compare the value/fraction of two amounts. Does not compare the currency. * Comparing amounts of different currencies will cause the program to abort(). * If unsure, check with #TALER_amount_cmp_currency() first to be sure that * the currencies of the two amounts are identical. * * @param a1 first amount * @param a2 second amount * @return result of the comparison * -1 if `a1 < a2` * 1 if `a1 > a2` * 0 if `a1 == a2`. */ int TALER_amount_cmp (const struct TALER_Amount *a1, const struct TALER_Amount *a2); /** * Compare the value/fraction of two amounts. Does not compare the currency. * Comparing amounts of different currencies will cause the program to abort(). * If unsure, check with #TALER_amount_cmp_currency() first to be sure that * the currencies of the two amounts are identical. NBO variant. * * @param a1 first amount * @param a2 second amount * @return result of the comparison * -1 if `a1 < a2` * 1 if `a1 > a2` * 0 if `a1 == a2`. */ int TALER_amount_cmp_nbo (const struct TALER_AmountNBO *a1, const struct TALER_AmountNBO *a2); /** * Test if @a a1 and @a a2 are the same currency. * * @param a1 amount to test * @param a2 amount to test * @return #GNUNET_YES if @a a1 and @a a2 are the same currency * #GNUNET_NO if the currencies are different * #GNUNET_SYSERR if either amount is invalid */ enum GNUNET_GenericReturnValue TALER_amount_cmp_currency (const struct TALER_Amount *a1, const struct TALER_Amount *a2); /** * Test if @a a1 and @a a2 are the same currency, NBO variant. * * @param a1 amount to test * @param a2 amount to test * @return #GNUNET_YES if @a a1 and @a a2 are the same currency * #GNUNET_NO if the currencies are different * #GNUNET_SYSERR if either amount is invalid */ enum GNUNET_GenericReturnValue TALER_amount_cmp_currency_nbo (const struct TALER_AmountNBO *a1, const struct TALER_AmountNBO *a2); /** * Possible results from calling #TALER_amount_subtract() and * possibly other arithmetic operations. Negative values * indicate that the operation did not generate a result. */ enum TALER_AmountArithmeticResult { /** * Operation succeeded, result is positive. */ TALER_AAR_RESULT_POSITIVE = 1, /** * Operation succeeded, result is exactly zero. */ TALER_AAR_RESULT_ZERO = 0, /** * Operation failed, the result would have been negative. */ TALER_AAR_INVALID_NEGATIVE_RESULT = -1, /** * Operation failed, result outside of the representable range. */ TALER_AAR_INVALID_RESULT_OVERFLOW = -2, /** * Operation failed, inputs could not be normalized. */ TALER_AAR_INVALID_NORMALIZATION_FAILED = -3, /** * Operation failed, input currencies were not identical. */ TALER_AAR_INVALID_CURRENCIES_INCOMPATIBLE = -4 }; /** * Perform saturating subtraction of amounts. * * @param[out] diff where to store (@a a1 - @a a2), or invalid if @a a2 > @a a1 * @param a1 amount to subtract from * @param a2 amount to subtract * @return operation status, negative on failures */ enum TALER_AmountArithmeticResult TALER_amount_subtract (struct TALER_Amount *diff, const struct TALER_Amount *a1, const struct TALER_Amount *a2); /** * Perform addition of amounts. * * @param[out] sum where to store @a a1 + @a a2, set to "invalid" on overflow * @param a1 first amount to add * @param a2 second amount to add * @return operation status, negative on failures */ enum TALER_AmountArithmeticResult TALER_amount_add (struct TALER_Amount *sum, const struct TALER_Amount *a1, const struct TALER_Amount *a2); /** * Divide an amount by a @ divisor. Note that this function * may introduce a rounding error! * * @param[out] result where to store @a dividend / @a divisor * @param dividend amount to divide * @param divisor by what to divide, must be positive */ void TALER_amount_divide (struct TALER_Amount *result, const struct TALER_Amount *dividend, uint32_t divisor); /** * Divide one amount by another. Note that this function * may introduce a rounding error. It rounds down. * * @param dividend amount to divide * @param divisor by what to divide, must be positive * @return @a dividend / @a divisor, rounded down. -1 on currency mismatch, * INT_MAX for division by zero */ int TALER_amount_divide2 (const struct TALER_Amount *dividend, const struct TALER_Amount *divisor); /** * Multiply an @a amount by a @ factor. * * @param[out] result where to store @a amount * @a factor * @param amount amount to multiply * @param factor factor by which to multiply */ enum TALER_AmountArithmeticResult TALER_amount_multiply (struct TALER_Amount *result, const struct TALER_Amount *amount, uint32_t factor); /** * Normalize the given amount. * * @param[in,out] amount amount to normalize * @return #GNUNET_OK if normalization worked * #GNUNET_NO if value was already normalized * #GNUNET_SYSERR if value was invalid or could not be normalized */ int TALER_amount_normalize (struct TALER_Amount *amount); /** * Convert amount to string. * * @param amount amount to convert to string * @return freshly allocated string representation, * NULL if the @a amount was invalid */ char * TALER_amount_to_string (const struct TALER_Amount *amount); /** * Convert amount to string. * * @param amount amount to convert to string * @return statically allocated buffer with string representation, * NULL if the @a amount was invalid */ const char * TALER_amount2s (const struct TALER_Amount *amount); /** * Round the amount to something that can be transferred on the wire. * The rounding mode is specified via the smallest transferable unit, * which must only have a fractional part *or* only a value (either * of the two must be zero!). * * @param[in,out] amount amount to round down * @param[in] round_unit unit that should be rounded down to, and * either value part or the faction must be zero (but not both) * @return #GNUNET_OK on success, #GNUNET_NO if rounding was unnecessary, * #GNUNET_SYSERR if the amount or currency or @a round_unit was invalid */ enum GNUNET_GenericReturnValue TALER_amount_round_down (struct TALER_Amount *amount, const struct TALER_Amount *round_unit); #if 0 /* keep Emacsens' auto-indent happy */ { #endif #ifdef __cplusplus } #endif #endif
__label__pos
0.780171
基于udp协议的套接字通信,socketserver模块,阿里云部署 一.基于udp协议的套接字通信 TCP 协议与 UDP协议对比 tcp协议是可靠协议,对方必须回复一个ack确认信息,才会将自己这段的数据从内存中删除 tcp有链接,传输数据的效率较udp低 tcp协议有粘包问题 udp协议不可靠协议,发送一条消息就会立即删除,不管对方是否接收到 udp协议无链接,传输数据的效率高 udp协议称之为数据报协议,每次发送都是一个完整的数据报,一个发送唯一对应一个接收,所以udp协议没有粘包问题 udp是无链接的,先启动哪一端都不会报错 udp服务端 ss = socket() #创建一个服务器的套接字 ss.bind() #绑定服务器套接字 inf_loop: #服务器无限循环 cs = ss.recvfrom()/ss.sendto() # 对话(接收与发送) ss.close() # 关闭服务器套接字 udp客户端 cs = socket() # 创建客户套接字 comm_loop: # 通讯循环 cs.sendto()/cs.recvfrom() # 对话(发送/接收) cs.close() udp套接字简单示例 udp协议服务端 from socket import * import time server = socket(AF_INET, SOCK_DGRAM) server.bind(('127.0.0.1', 8080)) while True: data, client_addr = server.recvfrom(1024) time.sleep(10) server.sendto(data.upper(), client_addr) udp协议客户端 from socket import * client = socket(AF_INET, SOCK_DGRAM) while True: msg = input('>>: ').strip() client.sendto(msg.encode("utf-8"), ('127.0.0.1', 8080)) data, server_addr = client.recvfrom(1024) print(data.decode('utf-8')) 二.基于socketserver模块实现并发tcp 基于tcp的套接字,关键就是两个循环,一个链接循环,一个通信循环 socketserver模块中分两大类:server类(解决链接问题)和request类(解决通信问题) 服务端 import socketserver class MyRequestHandler(socketserver.BaseRequestHandler): def handle(self): # 处理通信 print(self.client_address) while True: try: data = self.request.recv(1024) # self.request=>conn if len(data) == 0: break self.request.send(data.upper()) except Exception: break self.request.close() if __name__ == '__main__': s = socketserver.ThreadingTCPServer(('127.0.0.1', 8080), MyRequestHandler, bind_and_activate=True) s.serve_forever() 客户端 from socket import * client=socket(AF_INET,SOCK_STREAM) client.connect(('127.0.0.1',8080)) while True: msg=input(">>: ").strip() if len(msg) == 0: continue client.send(msg.encode('utf-8')) data=client.recv(1024) print(data.decode('utf-8')) 三.基于socketserver模块实现并发udp 服务端 import socketserver class MyRequestHandler(socketserver.BaseRequestHandler): def handle(self): # 处理通信 data,server=self.request server.sendto(data.upper(),self.client_address) if __name__ == '__main__': s = socketserver.ThreadingUDPServer(('127.0.0.1', 8080), MyRequestHandler, bind_and_activate=True) s.serve_forever() 客户端 from socket import * client = socket(AF_INET, SOCK_DGRAM) while True: msg = input('>>: ').strip() client.sendto(msg.encode("utf-8"), ('127.0.0.1', 8080)) data, server_addr = client.recvfrom(1024) print(data.decode('utf-8')) 四.阿里云部署 服务端 import socketserver class MyRequestHandler(socketserver.BaseRequestHandler): def handle(self): # 处理通信 print(self.client_address) while True: try: data = self.request.recv(1024) # self.request=>conn if len(data) == 0: break self.request.send(data.upper()) except Exception: break self.request.close() if __name__ == '__main__': s = socketserver.ThreadingTCPServer(('127.0.0.1', 8080), MyRequestHandler, bind_and_activate=True) s.serve_forever() 客户端 from socket import * client=socket(AF_INET,SOCK_STREAM) client.connect(('121.199.45.113',8080)) while True: msg=input(">>: ").strip() if len(msg) == 0: continue client.send(msg.encode('utf-8')) data=client.recv(1024) print(data.decode('utf-8')) • 0 点赞 • 1 收藏 觉得还不错? 一键收藏 • 打赏 打赏 • 0 评论 “相关推荐”对你有帮助么? • 非常没帮助 • 没帮助 • 一般 • 有帮助 • 非常有帮助 提交 评论 添加红包 请填写红包祝福语或标题 红包个数最小为10个 红包金额最低5元 当前余额3.43前往充值 > 需支付:10.00 成就一亿技术人! 领取后你会自动成为博主和红包主的粉丝 规则 hope_wisdom 发出的红包 打赏作者 yikenaoguazi 你的鼓励将是我创作的最大动力 ¥2 ¥4 ¥6 ¥10 ¥20 输入1-500的整数 余额支付 (余额:-- ) 扫码支付 扫码支付:¥2 获取中 扫码支付 您的余额不足,请更换扫码支付或充值 打赏作者 实付 使用余额支付 点击重新获取 扫码支付 钱包余额 0 抵扣说明: 1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。 2.余额无法直接购买下载,可以购买VIP、C币套餐、付费专栏及课程。 余额充值
__label__pos
0.746544
Latest in Gaming Image credit: Breakfast Topic: How closely do you follow development news? Share Tweet Share Save In the era of Twitter and reddit, it's not hard to escalate from being an enthusiastic fan of WoW to rabidly following its creators' every last tweet and cough. You feel you must hear every last whisper about the object of your affections as soon as uttered or even implied. The hunger to analyze every last detail of the development process becomes something akin to drinking from a firehose -- or perhaps it's simply FOMO. I suspect the level of interest in developer insights is somewhat similar to the level of interest in attacking beta content as soon as it's available. Some players simply want to get there first. Others truly want to test and poke and weigh alternatives and then contribute their own two cents to the development process. Still others crave insider status, and some simply enjoy participating in every aspect of the online WoW community and feeling as though they're on top of every last thing there is to know about WoW. How closely do you follow news of WoW's ongoing development? If you keep your nose glued to every last scrap of insider information about the game, what's the appeal? Or do you follow development cycle news only loosely, here at WoW Insider or even only via patch notes? From around the web ear iconeye icontext file
__label__pos
0.560456
How to disconnect an external wallet from the INNO platform? Guidance for unlinking external wallet Once you disconnect the wallet, you won't be able to access all crypto assets, NFTs, quantities, and rewards held in the wallet. Also, to prevent fraudulent activity, this wallet cannot be connected to any other account, and you will need to reconnect to this account to regain access to the wallet. Once connected, the wallet can be disconnected after 24 hours. 1. Click the "Setting" button 2. Click the "Disconnect wallet" button 3. Agree to an agreement for a disconnection 4. Click " Disconnect wallet" Last updated
__label__pos
0.999093
Skip to content Create Http Triggered Azure Function in ASP.NET Core Azure functions runs in a serverless environment and is automatically scaled if they are hosted in a consumption plan (hosting plan). A consumtion plan means that resources is allocated when they are needed and that you only pay when your functions is running. Azure functions is microservices, independent microservices is a great way to scale your website and to improve modularity for your project. A http triggered Azure function runs when someone calls its url, you can call a function from your code, from an Azure Logic App or from Postman for example. Azure functions is publicly available (AuthorizationLevel.Anonymous) as default, you can restrict access to a function by choosing another authorization level. Set the authorization level to Function (AuthorizationLevel.Function) to require a function key as a Code parameter in the request. You can create function keys in Azure Portal for each function. Project templates and NuGet packages Install Azure Functions and Web Jobs Tools under Tools/Extensions and Updates in Visual studio to get templates and tools for azure functions. Create a new project and use Azure Functions as the template. You will also need to install Microsoft.NET.Sdk.Functions and Microsoft.Azure.Functions.Extensions to be able to use dependency injection in the project. Settings Your project have a host.json file and a local.settings.json file. You always need a connection string to a storage account for AzureWebJobsStorage in your application settings. The contents of the host.json file is shown below, function timeout is set to 10 minutes. An ordinary azure function can not run longer than 10 minutes, create a durable function if you have long running tasks. { "version": "2.0", "functionTimeout": "00:10:00", "logging": { "logLevel": { "default": "Information" } } } Startup Our startup class is used to register options and repositories so that we can use dependency injection in our project. using System; using Microsoft.Extensions.DependencyInjection; using Microsoft.Azure.Functions.Extensions.DependencyInjection; [assembly: FunctionsStartup(typeof(Fotbollstabeller.Functions.Startup))] namespace Fotbollstabeller.Functions { public class Startup : FunctionsStartup { public override void Configure(IFunctionsHostBuilder builder) { // Create options builder.Services.Configure<DatabaseOptions>(options => { options.connection_string = Environment.GetEnvironmentVariable("SqlConnectionString"); options.sql_retry_count = 3; }); // Add repositories builder.Services.AddSingleton<IDatabaseRepository, MsSqlRepository>(); builder.Services.AddSingleton<IGroupRepository, GroupRepository>(); builder.Services.AddSingleton<IFinalRepository, FinalRepository>(); builder.Services.AddSingleton<IXslTemplateRepository, XslTemplateRepository>(); builder.Services.AddSingleton<IXslProcessorRepository, XslProcessorRepository>(); } // End of the Configure method } // End of the class } // End of the namespace Function This class only contains one function, you can add multiple functions in a class. You need a function key to call this function in production, the uri will look like: https://myfunction.com/api/updategroupsandfinals?Code=XXXXXXXXXXX. No function key is needed during development. using System; using System.IO; using Microsoft.Extensions.Logging; using Microsoft.AspNetCore.Mvc; using Microsoft.AspNetCore.Http; using Microsoft.Azure.WebJobs; using Microsoft.Azure.WebJobs.Extensions.Http; namespace Fotbollstabeller.Functions { public class UpdateGroupsAndFinals { #region Variables private readonly ILogger logger; private readonly IXslProcessorRepository xsl_processor; #endregion #region Constructors public UpdateGroupsAndFinals(ILogger<UpdateGroupsAndFinals> logger, IXslProcessorRepository xsl_processor) { // Set values for instance variables this.logger = logger; this.xsl_processor = xsl_processor; } // End of the constructor #endregion #region Function [FunctionName("UpdateGroupsAndFinals")] public IActionResult Update([HttpTrigger(AuthorizationLevel.Function, "get", "post", Route = null)] HttpRequest request) { // Log the start of the function this.logger.LogInformation($"Application started at: {DateTime.UtcNow}"); // Get header values string header = request.Headers["Host"]; // Get query paramter string query = request.Query["Key1"]; // Get form value string form_value = request.Form["FormKey1"]; // Get the entire body string body = ""; using (StreamReader reader = new StreamReader(request.Body)) { body = reader.ReadToEnd(); } // Do the work this.xsl_processor.UpdateGroups(); this.xsl_processor.UpdateFinals(); // Log the end of the function this.logger.LogInformation($"Application ended at: {DateTime.UtcNow}"); return new OkObjectResult("Done"); } // End of the run method #endregion } // End of the class } // End of the namespace Durable function You will need a durable function if you have a long running task. To add a durable function: right-click your project file in Visual Studio, select Add/New Azure Function… and choose the Durable Functions Orchestration template. The uri to the method will look like: https://myfunction.com/api/DurableFunction_HttpStart. using System; using System.Collections.Generic; using System.Net.Http; using System.Threading.Tasks; using Microsoft.Azure.WebJobs; using Microsoft.Azure.WebJobs.Extensions.Http; using Microsoft.Extensions.Logging; namespace Fotbollstabeller.Functions { public class DurableFunction { #region Variables private readonly ILogger logger; private readonly IXslProcessorRepository xsl_processor; #endregion #region Constructors public DurableFunction(ILogger<DurableFunction> logger, IXslProcessorRepository xsl_processor) { // Set values for instance variables this.logger = logger; this.xsl_processor = xsl_processor; } // End of the constructor #endregion #region Functions [FunctionName("DurableFunction")] public async Task<List<string>> RunOrchestrator([OrchestrationTrigger] DurableOrchestrationContext context) { // Create a list with outputs List<string> outputs = new List<string>(); // Log the start of the function this.logger.LogInformation($"Application started at: {DateTime.UtcNow}"); // Call activities outputs.Add(await context.CallActivityAsync<string>("DurableFunction_Update", "Groups")); outputs.Add(await context.CallActivityAsync<string>("DurableFunction_Update", "Finals")); // Log the end of the function this.logger.LogInformation($"Application ended at: {DateTime.UtcNow}"); // Return a list return outputs; } // End of the RunOrchestrator method [FunctionName("DurableFunction_Update")] public string Update([ActivityTrigger] string name) { // Do the work if(name == "Groups") { this.xsl_processor.UpdateGroups(); } else if (name == "Finals") { this.xsl_processor.UpdateFinals(); } return $"Done {name}"; } // End of the Update method [FunctionName("DurableFunction_HttpStart")] public async Task<HttpResponseMessage> HttpStart([HttpTrigger(AuthorizationLevel.Anonymous, "get", "post")]HttpRequestMessage req, [OrchestrationClient]DurableOrchestrationClient starter) { // Function input comes from the request content. string instanceId = await starter.StartNewAsync("DurableFunction", null); this.logger.LogInformation($"Started orchestration with ID = '{instanceId}'."); return starter.CreateCheckStatusResponse(req, instanceId); } #endregion } // End of the class } // End of the namespace Publish application Right-click your project file and click on Publish… to create Azure resources and a template to publish your application. Create or select an existing hosting plan for your application, you can have many applications in the same hosting plan. Choose a consumption plan if you want automatic scaling. Leave a Reply Your email address will not be published.
__label__pos
0.83912
Tuesday October 13, 2015 Homework Help: MATH HELP HELP HELP HELP HELP Posted by MATH HELP HELPPP on Sunday, November 27, 2011 at 9:18pm. (a)Sarah wants to make a bracelet. She needs some beads and goes to a shop where they sell beads in exactly 100 different colours. i. How many ways can she pick 25 beads, each of a different colour? ii. How many ways can she pick any 25 beads? (b) Suppose that Sarah has picked 25 beads each of different colours. i If Sarah arranges all of these beads in a line on the table in front of her how many different arrangements are possible? What about if she arranges them in a circle on the table, how many arrangements are possible if two arrangements are considered identical if they can be obtained from one another by rotation? ii. If Sarah uses all of the 25 beads to make her bracelet, how many distinct bracelets can she make ? iii. What if she decides to make her bracelet using at least 20 of the beads, how many distinct bracelets can she make then? Answer this Question First Name: School Subject: Answer: Related Questions More Related Questions Members
__label__pos
0.999923
Login regex search in admin forms Author: mbee Posted: November 24, 2013 Language: Python Version: 1.6 Tags: admin search regex field Score: 1 (after 1 ratings) Despite warning coming from django developers, I'm still using admin classes to quickly get into reverse engineering databases. One feature is missing: searching into fields thanks to a regex. One dirty solution I found is to overwrite get_search_results. But most of the code comes from django itself. If anyone has a better idea ;) Usage: 1. works since get_search_results is part of ModelAdmin (1.5 if I remember well) 2. Inherit your Admin class from RegexModelAdmin 3. enclose by / the field you want to regex with: search_fields = ['/field/', ] 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 from django.contrib.admin.util import lookup_needs_distinct class RegexModelAdmin(admin.ModelAdmin): # code coming from django 1.6 def get_search_results(self, request, queryset, search_term): """ Returns a tuple containing a queryset to implement the search, and a boolean indicating if the results may contain duplicates. """ # Apply keyword searches. def construct_search(field_name): if field_name.startswith('^'): return "%s__istartswith" % field_name[1:] elif field_name.startswith('='): return "%s__iexact" % field_name[1:] elif field_name.startswith('@'): return "%s__search" % field_name[1:] elif field_name.startswith('/') and field_name.endswith('/') and len(field_name) > 2: return "%s__regex" % field_name[1:-1] else: return "%s__icontains" % field_name use_distinct = False if self.search_fields and search_term: orm_lookups = [construct_search(str(search_field)) for search_field in self.search_fields] for bit in search_term.split(): or_queries = [models.Q(**{orm_lookup: bit}) for orm_lookup in orm_lookups] queryset = queryset.filter(reduce(operator.or_, or_queries)) if not use_distinct: for search_spec in orm_lookups: if lookup_needs_distinct(self.opts, search_spec): use_distinct = True break return queryset, use_distinct class TokenAdmin(RegexModelAdmin): list_display = ('token', 'token_type') search_fields = ['/token/', '/token_type/', ] More like this 1. DRY Fieldsets by DrMeers 6 years ago 2. Add delete buttons to admin changelist by kylefox 8 years, 1 month ago 3. FieldsetForm by Ciantic 8 years, 3 months ago 4. Search child models in django admin changelist by navedr 9 months, 1 week ago 5. GeoDjango maps in admin TabularInlines by alanB 4 years, 9 months ago Comments andybak (on November 25, 2013): Despite warning coming from django developers, I'm still using admin classes to quickly get into reverse engineering databases. I think you might find the community divided on this point. The sheer number of admin apps and tweaks shows you're not alone in finding this a good approach to take! Nearly all the major Django CMS projects are heavily based on the Django Admin, for instance. # Please login first before commenting.
__label__pos
0.952409
Métodos de outro package Olá pessoal! Sou iniciante em JAVA e estou com uma dificuldade. Como chamar um método de uma classe que está em outro package? Sei que existe possibilidade mas não encontrei a solução ainda. Alguém pode me ajudar? Desde já agradeço a colaboração de todos. Igor Novaes Quando vc importa por exemplo uma classe de um package, suponha que vc tenho um package que tenha uma classe que se chama lista, que se trata de uma lista encadeada que vc prescise utilizar em um programa por exemplo, vc faz da seguinte forma import br.com.estruturas.lista; quando vc faz isso vc está importando a classe lista e vc pode usar todos os seus métodos. exemplo: Fila chama = new Fila(); chama.desenfileirar(); Ok! Deu certo… Mas Veja: Tenho a classe Cachorro do Package animal: package Animal; public class Cachorro { public void Comer(){ System.out.println("Comendo"); } } Tenho a classe funcionario do package Homem: package Homem; import Animal.Cachorro; public class Funcionario{ Funcionario(){ Cachorro c = new Cachorro(); c.Comer(); } } Não consigo executar o método comer da classe Cachorro na classe Homem. Como fazer isso? Aproveitar o método da classe Cachorro? Desde já agradeço. Vc deveria executar o método c.Comer() , qual erro o compilador está lhe devolvendo? lembrando que o nivel de aacesso para o metodo é essencial se eel for default ele nao é herdado para outro package… mesmo se a class for public.
__label__pos
0.739523
BLACK FRIDAY SALE: Save 50% on all my Swift books and bundles! >> Animating UIStackView subview layout Before we get onto the CloudKit part of this tutorial, we're going to add a bit more to our user interface. Specifically, we're going to add a "Tap to Play" button into the stack view, and have it animate so that it slides out when recording has finished. This is the work of only a few minutes thanks to UIStackView, and I'm sure you'll agree the results look marvelous. While we're finishing up the user interface, we're going to quickly add a couple more simple view controllers to let the user attach some metadata to their whistle: they'll be able to select what genre it is, then enter some free text with any comments – something like "I definitely remember hearing it in the early 90s" to help listeners narrow the scope a little. First, the play button. Add this new property: var playButton: UIButton! Now create it by placing this just before the end of loadRecordingUI(): playButton = UIButton() playButton.translatesAutoresizingMaskIntoConstraints = false playButton.setTitle("Tap to Play", for: .normal) playButton.isHidden = true playButton.alpha = 0 playButton.titleLabel?.font = UIFont.preferredFont(forTextStyle: .title1) playButton.addTarget(self, action: #selector(playTapped), for: .touchUpInside) stackView.addArrangedSubview(playButton) That's almost identical to the code for creating the record button, except the play button is set to hidden and alpha 0. Normally you need only one of these, but with stack views it's a little different: a view that is not hidden but has an alpha of 0 appears hidden (i.e., the user can't see it) but still occupies space in the stack view. By setting the button to be hidden and have alpha 0, we're saying "don't show it to the user, and don't let it take up any space in the stack view." We want to show and hide that play button when needed, meaning that we show it when recording finished successfully and hide it if the user taps to re-record. To solve the first of those, put this code into the finishRecording() method, just before setting the right bar button item: if playButton.isHidden { UIView.animate(withDuration: 0.35) { [unowned self] in self.playButton.isHidden = false self.playButton.alpha = 1 } } To solve the second, put this into recordTapped(), just after the call to startRecording(): if !playButton.isHidden { UIView.animate(withDuration: 0.35) { [unowned self] in self.playButton.isHidden = true self.playButton.alpha = 0 } } The isHidden property of any UIView subclass is a simple boolean, meaning that it's either true or false: a view is either hidden or it's not. As a result, if we had put this code anywhere else it would be meaningless to try to animate it, because there are no intermediate steps between "visible" and "invisible" to animate. But with UIStackView it has a meaning, and that meaning is brilliant: the stack view will animate the play button being shown, making it slide out neatly. Changing the alpha at the same time is the perfect finishing touch. When we created the play button we attached a method called playTapped() to it, which isn't written yet. But now that you've seen how to use AVAudioRecorder, the code to play using AVAudioPlayer should be second nature. Just in case you don't fancy writing the code for yourself, I'll walk you through the steps. First, create a new property to hold the audio player: var whistlePlayer: AVAudioPlayer! Now, add a playTapped() method using the code below. This grabs the shared whistle URL, creates an AVAudioPlayer inside a do/try/catch block, and makes it play. If there's an error loading the sound it shows an alert message to the user. Easy, right? @objc func playTapped() { let audioURL = RecordWhistleViewController.getWhistleURL() do { whistlePlayer = try AVAudioPlayer(contentsOf: audioURL) whistlePlayer.play() } catch { let ac = UIAlertController(title: "Playback failed", message: "There was a problem playing your whistle; please try re-recording.", preferredStyle: .alert) ac.addAction(UIAlertAction(title: "OK", style: .default)) present(ac, animated: true) } } If you run the app now I think you'll agree it looks good, particularly as the play button slides out in the stack view. Being able to hear what you recorded is of course a nice touch! Once the user has a recording they are happy with, we're going to ask them to choose which genre they think it belongs to, and add any comments. At this stage in your Swift coding career, both of these should be very simple view controllers that you can make in just a few minutes. Add a new file to your project, choosing Cocoa Touch Class. Make it a subclass of UITableViewController and name it SelectGenreViewController. Open the file for editing, and give it this property: static var genres = ["Unknown", "Blues", "Classical", "Electronic", "Jazz", "Metal", "Pop", "Reggae", "RnB", "Rock", "Soul"] This is marked as static so that we can use it in lots of other places – it's a shared list of all the music categories we want to work with. I added "Unknown" in there for people like me who struggle to tell the difference between some music types! In this class's viewDidLoad() method we're going to give it a title, configure the back button to take up less space, then register a cell for re-use. All old stuff: override func viewDidLoad() { super.viewDidLoad() title = "Select genre" navigationItem.backBarButtonItem = UIBarButtonItem(title: "Genre", style: .plain, target: nil, action: nil) tableView.register(UITableViewCell.self, forCellReuseIdentifier: "Cell") } For handling the content of the table view, it's all code you've seen in previous projects, but I want to point out three things: 1. When referencing the genres array we need to use SelectGenreViewController.genres because the array belongs to the class, not to our instance of the class. 2. When reading the text of the cell that was tapped, we're going to use the nil coalescing operator. The nil coalescing operator was covered in project 12, and in this situation it guarantees we have a genre. 3. When the user has selected a genre, we're going to create an instance of the class AddCommentsViewController, store that genre there, then push it onto our navigation stack. That's it – here are the methods for handling the table view data source and delegate: override func numberOfSections(in tableView: UITableView) -> Int { return 1 } override func tableView(_ tableView: UITableView, numberOfRowsInSection section: Int) -> Int { return SelectGenreViewController.genres.count } override func tableView(_ tableView: UITableView, cellForRowAt indexPath: IndexPath) -> UITableViewCell { let cell = tableView.dequeueReusableCell(withIdentifier: "Cell", for: indexPath) cell.textLabel?.text = SelectGenreViewController.genres[indexPath.row] cell.accessoryType = .disclosureIndicator return cell } override func tableView(_ tableView: UITableView, didSelectRowAt indexPath: IndexPath) { if let cell = tableView.cellForRow(at: indexPath) { let genre = cell.textLabel?.text ?? SelectGenreViewController.genres[0] let vc = AddCommentsViewController() vc.genre = genre navigationController?.pushViewController(vc, animated: true) } } That completes the class – I've deliberately kept it simple because this tutorial is about CloudKit rather than tables! You can now return to RecordWhistleViewController.swift and fill in the nextTapped() method like this: @objc func nextTapped() { let vc = SelectGenreViewController() navigationController?.pushViewController(vc, animated: true) } There's one more easy class to add before we get onto CloudKit, and that's AddCommentsViewController. This will show a full-screen UITextView for the user to type any extra comments into. We're going to give this new class three properties: one to hold the genre that gets passed in from SelectGenreViewController, one to hold a reference to the UITextView, and one to hold a placeholder string. That last property will be used to solve a long-standing UITextView annoyance: unlike UITextField, you can’t give a UITextView a placeholder string, which is a piece of text telling users what to type in there. We'll replicate this behavior by putting a default string into the text view and removing it when the user taps it. So, create a new Cocoa Touch class. Name it “AddCommentsViewController”, make it inherit from “UIViewController”, then give it these three properties: var genre: String! var comments: UITextView! let placeholder = "If you have any additional comments that might help identify your tune, enter them here." We're going to override the loadView() method of this class, using it to create a new UITextView that is pinned to all edges using Auto Layout. The only vaguely interesting thing here is that we'll use Dynamic Type to make the font size adjustable for the user. Here's the code: override func loadView() { view = UIView() view.backgroundColor = .white comments = UITextView() comments.translatesAutoresizingMaskIntoConstraints = false comments.delegate = self comments.font = UIFont.preferredFont(forTextStyle: .body) view.addSubview(comments) comments.leadingAnchor.constraint(equalTo: view.leadingAnchor).isActive = true comments.trailingAnchor.constraint(equalTo: view.trailingAnchor).isActive = true comments.topAnchor.constraint(equalTo: view.safeAreaLayoutGuide.topAnchor).isActive = true comments.bottomAnchor.constraint(equalTo: view.safeAreaLayoutGuide.bottomAnchor).isActive = true } As per usual, assigning the view controller to be a delegate of something requires conforming to a protocol. In this case, it means conforming to UITextViewDelegate, so please add that now. The absolute least we need to do to make this class work is to fill in the viewDidLoad() method with a title for the view controller and a right bar button item to let the user proceed with their submission, then to write a submitTapped() method that gets triggered when the button is tapped. Submitting will use another new class that we'll define shortly, called SubmitViewController, and will pass in the genre we got from SelectGenreViewController and the user's comments if there are any. If they kept the placeholder intact, we'll send an empty string on. Here's the code: override func viewDidLoad() { super.viewDidLoad() title = "Comments" navigationItem.rightBarButtonItem = UIBarButtonItem(title: "Submit", style: .plain, target: self, action: #selector(submitTapped)) comments.text = placeholder } @objc func submitTapped() { let vc = SubmitViewController() vc.genre = genre if comments.text == placeholder { vc.comments = "" } else { vc.comments = comments.text } navigationController?.pushViewController(vc, animated: true) } We could easily leave it there and get onto to the CloudKit work, but there's one small tweak we can make to improve the whole experience. As this view controller is the delegate for the comments text view, iOS will send us the textViewDidBeginEditing() message when the user starts editing it. We can then compare the text view's current text against the placeholder, and clear it if they match. Here's that code: func textViewDidBeginEditing(_ textView: UITextView) { if textView.text == placeholder { textView.text = "" } } That's it: in order to build the SubmitViewController class, it's time to introduce CloudKit. Save 50% in my Black Friday sale. SAVE 50% To celebrate Black Friday, all our books and bundles are half price, so you can take your Swift knowledge further without spending big! Get the Swift Power Pack to build your iOS career faster, get the Swift Platform Pack to builds apps for macOS, watchOS, and beyond, or get the Swift Plus Pack to learn advanced design patterns, testing skills, and more. Save 50% on all our books and bundles! Sponsor Hacking with Swift and reach the world's largest Swift community! BUY OUR BOOKS Buy Pro Swift Buy Pro SwiftUI Buy Swift Design Patterns Buy Testing Swift Buy Hacking with iOS Buy Swift Coding Challenges Buy Swift on Sundays Volume One Buy Server-Side Swift Buy Advanced iOS Volume One Buy Advanced iOS Volume Two Buy Advanced iOS Volume Three Buy Hacking with watchOS Buy Hacking with tvOS Buy Hacking with macOS Buy Dive Into SpriteKit Buy Swift in Sixty Seconds Buy Objective-C for Swift Developers Buy Beyond Code Was this page useful? Let us know! Average rating: 4.0/5   Unknown user You are not logged in Log in or create account   Link copied to your pasteboard.
__label__pos
0.86776
Skip to content Browse files Update master • Loading branch information... 1 parent 18269ff commit f6ff4f6b2bf4ffc2c4de3407a474e48803f4fe32 @nakamura-to committed Jun 16, 2012 Showing with 180 additions and 109 deletions. 1. +180 −109 README.md View 289 README.md @@ -11,62 +11,191 @@ $ npm install gate ## Example +You can get each asynchronous call result by an index or a name. + +### Indexed Results + +```js +var gate = require('gate'); +var fs = require('fs'); + +var g = gate.create(); +fs.readFile('file1', 'utf8', g.latch({data: 1})); +fs.readFile('file2', 'utf8', g.latch({data: 1})); + +g.await(function (err, results) { + if (err) throw err; + console.log(results[0].data); // content for file1 + console.log(results[1].data); // content for file2 +}); +``` + +### Named Results + ```js var gate = require('gate'); var fs = require('fs'); -var latch = gate.latch(); -fs.readFile('file1', 'utf8', latch({name: 'file1', data: 1})); -fs.readFile('file2', 'utf8', latch({name: 'file2', data: 1})); +var g = gate.create(); +fs.readFile('file1', 'utf8', g.latch('file1Result', {data: 1})); +fs.readFile('file2', 'utf8', g.latch('file2Result', {data: 1})); -latch.await(function (err, results) { +g.await(function (err, results) { if (err) throw err; - console.log(results[0]); // { name: 'file1', data: 'FILE1' } - console.log(results[1]); // { name: 'file2', data: 'FILE2' } + console.log(results.file1Result.data); // content for file1 + console.log(results.file2Result.data); // content for file2 }); ``` ## API `gate` module provides following API. -### latch([Number count]) -> Function +#### create([Number count]) -> Gate -Returns a function which represents a latch. The returned function provides following API. +Returns a Gate object. * `count`: Optional. A number of times the returned function must be called before an awaiting callback can start. -#### ([Object mapping][, Boolean skipErrorCheck]) -> Function +```js +var g = gate.create(); +``` + +```js +var g = gate.create(5); +``` + +-- + +`Gate` object provides following API. + +#### latch([String name][, Object mapping][, Boolean skipErrorCheck]) -> Function + +Returns a callback. The callback arguments are mapped with a `mapping` definition. +If a count is given to `gate.create()`, the count is decremented. + +* `name`: Optional. A name for callback arguments. +If not specified, an index number is used as name. + +```js +var g = gate.create(); +fs.readFile('file1', 'utf8', g.latch('file1Result', {data: 1})); // name specified +fs.readFile('file2', 'utf8', g.latch({data: 1})); // name not specified + +g.await(function (err, results) { + if (err) throw err; + console.log(results.file1Result.data); // content for file1 + console.log(results[1].data); // content for file2 +}); + +``` -Accepts an argument mapping definition and returns a callback. -If a count is given with `gate.latch()`, the count is decremented. +* `mapping`: Optional. An argument mapping definition. The `mapping` gives names to callback arguments. The `mappipng` must be a number or an object. +If the `mapping` is a number, single argument is mapped. +If the `mapping` is an object, multiple arguments can be mapped. +If the `mapping` is `null` or `undefined`, all arguments are mapped as Array. -* `mapping`: Optional. An argument mapping definition. The `mappipng` must be a number or an object. -If the `mapping` is a number, single callback argument is mapped. -If the `mapping` is an object, multiple callback arguments can be mapped. -If the `mapping` is `null` or `undefined`, all callback arguments are mapped as Array. +```js +var g = gate.create(); +fs.readFile('file1', 'utf8', g.latch(1)); // single argument +fs.readFile('file2', 'utf8', g.latch({data: 1, name: 'file2'})); // multiple arguments +fs.readFile('file3', 'utf8', g.latch()); // all arguments + +g.await(function (err, results) { + if (err) throw err; + console.log(results[0]); // content for file1 + console.log(results[1].data); // content for file2 + console.log(results[1].name); // arbitrary value for file2 + console.log(results[2][0]); // read error for file3 (1st argument of fs.readFile callback) + console.log(results[2][1]); // content for file3 (2nd argument of fs.readFile callback) +}); + +``` * `skipErrorCheck`: Optional. Indicates whether error check is skipped or not. Default value is `false`. -### count() -> Number +```js +var g = gate.create(); +fs.readFile('file1', 'utf8', g.latch({err: 0, data: 1}, true)); +fs.readFile('file2', 'utf8', g.latch({err: 0, data: 1}, true)); + +g.await(function (err, results) { + console.log(results[0].err); // read error file1 + console.log(results[0].data); // content for file1 + console.log(results[1].err); // read error for file2 + console.log(results[1].data); // content for file2 +}); -Gets a current count, if a count is given with `gate.latch()`. -Otherwise, `-1` is returned. +``` #### val(Object value) -> Object -Wraps a value to distinguish between a value as argument and a mapping index. +Indicates that a value is a plain value and it's not a mapping index. + +* `value`: Required. A plain value. -* `value`: Required. A value. +```js +var g = gate.create(); + +// a number for a `data` property is a mapping index, but a number for `g.val()` is a plain value +fs.readFile('file1', 'utf8', g.latch({data: 1, i: g.val(1)})); +fs.readFile('file2', 'utf8', g.latch({data: 1, i: g.val(2)})); + +g.await(function (err, results) { + if (err) throw err; + console.log(results[0].data); // content for file1 + console.log(results[0].i); // 1 + console.log(results[1].data); // content for file2 + console.log(results[1].i); // 2 +}); +``` #### await(Function callback(err, results)) -> Function Awaits all asynchronous calls completion and then runs a `callback`. * `callback`: Required. A callback to run after all asynchronous calls completion. * `err`: Required. An error to indicate any asynhronous calls are failed. -If the `err` is `object` type, it have a property `callbackLocation` to inform which async call is related to the `err`. -* `results`: Required. An array to contain each asynchronous call result as element. +If the `err` exists, it have a property `gate_location` to inform which async call is related to the `err`. +* `results`: Required. An array to contain each asynchronous call result(arguments of asynchronous callback) as element. + +```js +var g = gate.create(); +fs.readFile('file1', 'utf8', g.latch({data: 1})); +fs.readFile('file2', 'utf8', g.latch({data: 1})); + +g.await(function (err, results) { + if (err) { + console.log(err.gate_location); // error location + console.log(err); + } else { + console.log(results[0].data); + console.log(results[1].data); + } +}); + +``` + +### count: Number + + +Gets a current count, if a count is given to `gate.latch()`. +Otherwise, `-1` is returned. +This is a readonly property. + +```js +var gate = require('gate'); +var fs = require('fs'); + +var g = gate.create(2); + +console.log(g.count); // 2 +fs.readFile('file1', 'utf8', g.latch({data: 1})); +console.log(g.count); // 1 +fs.readFile('file2', 'utf8', g.latch({data: 1})); +console.log(g.count); // 0 +``` + ## More Examples @@ -81,66 +210,66 @@ var gate = require('gate'); var fs = require('fs'); var exec = require('child_process').exec; -var latch = gate.latch(); +var g = gate.create(); // single mapping: arguments[1] in the callback will be result fs.readFile('file1', 'utf8', latch(1)); // multiple mapping: arguments[1] and argments[2] in the callback will be result -exec('cat *.js bad_file | wc -l', latch({stdout: 1, stderr: 2})); +exec('cat *.js bad_file | wc -l', g.latch({stdout: 1, stderr: 2})); // all mapping: arguments will be result -fs.readFile('file2', 'utf8', latch()); +fs.readFile('file2', 'utf8', g.latch()); -latch.await(function (err, results) { +g.await(function (err, results) { if (err !== null) { console.log('exec error: ' + err); } console.log('file1: ' + results[0]); - console.log('stdout[1]: ' + results[1].stdout); - console.log('stderr[1]: ' + results[1].stderr); + console.log('stdout: ' + results[1].stdout); + console.log('stderr: ' + results[1].stderr); console.log('file2: ' + results[2]); }); ``` ### Count Down -Pass a count number to `gate.latch()` to wait until a set of callbacks being made. +Pass a count number to `gate.create()` to wait until a set of callbacks being completed. ```js var gate = require('gate'); var fs = require('fs'); var files = ['file1', 'file2']; -var latch = gate.latch(files.length); -latch.await(function (err, results) { +var g = gate.create(files.length); +g.await(function (err, results) { if (err) throw err; - console.log(results[0]); // { name: 'file1', data: 'FILE1' } - console.log(results[1]); // { name: 'file2', data: 'FILE2' } + console.log(results[0]); + console.log(results[1]); }); process.nextTick(function () { files.forEach(function (file) { - fs.readFile(file, 'utf8', latch({name: file, data: 1})); + fs.readFile(file, 'utf8', g.latch({name: file, data: 1})); }); }); ``` ### Error Handling -Check `err.callbackLocation` at an await callback to know which async call is related to the `err`. +Check `err.gate_location` at an await callback to know which async call is related to the `err`. ```js var gate = require('gate'); var fs = require('fs'); -var latch = gate.latch(); -fs.readFile('file1', 'utf8', latch({name: 'file1', data: 1})); -fs.readFile('non-existent', 'utf8', latch({name: 'non-existent', data: 1})); +var g = gate.create(); +fs.readFile('file1', 'utf8', g.latch({name: 'file1', data: 1})); +fs.readFile('non-existent', 'utf8', g.latch({name: 'non-existent', data: 1})); -latch.await(function (err, results) { +g.await(function (err, results) { if (err) { - console.log(err + ', callbackLocation: ' + err.callbackLocation); + console.log(err + ', gate_location: ' + err.gate_location); } else { console.log(results); } @@ -149,18 +278,18 @@ latch.await(function (err, results) { ### Error Check Skipping -Pass `true` as 2nd argument to a function being returned from `gate.latch()`. +Pass `true` as 2nd argument to a function being returned from `gate.create()`. This is useful to check each error one by one. ```js var gate = require('gate'); var fs = require('fs'); -var latch = gate.latch(); -fs.readFile('non-existent1', 'utf8', latch({err: 0, data: 1}, true)); -fs.readFile('non-existent2', 'utf8', latch({err: 0, data: 1}, true)); +var g = gate.create(); +fs.readFile('non-existent1', 'utf8', g.latch({err: 0, data: 1}, true)); +fs.readFile('non-existent2', 'utf8', g.latch({err: 0, data: 1}, true)); -latch.await(function (err, results) { +g.await(function (err, results) { results.forEach(function (result) { if (result.err) { console.log(result.err); @@ -169,83 +298,25 @@ latch.await(function (err, results) { }); ``` -### Loop - -Call a function being returned from `gate.latch()` in a loop. - -```js -var gate = require('gate'); -var fs = require('fs'); - -var latch = gate.latch(); -['file1', 'file2'].forEach(function (file) { - fs.readFile(file, 'utf8', latch({name: file, data: 1})); -}); - -latch.await(function (err, results) { - if (err) throw err; - console.log(results[0]); // { name: 'file1', data: 'FILE1' } - console.log(results[1]); // { name: 'file2', data: 'FILE2' } -}); -``` - ### Loop in Parallel -Use [Parray](https://github.com/nakamura-to/parray) to loop array elements in parallel. +Use [Parray](https://github.com/nakamura-to/parray) to loop large array elements in parallel. ```js var gate = require('gate'); var parray = require('parray'); var fs = require('fs'); var files = ['file1', 'file2']; -var latch = gate.latch(); +var g = gate.create(); parray.forEach(files, function (file) { - fs.readFile(file, 'utf8', latch({name: file, data: 1})); + fs.readFile(file, 'utf8', g.latch({name: file, data: 1})); }, function () { - latch.await(function (err, results) { + g.await(function (err, results) { if (err) throw err; - console.log(results[0]); // { name: 'file1', data: 'FILE1' } - console.log(results[1]); // { name: 'file2', data: 'FILE2' } + console.log(results[0]); + console.log(results[1]); console.log('done'); }); }); ``` - -## Best Practices - -### Wrap Asynchronous Callbacks - -Wrap asynchronous callbacks to manage error handling. -For example, define a following function. - -```js -function bind(callback) { - return function (err) { - if (err) { - err.message += ', callbackLocation: ' + err.callbackLocation; - throw err; - } - callback.apply(null, Array.prototype.slice.call(arguments, 1)); - } -} -``` - -Use above function as follows: - -```js -var gate = require('gate'); -var fs = require('fs'); - -var latch = gate.latch(); -fs.readFile('path1', 'utf8', latch(1)); -fs.readFile('path2', 'utf8', latch(1)); -latch.await(bind(function (results) { - fs.writeFile('path3', results[0] + results[1], bind(function () { - fs.readFile('path3', 'utf8', bind(function (data) { - console.log(data); - console.log('all done'); - })); - })); -})); -``` 0 comments on commit f6ff4f6 Please sign in to comment. Something went wrong with that request. Please try again.
__label__pos
0.999616
The Silver Lining Lessons & Learnings from a salesforce certified technical architect. A Beginner’s Guide to Object-Oriented Programming with Apex: 1. Encapsulation with 4 comments This is part 2 in the series “A Beginner’s Guide to Object-Oriented Programming with Apex” and will cover the aspect of Encapsulation. A Beginner’s Guide to Object-Oriented Programming with Apex 1. Introduction 2. Encapsulation (this post) 3. Abstraction 4. Polymorphism What the hell is Encapsulation? Encapsulation isn’t a tricky concept in itself but some confusion does arise from it’s close relationship with Abstraction (to be covered in my next post). Broadly it is defined as one of, or both of the following: 1. An information hiding mechanism. 2. A bundling of data and methods that operate on that data. These are pretty abstract statements but in reality are very simple. I will demystify them with examples below but for now let’s use an analogy: Imagine you have a watch (or if you have one just consider it, don’t worry about the imagining part) that tells you the date and time. You can adjust the date and time using buttons/knobs/switches but you have no idea what happens inside the watch, just that it results in the date and time changing. You have some data (the date and time), you have some methods to operate on that data (buttons/knobs/switches) and there is a whole lot of stuff going on in the background that you don’t know about (and it’s better that way, imagine you had to adjust all those bits and pieces yourself). That’s encapsulation baby! What does “An information hiding mechanism” mean? This statement essentially means that some data is hidden from the consumer of the service provided e.g. • You have a VF page that has a button that is used to calculate the cost of an item. • Calculating the cost is done in the page controller and requires a number of different values used in a complex calculation. • The page doesn’t need to “see” the variables or know how the calculation works, and in fact you want to prevent anyone from messing with the variable values. • Thus we make the calculated value available to the page (or any other consumer) and hide everything else. What does “A bundling of data and methods that operate on that data” mean? This definition is even easier to understand, it’s simply stating that a piece of code should be grouped together with the data and actions important to that code’s purpose. This will become more clear with the example below. How do we Encapsulate? Fulfilling the two requirements of encapsulation with Apex is a cinch. The language mechanisms available are: I’ve linked to the relevant documentation for each since they’re great and I encourage you to bookmark them as they’re core to OOP in Apex. Time to look at an example. Below is a class that encapsulates the functionality of making an HTTP callout. I’ve numbered the comments and the corresponding explanations follow the code. /* * 1. HttpService - demonstrate the concept of Encapsulation */ public Class HttpService { // 2. Private variables cannot be accessed by other code outside of this class. // Those marked as &amp;amp;amp;amp;quot;final&amp;amp;amp;amp;quot; are reference values private final String HEADER_PROP_AUTH = 'Authorisation'; private final String HEADER_PROP_USERNAME = 'Username'; private final String HEADER_PROP_PWD = 'Password'; private String baseUrl = 'http://example.com'; /* * 3. Public Methods */ // 4. Public method that allows consumers of this service class to build an HttpRequest object public HttpRequest buildRequest(String url, String method, Map&amp;amp;amp;amp;lt;String, String&amp;amp;amp;amp;gt; headerProperties) { HttpRequest req = new HttpRequest(); req.setMethod(method); req.setEndpoint(url); for(String property: headerProperties.keySet()){ String value = headerProperties.get(property); // 5. A special action is required for the auth header value if (property == HEADER_PROP_AUTH){ value = buildAuthHeader(headerProperties); } req.setHeader(property, value); } return req; } // 8. This is an &amp;amp;amp;amp;quot;overloaded&amp;amp;amp;amp;quot; method with a simple signature for convenience i.e. // it defaults the value for the URL. public HttpRequest buildRequest(String method, Map&amp;amp;amp;amp;lt;String, String&amp;amp;amp;amp;gt; headerProperties){ return buildRequest(baseUrl, method, headerProperties); } // ... more public methods to do with the HTTP service go here. They have been omitted for // brevity ... /* * 6. Private Methods */ // 7. Private method used to build the authentication header. This method is only available to // other methods of this class. private String buildAuthHeader(Map&amp;amp;amp;amp;lt;String, String&amp;amp;amp;amp;gt; headerProperties){ String username = headerProperties.get(HEADER_PROP_USERNAME); String pwd = headerProperties.get(HEADER_PROP_PWD); Blob headerValue = Blob.valueOf(username + ':' + pwd); String authorisationHeader = EncodingUtil.base64Encode(headerValue); return authorisationHeader; } } The numbers below correspond with the numbers in the comments in the code above. 1. The very foundation of encapsulation is the concept of a Class. A mechanism for collecting data and functionality that belongs together given the context of the problem we’re trying to solve. 2. The access modifier “Private” guarantees that these variables can’t be read from or written to from outside of this class. They are “hidden”. 3. The public methods provide a mechanism for other classes to access the functionality and data that this class wants to share. 4. I have omitted the other methods you would need for a full-blown HttpService since this method illustrates the point. 5. A point of interest in that for the Auth header property a special action is required. However the fact that this needs to occur is not relevant to the consumer of this method i.e. it is hidden. 6. As with the private variables, methods marked as “private” cannot be seen outside of this class. 7. Data is transformed into new data within this method, but that secret is only known within this class. 8. Overloading isn’t a facet of encapsulation but it’s a neat trick. This overloaded method has one less parameter than the other method, a small convenience to spare your finger joints. Why Should I Use Encapsulation? • Provides namespaces/unique names for application data and functionality making code more manageable e.g. in the old days when classes didn’t exist and all your code would be lumped together you could only have one function called calculate(). With classes you can group functionality into classes and not have to think up crazy and creative names for your variables and methods e.g. Tax.calculate() and IQ.calculate(). • Protecting data that is read-only or irrelevant to the consumer. • Control over how variables are managed e.g. validating input before assigning it to a variable. • Code is easier to read, maintain and extend. • OOP is impossible without it! If you’re a developer you’ve probably been using Encapsulation without even knowing it. Hopefully this post has formalised your knowledge and filled in a few gaps. Onwards and upwards! Next time I’ll be writing about Abstraction. Written by Wes June 17, 2014 at 6:09 pm Posted in Apex, SalesForce Tagged with , , 4 Responses Subscribe to comments with RSS. 1. Great post !!! Waiting for next. Thanks for your effort to teach people. Ravi Kant June 18, 2014 at 7:00 am Leave a Reply Fill in your details below or click an icon to log in: WordPress.com Logo You are commenting using your WordPress.com account. Log Out / Change ) Twitter picture You are commenting using your Twitter account. Log Out / Change ) Facebook photo You are commenting using your Facebook account. Log Out / Change ) Google+ photo You are commenting using your Google+ account. Log Out / Change ) Connecting to %s Follow Get every new post delivered to your Inbox. Join 2,355 other followers %d bloggers like this:
__label__pos
0.734855
0 Swiftで以下のコードを試してみました。(参考にしたコード) import UIKit class ViewController: UIViewController, UIPickerViewDelegate, UIPickerViewDataSource { //ユーザー設定 let userDefaults = UserDefaults.standard var choices = ["Toyota","Honda","Chevy","Audi","BMW"] var pickerView = UIPickerView() var typeValue = String() override func viewDidLoad() { super.viewDidLoad() self.userDefaults.register(defaults: ["pickerviewSelectRow": 0]) } //MARK - PickerView func numberOfComponents(in pickerView: UIPickerView) -> Int { return 1 } func pickerView(_ pickerView: UIPickerView, numberOfRowsInComponent component: Int) -> Int { return choices.count } func pickerView(_ pickerView: UIPickerView, titleForRow row: Int, forComponent component: Int) -> String? { return choices[row] } func pickerView(_ pickerView: UIPickerView, didSelectRow row: Int, inComponent component: Int) { if row == 0 { typeValue = "Toyota" } else if row == 1 { typeValue = "Honda" } else if row == 2 { typeValue = "Chevy" } else if row == 3 { typeValue = "Audi" } else if row == 4 { typeValue = "BMW" } self.userDefaults.set(row, forKey: "pickerviewSelectRow") self.userDefaults.synchronize() } //MARK - UIAlertController @IBAction func showChoices(_ sender: Any) { let alert = UIAlertController(title: "Car Choices", message: "\n\n\n\n\n\n", preferredStyle: .alert) //alert.isModalInPopover = true let pickerFrame = UIPickerView(frame: CGRect(x: 5, y: 20, width: 250, height: 140)) let pickerviewSelectRow = self.userDefaults.object(forKey: "pickerviewSelectRow") as! Int pickerFrame.selectRow(2, inComponent: 0, animated: true) // 初期値 liveStreamPickerViewSelect pickerFrame.dataSource = self pickerFrame.delegate = self alert.view.addSubview(pickerFrame) alert.addAction(UIAlertAction(title: "Cancel", style: .cancel, handler: nil)) alert.addAction(UIAlertAction(title: "OK", style: .default, handler: { (UIAlertAction) in print("You selected " + self.typeValue ) })) self.present(alert,animated: true, completion: nil ) } } userdefaultに値は保存されてはいるようなのですが、「Honda」などを選択してpickerviewを閉じたあとに、初期値をselectRowで指定しても毎回「Toyota」でpickerviewが起動してしまいます。 何が原因でしょうか? 1 件の回答 1 1 何が原因でしょうか? selectRow(_:inComponent:animated:)を呼び出すタイミングの問題でしょう。 私が試したところ、dataSourceを設定した後なら動作するようです。 pickerFrame.dataSource = self pickerFrame.selectRow(2/*pickerviewSelectRow*/, inComponent: 0, animated: true) // 初期値 pickerFrame.delegate = self alert.view.addSubview(pickerFrame) dataSourceが設定されるまで、UIPickerViewには選択肢がいくつあるかもわからないので、その中のどれかを選択しろと言われても困ると言うことだろうと思います。 ちなみにUIAlertControllerのview階層を直接いじると言ったことはApple側は想定していないので、別のバージョンのiOSで試してもうまくいくか(動作確認はXcode 12.0.1)は保証できません。 確実に動作させたい場合には、alertに似せた別画面(view controller)を作ってそれを表示してやると言ったことをしたほうがいいかもしれません。 ところで、あなたのコードでは現在pickerViewなんてプロパティは全く使われていませんね。参考サイトのコードをコピペした状態のままなのでしょうが、コードが大きくなり複雑化してくるにつれ、そう言った使われてもいないプロパティ存在の弊害も大きくなっていくので、「もう使われていない」と分かった時点でさっさと削除してしまった方が良いでしょう。 1 • 私の環境でも正常に動作しました。ありがとうございます。 – gncc Commented 2020年9月29日 2:45 この質問に回答するには、ログインする必要があります。 求めていた回答ではありませんか? のタグが付いた他の質問を参照する。
__label__pos
0.603508
Properties Label 77.2.m.a Level 77 Weight 2 Character orbit 77.m Analytic conductor 0.615 Analytic rank 0 Dimension 8 CM no Inner twists 4 Related objects Downloads Learn more about Newspace parameters Level: \( N \) = \( 77 = 7 \cdot 11 \) Weight: \( k \) = \( 2 \) Character orbit: \([\chi]\) = 77.m (of order \(15\), degree \(8\), minimal) Newform invariants Self dual: no Analytic conductor: \(0.614848095564\) Analytic rank: \(0\) Dimension: \(8\) Coefficient field: \(\Q(\zeta_{15})\) Coefficient ring: \(\Z[a_1, a_2, a_3]\) Coefficient ring index: \( 1 \) Twist minimal: yes Sato-Tate group: $\mathrm{SU}(2)[C_{15}]$ $q$-expansion Coefficients of the \(q\)-expansion are expressed in terms of a primitive root of unity \(\zeta_{15}\). We also show the integral \(q\)-expansion of the trace form. \(f(q)\) \(=\) \( q + ( -1 + \zeta_{15}^{4} - \zeta_{15}^{5} + \zeta_{15}^{7} ) q^{2} + ( -\zeta_{15} - \zeta_{15}^{6} ) q^{3} + ( -\zeta_{15} + \zeta_{15}^{2} - \zeta_{15}^{6} ) q^{4} + ( -1 + \zeta_{15} - \zeta_{15}^{4} - \zeta_{15}^{5} - \zeta_{15}^{7} ) q^{5} + ( 1 + \zeta_{15}^{3} + \zeta_{15}^{6} ) q^{6} + ( -2 + 2 \zeta_{15}^{2} - 2 \zeta_{15}^{3} - \zeta_{15}^{4} - 2 \zeta_{15}^{6} + 2 \zeta_{15}^{7} ) q^{7} + ( 2 - 2 \zeta_{15}^{2} + \zeta_{15}^{6} - 2 \zeta_{15}^{7} ) q^{8} -2 \zeta_{15}^{7} q^{9} +O(q^{10})\) \( q + ( -1 + \zeta_{15}^{4} - \zeta_{15}^{5} + \zeta_{15}^{7} ) q^{2} + ( -\zeta_{15} - \zeta_{15}^{6} ) q^{3} + ( -\zeta_{15} + \zeta_{15}^{2} - \zeta_{15}^{6} ) q^{4} + ( -1 + \zeta_{15} - \zeta_{15}^{4} - \zeta_{15}^{5} - \zeta_{15}^{7} ) q^{5} + ( 1 + \zeta_{15}^{3} + \zeta_{15}^{6} ) q^{6} + ( -2 + 2 \zeta_{15}^{2} - 2 \zeta_{15}^{3} - \zeta_{15}^{4} - 2 \zeta_{15}^{6} + 2 \zeta_{15}^{7} ) q^{7} + ( 2 - 2 \zeta_{15}^{2} + \zeta_{15}^{6} - 2 \zeta_{15}^{7} ) q^{8} -2 \zeta_{15}^{7} q^{9} + ( -1 + \zeta_{15} + \zeta_{15}^{2} - \zeta_{15}^{3} + \zeta_{15}^{4} + 2 \zeta_{15}^{5} + \zeta_{15}^{7} ) q^{10} + ( 2 - 4 \zeta_{15}^{2} + 2 \zeta_{15}^{3} - 2 \zeta_{15}^{4} - \zeta_{15}^{5} + 2 \zeta_{15}^{6} - 2 \zeta_{15}^{7} ) q^{11} + ( 1 - \zeta_{15} - \zeta_{15}^{4} + \zeta_{15}^{5} ) q^{12} + ( \zeta_{15}^{2} + 3 \zeta_{15}^{3} + 3 \zeta_{15}^{6} + \zeta_{15}^{7} ) q^{13} + ( 2 + \zeta_{15}^{2} + 3 \zeta_{15}^{5} - 2 \zeta_{15}^{7} ) q^{14} + ( -1 - \zeta_{15}^{2} - \zeta_{15}^{3} + \zeta_{15}^{6} - \zeta_{15}^{7} ) q^{15} + ( 3 \zeta_{15}^{4} + 3 \zeta_{15}^{7} ) q^{16} + ( -2 - 2 \zeta_{15}^{3} + 2 \zeta_{15}^{4} - 2 \zeta_{15}^{6} + 2 \zeta_{15}^{7} ) q^{17} + ( -2 + 2 \zeta_{15} - 2 \zeta_{15}^{3} + 2 \zeta_{15}^{4} + 2 \zeta_{15}^{7} ) q^{18} + ( -3 + 6 \zeta_{15} - 3 \zeta_{15}^{5} + 3 \zeta_{15}^{7} ) q^{19} + ( -3 \zeta_{15}^{2} + \zeta_{15}^{3} + \zeta_{15}^{6} - 3 \zeta_{15}^{7} ) q^{20} + ( -1 + 2 \zeta_{15}^{5} ) q^{21} + ( 2 \zeta_{15}^{2} + 3 \zeta_{15}^{3} + \zeta_{15}^{6} + 2 \zeta_{15}^{7} ) q^{22} + ( 5 - 2 \zeta_{15} - 2 \zeta_{15}^{4} + 5 \zeta_{15}^{5} ) q^{23} + ( -2 + \zeta_{15}^{2} - 2 \zeta_{15}^{3} + 2 \zeta_{15}^{4} - 2 \zeta_{15}^{5} - 2 \zeta_{15}^{6} + 2 \zeta_{15}^{7} ) q^{24} + ( -4 \zeta_{15} - 7 \zeta_{15}^{4} - 4 \zeta_{15}^{7} ) q^{26} -5 \zeta_{15}^{3} q^{27} + ( -1 - 2 \zeta_{15} + 2 \zeta_{15}^{5} - 3 \zeta_{15}^{6} ) q^{28} + ( 6 - 3 \zeta_{15}^{2} + 6 \zeta_{15}^{3} + 3 \zeta_{15}^{6} - 3 \zeta_{15}^{7} ) q^{29} + ( 1 + 2 \zeta_{15} + \zeta_{15}^{5} - \zeta_{15}^{7} ) q^{30} + ( -1 + 6 \zeta_{15}^{2} - \zeta_{15}^{3} + \zeta_{15}^{4} - \zeta_{15}^{5} - \zeta_{15}^{6} + \zeta_{15}^{7} ) q^{31} + ( -1 + \zeta_{15} + \zeta_{15}^{2} - \zeta_{15}^{3} + \zeta_{15}^{4} - 5 \zeta_{15}^{5} + \zeta_{15}^{7} ) q^{32} + ( -4 + \zeta_{15} + 2 \zeta_{15}^{4} - 4 \zeta_{15}^{5} + 2 \zeta_{15}^{7} ) q^{33} + ( 4 - 2 \zeta_{15}^{2} + 2 \zeta_{15}^{3} - 2 \zeta_{15}^{7} ) q^{34} + ( -6 + \zeta_{15}^{2} - 2 \zeta_{15}^{3} + 6 \zeta_{15}^{4} - 6 \zeta_{15}^{5} - 2 \zeta_{15}^{6} + 4 \zeta_{15}^{7} ) q^{35} + ( 2 - 2 \zeta_{15}^{2} + 2 \zeta_{15}^{6} - 2 \zeta_{15}^{7} ) q^{36} + ( -5 \zeta_{15} - 3 \zeta_{15}^{4} - 5 \zeta_{15}^{7} ) q^{37} + ( -3 \zeta_{15} + 3 \zeta_{15}^{5} - 3 \zeta_{15}^{6} ) q^{38} + ( 4 - \zeta_{15} + 4 \zeta_{15}^{3} - 4 \zeta_{15}^{4} + \zeta_{15}^{5} + 3 \zeta_{15}^{6} - 4 \zeta_{15}^{7} ) q^{39} -5 \zeta_{15}^{4} q^{40} + ( -1 + \zeta_{15}^{2} - 4 \zeta_{15}^{6} + \zeta_{15}^{7} ) q^{41} + ( 1 - 2 \zeta_{15}^{3} - \zeta_{15}^{4} + \zeta_{15}^{5} - 2 \zeta_{15}^{6} - \zeta_{15}^{7} ) q^{42} + ( 2 + 3 \zeta_{15}^{2} - 3 \zeta_{15}^{3} + 3 \zeta_{15}^{7} ) q^{43} + ( -4 + 3 \zeta_{15} - 4 \zeta_{15}^{5} + \zeta_{15}^{7} ) q^{44} + ( 4 - 4 \zeta_{15} - 4 \zeta_{15}^{2} + 4 \zeta_{15}^{3} - 4 \zeta_{15}^{4} + 2 \zeta_{15}^{5} - 4 \zeta_{15}^{7} ) q^{45} + ( -3 + 2 \zeta_{15}^{2} - 3 \zeta_{15}^{3} + 3 \zeta_{15}^{4} - 3 \zeta_{15}^{5} - 3 \zeta_{15}^{6} + 3 \zeta_{15}^{7} ) q^{46} + ( 4 - 6 \zeta_{15} + 4 \zeta_{15}^{5} - 4 \zeta_{15}^{7} ) q^{47} + ( 3 + 3 \zeta_{15}^{3} ) q^{48} + ( -5 + 5 \zeta_{15} + 3 \zeta_{15}^{3} + 5 \zeta_{15}^{4} - 5 \zeta_{15}^{5} + 5 \zeta_{15}^{7} ) q^{49} + ( 2 \zeta_{15} + 2 \zeta_{15}^{4} + 2 \zeta_{15}^{7} ) q^{51} + ( 2 \zeta_{15} + \zeta_{15}^{2} + \zeta_{15}^{5} + 2 \zeta_{15}^{6} ) q^{52} + ( 5 - 2 \zeta_{15}^{2} + 5 \zeta_{15}^{3} - 5 \zeta_{15}^{4} + 5 \zeta_{15}^{5} + 5 \zeta_{15}^{6} - 5 \zeta_{15}^{7} ) q^{53} + ( 5 \zeta_{15} + 5 \zeta_{15}^{4} ) q^{54} + ( -4 + 6 \zeta_{15}^{2} - 9 \zeta_{15}^{3} - 6 \zeta_{15}^{6} + 6 \zeta_{15}^{7} ) q^{55} + ( -1 - 2 \zeta_{15} + 4 \zeta_{15}^{2} - 4 \zeta_{15}^{3} - 2 \zeta_{15}^{4} + \zeta_{15}^{5} + 4 \zeta_{15}^{7} ) q^{56} + ( -6 \zeta_{15}^{2} + 3 \zeta_{15}^{3} + 3 \zeta_{15}^{6} - 6 \zeta_{15}^{7} ) q^{57} + ( -6 - 3 \zeta_{15} - 6 \zeta_{15}^{5} + 6 \zeta_{15}^{7} ) q^{58} + ( -6 + 6 \zeta_{15} - 6 \zeta_{15}^{3} + 6 \zeta_{15}^{4} + 6 \zeta_{15}^{7} ) q^{59} + ( -2 + 3 \zeta_{15} - 2 \zeta_{15}^{3} + 2 \zeta_{15}^{4} - 3 \zeta_{15}^{5} + \zeta_{15}^{6} + 2 \zeta_{15}^{7} ) q^{60} + ( 2 - 2 \zeta_{15} + 6 \zeta_{15}^{4} + 2 \zeta_{15}^{5} + 6 \zeta_{15}^{7} ) q^{61} + ( -4 - \zeta_{15}^{2} - 4 \zeta_{15}^{3} + \zeta_{15}^{6} - \zeta_{15}^{7} ) q^{62} + ( -6 \zeta_{15} - 2 \zeta_{15}^{6} ) q^{63} + ( -\zeta_{15}^{2} - 2 \zeta_{15}^{3} - 2 \zeta_{15}^{6} - \zeta_{15}^{7} ) q^{64} + ( 5 + 5 \zeta_{15} + 5 \zeta_{15}^{4} + 5 \zeta_{15}^{5} ) q^{65} + ( 5 - 2 \zeta_{15} - 2 \zeta_{15}^{2} + 5 \zeta_{15}^{3} - 5 \zeta_{15}^{4} + 2 \zeta_{15}^{5} + 3 \zeta_{15}^{6} - 5 \zeta_{15}^{7} ) q^{66} + ( 1 - \zeta_{15} - \zeta_{15}^{2} + \zeta_{15}^{3} - \zeta_{15}^{4} + 10 \zeta_{15}^{5} - \zeta_{15}^{7} ) q^{67} + 2 \zeta_{15}^{7} q^{68} + ( -2 + 2 \zeta_{15}^{2} - 5 \zeta_{15}^{6} + 2 \zeta_{15}^{7} ) q^{69} + ( 9 - 2 \zeta_{15} - 6 \zeta_{15}^{2} + 9 \zeta_{15}^{3} - 6 \zeta_{15}^{4} + 6 \zeta_{15}^{6} - 8 \zeta_{15}^{7} ) q^{70} + ( -2 + 2 \zeta_{15}^{3} - 2 \zeta_{15}^{6} ) q^{71} + ( -2 + 2 \zeta_{15} - 2 \zeta_{15}^{4} - 2 \zeta_{15}^{5} - 2 \zeta_{15}^{7} ) q^{72} + ( 9 - 3 \zeta_{15} - 6 \zeta_{15}^{2} + 9 \zeta_{15}^{3} - 9 \zeta_{15}^{4} + 6 \zeta_{15}^{6} - 9 \zeta_{15}^{7} ) q^{73} + ( 5 \zeta_{15} + 3 \zeta_{15}^{2} + 3 \zeta_{15}^{5} + 5 \zeta_{15}^{6} ) q^{74} + ( -3 - 6 \zeta_{15}^{2} + 6 \zeta_{15}^{3} - 6 \zeta_{15}^{7} ) q^{76} + ( -7 + 8 \zeta_{15} + 3 \zeta_{15}^{2} - 9 \zeta_{15}^{3} + 6 \zeta_{15}^{4} - 4 \zeta_{15}^{5} + 3 \zeta_{15}^{6} + 7 \zeta_{15}^{7} ) q^{77} + ( -7 + 4 \zeta_{15}^{2} - 4 \zeta_{15}^{3} + 4 \zeta_{15}^{7} ) q^{78} + ( 6 - 6 \zeta_{15}^{4} + 6 \zeta_{15}^{5} - 9 \zeta_{15}^{7} ) q^{79} + ( 6 \zeta_{15} + 3 \zeta_{15}^{2} + 3 \zeta_{15}^{5} + 6 \zeta_{15}^{6} ) q^{80} + ( 1 - \zeta_{15}^{2} + \zeta_{15}^{3} - \zeta_{15}^{4} + \zeta_{15}^{6} - \zeta_{15}^{7} ) q^{81} + ( 1 - \zeta_{15} + 2 \zeta_{15}^{4} + \zeta_{15}^{5} + 2 \zeta_{15}^{7} ) q^{82} -9 \zeta_{15}^{3} q^{83} + ( 3 \zeta_{15} - \zeta_{15}^{2} + \zeta_{15}^{6} + 2 \zeta_{15}^{7} ) q^{84} + ( -2 + 2 \zeta_{15}^{2} + 4 \zeta_{15}^{6} + 2 \zeta_{15}^{7} ) q^{85} + ( -2 + 2 \zeta_{15}^{4} - 2 \zeta_{15}^{5} - \zeta_{15}^{7} ) q^{86} + ( 3 - 3 \zeta_{15} - 3 \zeta_{15}^{2} + 3 \zeta_{15}^{3} - 3 \zeta_{15}^{4} - 3 \zeta_{15}^{5} - 3 \zeta_{15}^{7} ) q^{87} + ( 2 - 5 \zeta_{15} - 6 \zeta_{15}^{2} + 2 \zeta_{15}^{3} - 2 \zeta_{15}^{4} + 2 \zeta_{15}^{5} - 3 \zeta_{15}^{6} - 2 \zeta_{15}^{7} ) q^{88} + ( -8 + 4 \zeta_{15} + 4 \zeta_{15}^{4} - 8 \zeta_{15}^{5} ) q^{89} + ( 4 \zeta_{15}^{2} + 2 \zeta_{15}^{3} + 2 \zeta_{15}^{6} + 4 \zeta_{15}^{7} ) q^{90} + ( 9 + \zeta_{15} - 6 \zeta_{15}^{2} + 3 \zeta_{15}^{5} - 2 \zeta_{15}^{6} - 9 \zeta_{15}^{7} ) q^{91} + ( -2 + 7 \zeta_{15}^{2} - 2 \zeta_{15}^{3} - 7 \zeta_{15}^{6} + 7 \zeta_{15}^{7} ) q^{92} + ( 6 - 6 \zeta_{15} - 5 \zeta_{15}^{4} + 6 \zeta_{15}^{5} - 5 \zeta_{15}^{7} ) q^{93} + ( -2 + 4 \zeta_{15} - 2 \zeta_{15}^{3} + 2 \zeta_{15}^{4} - 4 \zeta_{15}^{5} + 2 \zeta_{15}^{6} + 2 \zeta_{15}^{7} ) q^{94} + ( -3 - 9 \zeta_{15} + 12 \zeta_{15}^{2} - 3 \zeta_{15}^{3} + 3 \zeta_{15}^{4} - 12 \zeta_{15}^{6} + 3 \zeta_{15}^{7} ) q^{95} + ( 1 - 5 \zeta_{15} + \zeta_{15}^{5} - \zeta_{15}^{7} ) q^{96} + ( -\zeta_{15}^{2} + 3 \zeta_{15}^{3} + 3 \zeta_{15}^{6} - \zeta_{15}^{7} ) q^{97} + ( 5 - 8 \zeta_{15} - 5 \zeta_{15}^{2} + 5 \zeta_{15}^{3} - 8 \zeta_{15}^{4} - 5 \zeta_{15}^{7} ) q^{98} + ( -4 + 2 \zeta_{15}^{2} - 4 \zeta_{15}^{3} - 8 \zeta_{15}^{6} + 2 \zeta_{15}^{7} ) q^{99} +O(q^{100})\) \(\operatorname{Tr}(f)(q)\) \(=\) \( 8q - 2q^{2} + q^{3} + 2q^{4} - 5q^{5} + 4q^{6} - 5q^{7} + 10q^{8} - 2q^{9} + O(q^{10}) \) \( 8q - 2q^{2} + q^{3} + 2q^{4} - 5q^{5} + 4q^{6} - 5q^{7} + 10q^{8} - 2q^{9} - 10q^{10} + 4q^{11} + 2q^{12} - 10q^{13} + 3q^{14} - 10q^{15} + 6q^{16} - 4q^{17} - 6q^{18} - 3q^{19} - 10q^{20} - 16q^{21} - 4q^{22} + 16q^{23} + 5q^{24} - 15q^{26} + 10q^{27} - 12q^{28} + 24q^{29} + 5q^{30} + 8q^{31} + 18q^{32} - 11q^{33} + 24q^{34} - 5q^{35} + 8q^{36} - 13q^{37} - 9q^{38} + 5q^{39} - 5q^{40} + 2q^{41} + 10q^{42} + 28q^{43} - 12q^{44} + 8q^{46} + 6q^{47} + 18q^{48} - 11q^{49} + 6q^{51} - 5q^{52} - 12q^{53} + 10q^{54} + 10q^{55} - 24q^{57} - 21q^{58} - 18q^{59} + 5q^{60} + 18q^{61} - 28q^{62} - 2q^{63} + 6q^{64} + 30q^{65} + 2q^{66} - 38q^{67} + 2q^{68} - 2q^{69} + 20q^{70} - 16q^{71} - 10q^{72} + 15q^{73} - 14q^{74} - 48q^{76} - 4q^{77} - 40q^{78} + 9q^{79} - 15q^{80} + q^{81} + 7q^{82} + 18q^{83} + 2q^{84} - 20q^{85} - 7q^{86} + 18q^{87} - 5q^{88} - 24q^{89} + 50q^{91} + 16q^{92} + 8q^{93} + 8q^{94} + 15q^{95} - 2q^{96} - 14q^{97} + 4q^{98} - 4q^{99} + O(q^{100}) \) Character values We give the values of \(\chi\) on generators for \(\left(\mathbb{Z}/77\mathbb{Z}\right)^\times\). \(n\) \(45\) \(57\) \(\chi(n)\) \(-1 - \zeta_{15}^{5}\) \(-1 + \zeta_{15}^{2} - \zeta_{15}^{3} - \zeta_{15}^{6} + \zeta_{15}^{7}\) Embeddings For each embedding \(\iota_m\) of the coefficient field, the values \(\iota_m(a_n)\) are shown below. For more information on an embedded modular form you can click on its label. Label \(\iota_m(\nu)\) \( a_{2} \) \( a_{3} \) \( a_{4} \) \( a_{5} \) \( a_{6} \) \( a_{7} \) \( a_{8} \) \( a_{9} \) \( a_{10} \) 4.1 −0.978148 + 0.207912i −0.104528 0.994522i 0.913545 0.406737i 0.669131 0.743145i 0.669131 + 0.743145i 0.913545 + 0.406737i −0.978148 0.207912i −0.104528 + 0.994522i 0.0646021 0.614648i 0.669131 + 0.743145i 1.58268 + 0.336408i −2.04275 0.909491i 0.500000 0.363271i −0.0510966 + 2.64526i 0.690983 2.12663i 0.209057 1.98904i −0.690983 + 1.19682i 9.1 1.08268 + 1.20243i 0.913545 + 0.406737i −0.0646021 + 0.614648i −2.18720 0.464905i 0.500000 + 1.53884i −2.53158 0.768834i 1.80902 1.31433i −1.33826 1.48629i −1.80902 3.13331i 16.1 −1.58268 0.336408i −0.104528 + 0.994522i 0.564602 + 0.251377i 1.49622 + 1.66172i 0.500000 1.53884i −1.51351 + 2.17009i 1.80902 + 1.31433i 1.95630 + 0.415823i −1.80902 3.13331i 25.1 −0.564602 0.251377i −0.978148 0.207912i −1.08268 1.20243i 0.233733 2.22382i 0.500000 + 0.363271i 1.59618 2.11002i 0.690983 + 2.12663i −1.82709 0.813473i −0.690983 + 1.19682i 37.1 −0.564602 + 0.251377i −0.978148 + 0.207912i −1.08268 + 1.20243i 0.233733 + 2.22382i 0.500000 0.363271i 1.59618 + 2.11002i 0.690983 2.12663i −1.82709 + 0.813473i −0.690983 1.19682i 53.1 −1.58268 + 0.336408i −0.104528 0.994522i 0.564602 0.251377i 1.49622 1.66172i 0.500000 + 1.53884i −1.51351 2.17009i 1.80902 1.31433i 1.95630 0.415823i −1.80902 + 3.13331i 58.1 0.0646021 + 0.614648i 0.669131 0.743145i 1.58268 0.336408i −2.04275 + 0.909491i 0.500000 + 0.363271i −0.0510966 2.64526i 0.690983 + 2.12663i 0.209057 + 1.98904i −0.690983 1.19682i 60.1 1.08268 1.20243i 0.913545 0.406737i −0.0646021 0.614648i −2.18720 + 0.464905i 0.500000 1.53884i −2.53158 + 0.768834i 1.80902 + 1.31433i −1.33826 + 1.48629i −1.80902 + 3.13331i \(n\): e.g. 2-40 or 990-1000 Embeddings: e.g. 1-3 or 60.1 Significant digits: Format: Inner twists Char Parity Ord Mult Type 1.a even 1 1 trivial 7.c even 3 1 inner 11.c even 5 1 inner 77.m even 15 1 inner Twists        By twisting character orbit Char Parity Ord Mult Type Twist Min Dim 1.a even 1 1 trivial 77.2.m.a 8 3.b odd 2 1 693.2.by.a 8 7.b odd 2 1 539.2.q.a 8 7.c even 3 1 inner 77.2.m.a 8 7.c even 3 1 539.2.f.a 4 7.d odd 6 1 539.2.f.b 4 7.d odd 6 1 539.2.q.a 8 11.b odd 2 1 847.2.n.b 8 11.c even 5 1 inner 77.2.m.a 8 11.c even 5 1 847.2.e.a 4 11.c even 5 2 847.2.n.c 8 11.d odd 10 1 847.2.e.b 4 11.d odd 10 2 847.2.n.a 8 11.d odd 10 1 847.2.n.b 8 21.h odd 6 1 693.2.by.a 8 33.h odd 10 1 693.2.by.a 8 77.h odd 6 1 847.2.n.b 8 77.j odd 10 1 539.2.q.a 8 77.m even 15 1 inner 77.2.m.a 8 77.m even 15 1 539.2.f.a 4 77.m even 15 1 847.2.e.a 4 77.m even 15 2 847.2.n.c 8 77.m even 15 1 5929.2.a.q 2 77.n even 30 1 5929.2.a.j 2 77.o odd 30 1 847.2.e.b 4 77.o odd 30 2 847.2.n.a 8 77.o odd 30 1 847.2.n.b 8 77.o odd 30 1 5929.2.a.l 2 77.p odd 30 1 539.2.f.b 4 77.p odd 30 1 539.2.q.a 8 77.p odd 30 1 5929.2.a.o 2 231.z odd 30 1 693.2.by.a 8              By twisted newform orbit Twist Min Dim Char Parity Ord Mult Type 77.2.m.a 8 1.a even 1 1 trivial 77.2.m.a 8 7.c even 3 1 inner 77.2.m.a 8 11.c even 5 1 inner 77.2.m.a 8 77.m even 15 1 inner 539.2.f.a 4 7.c even 3 1 539.2.f.a 4 77.m even 15 1 539.2.f.b 4 7.d odd 6 1 539.2.f.b 4 77.p odd 30 1 539.2.q.a 8 7.b odd 2 1 539.2.q.a 8 7.d odd 6 1 539.2.q.a 8 77.j odd 10 1 539.2.q.a 8 77.p odd 30 1 693.2.by.a 8 3.b odd 2 1 693.2.by.a 8 21.h odd 6 1 693.2.by.a 8 33.h odd 10 1 693.2.by.a 8 231.z odd 30 1 847.2.e.a 4 11.c even 5 1 847.2.e.a 4 77.m even 15 1 847.2.e.b 4 11.d odd 10 1 847.2.e.b 4 77.o odd 30 1 847.2.n.a 8 11.d odd 10 2 847.2.n.a 8 77.o odd 30 2 847.2.n.b 8 11.b odd 2 1 847.2.n.b 8 11.d odd 10 1 847.2.n.b 8 77.h odd 6 1 847.2.n.b 8 77.o odd 30 1 847.2.n.c 8 11.c even 5 2 847.2.n.c 8 77.m even 15 2 5929.2.a.j 2 77.n even 30 1 5929.2.a.l 2 77.o odd 30 1 5929.2.a.o 2 77.p odd 30 1 5929.2.a.q 2 77.m even 15 1 Hecke kernels This newform subspace can be constructed as the kernel of the linear operator \( T_{2}^{8} + 2 T_{2}^{7} + 2 T_{2}^{5} + 9 T_{2}^{4} + 8 T_{2}^{3} + 5 T_{2}^{2} + 3 T_{2} + 1 \) acting on \(S_{2}^{\mathrm{new}}(77, [\chi])\). Hecke Characteristic Polynomials $p$ $F_p(T)$ $2$ \( 1 + 2 T + 2 T^{2} - 6 T^{3} - 17 T^{4} - 24 T^{5} - T^{6} + 47 T^{7} + 103 T^{8} + 94 T^{9} - 4 T^{10} - 192 T^{11} - 272 T^{12} - 192 T^{13} + 128 T^{14} + 256 T^{15} + 256 T^{16} \) $3$ \( 1 - T + 3 T^{2} - 8 T^{3} + 8 T^{4} + 7 T^{5} + 6 T^{6} + 56 T^{7} - 137 T^{8} + 168 T^{9} + 54 T^{10} + 189 T^{11} + 648 T^{12} - 1944 T^{13} + 2187 T^{14} - 2187 T^{15} + 6561 T^{16} \) $5$ \( ( 1 + 5 T + 15 T^{2} + 25 T^{3} + 25 T^{4} )^{2}( 1 - 5 T + 10 T^{2} - 25 T^{3} + 75 T^{4} - 125 T^{5} + 250 T^{6} - 625 T^{7} + 625 T^{8} ) \) $7$ \( 1 + 5 T + 18 T^{2} + 55 T^{3} + 149 T^{4} + 385 T^{5} + 882 T^{6} + 1715 T^{7} + 2401 T^{8} \) $11$ \( 1 - 4 T + 10 T^{2} + 64 T^{3} - 261 T^{4} + 704 T^{5} + 1210 T^{6} - 5324 T^{7} + 14641 T^{8} \) $13$ \( ( 1 + 5 T + 27 T^{2} + 115 T^{3} + 584 T^{4} + 1495 T^{5} + 4563 T^{6} + 10985 T^{7} + 28561 T^{8} )^{2} \) $17$ \( 1 + 4 T + 17 T^{2} - 120 T^{3} - 740 T^{4} - 3144 T^{5} - 649 T^{6} + 49990 T^{7} + 275299 T^{8} + 849830 T^{9} - 187561 T^{10} - 15446472 T^{11} - 61805540 T^{12} - 170382840 T^{13} + 410338673 T^{14} + 1641354692 T^{15} + 6975757441 T^{16} \) $19$ \( 1 + 3 T - 26 T^{2} - 21 T^{3} + 252 T^{4} - 1302 T^{5} - 1792 T^{6} + 24084 T^{7} + 90161 T^{8} + 457596 T^{9} - 646912 T^{10} - 8930418 T^{11} + 32840892 T^{12} - 51998079 T^{13} - 1223192906 T^{14} + 2681615217 T^{15} + 16983563041 T^{16} \) $23$ \( ( 1 - 8 T + 7 T^{2} - 88 T^{3} + 1248 T^{4} - 2024 T^{5} + 3703 T^{6} - 97336 T^{7} + 279841 T^{8} )^{2} \) $29$ \( ( 1 - 12 T + 25 T^{2} + 288 T^{3} - 2471 T^{4} + 8352 T^{5} + 21025 T^{6} - 292668 T^{7} + 707281 T^{8} )^{2} \) $31$ \( ( 1 - 19 T + 210 T^{2} - 1691 T^{3} + 10649 T^{4} - 52421 T^{5} + 201810 T^{6} - 566029 T^{7} + 923521 T^{8} )( 1 + 11 T + 60 T^{2} - 11 T^{3} - 991 T^{4} - 341 T^{5} + 57660 T^{6} + 327701 T^{7} + 923521 T^{8} ) \) $37$ \( 1 + 13 T + 112 T^{2} + 71 T^{3} - 4102 T^{4} - 46406 T^{5} - 63616 T^{6} + 1263548 T^{7} + 15622363 T^{8} + 46751276 T^{9} - 87090304 T^{10} - 2350603118 T^{11} - 7687808422 T^{12} + 4923420947 T^{13} + 287361357808 T^{14} + 1234114402729 T^{15} + 3512479453921 T^{16} \) $41$ \( ( 1 - T - 25 T^{2} + 221 T^{3} + 1064 T^{4} + 9061 T^{5} - 42025 T^{6} - 68921 T^{7} + 2825761 T^{8} )^{2} \) $43$ \( ( 1 - 7 T + 87 T^{2} - 301 T^{3} + 1849 T^{4} )^{4} \) $47$ \( 1 - 6 T + 7 T^{2} + 690 T^{3} - 6420 T^{4} + 23196 T^{5} + 57521 T^{6} - 1846680 T^{7} + 13766039 T^{8} - 86793960 T^{9} + 127063889 T^{10} + 2408278308 T^{11} - 31327552020 T^{12} + 158248054830 T^{13} + 75454507303 T^{14} - 3039738722778 T^{15} + 23811286661761 T^{16} \) $53$ \( 1 + 12 T + 103 T^{2} - 420 T^{3} - 9840 T^{4} - 102312 T^{5} - 99751 T^{6} + 3945120 T^{7} + 55833959 T^{8} + 209091360 T^{9} - 280200559 T^{10} - 15231903624 T^{11} - 77642333040 T^{12} - 175642107060 T^{13} + 2282929196287 T^{14} + 14096533678044 T^{15} + 62259690411361 T^{16} \) $59$ \( 1 + 18 T + 239 T^{2} + 1374 T^{3} + 5292 T^{4} - 7812 T^{5} + 470053 T^{6} + 8281674 T^{7} + 95738891 T^{8} + 488618766 T^{9} + 1636254493 T^{10} - 1604420748 T^{11} + 64125074412 T^{12} + 982305986826 T^{13} + 10081147540199 T^{14} + 44795726726742 T^{15} + 146830437604321 T^{16} \) $61$ \( 1 - 18 T + 141 T^{2} + 538 T^{3} - 19524 T^{4} + 199396 T^{5} - 460901 T^{6} - 9059916 T^{7} + 127866487 T^{8} - 552654876 T^{9} - 1715012621 T^{10} + 45259103476 T^{11} - 270326199684 T^{12} + 454392809938 T^{13} + 7264372784901 T^{14} - 56569371048378 T^{15} + 191707312997281 T^{16} \) $67$ \( ( 1 + 19 T + 138 T^{2} + 1691 T^{3} + 21053 T^{4} + 113297 T^{5} + 619482 T^{6} + 5714497 T^{7} + 20151121 T^{8} )^{2} \) $71$ \( ( 1 + 8 T - 47 T^{2} - 434 T^{3} + 1365 T^{4} - 30814 T^{5} - 236927 T^{6} + 2863288 T^{7} + 25411681 T^{8} )^{2} \) $73$ \( 1 - 15 T + 208 T^{2} - 2445 T^{3} + 29070 T^{4} - 272130 T^{5} + 2789288 T^{6} - 23441760 T^{7} + 210261239 T^{8} - 1711248480 T^{9} + 14864115752 T^{10} - 105863196210 T^{11} + 825536865870 T^{12} - 5068660044885 T^{13} + 31477519068112 T^{14} - 165710977786455 T^{15} + 806460091894081 T^{16} \) $79$ \( 1 - 9 T - 11 T^{2} + 1446 T^{3} - 17334 T^{4} + 85743 T^{5} + 200726 T^{6} - 10774872 T^{7} + 128238887 T^{8} - 851214888 T^{9} + 1252730966 T^{10} + 42274642977 T^{11} - 675160704054 T^{12} + 4449423552954 T^{13} - 2673962010731 T^{14} - 172835180875431 T^{15} + 1517108809906561 T^{16} \) $83$ \( ( 1 - 9 T - 2 T^{2} + 765 T^{3} - 6719 T^{4} + 63495 T^{5} - 13778 T^{6} - 5146083 T^{7} + 47458321 T^{8} )^{2} \) $89$ \( ( 1 + 12 T - 50 T^{2} + 192 T^{3} + 16899 T^{4} + 17088 T^{5} - 396050 T^{6} + 8459628 T^{7} + 62742241 T^{8} )^{2} \) $97$ \( ( 1 + 7 T - 63 T^{2} + 185 T^{3} + 11276 T^{4} + 17945 T^{5} - 592767 T^{6} + 6388711 T^{7} + 88529281 T^{8} )^{2} \) show more show less
__label__pos
0.9971
Unable to establish a simple socket connection between publisher and subscriber over Wifi-Aware   Kiến thức lập trình I am trying to create a socket connection between publisher and subscriber using Wifi Aware technology. I am following the below link from android developers website: https://developer.android.com/develop/connectivity/wifi/wifi-aware#create_a_connection As stated above, I am sending a message from subscriber which is successfully received on server side but when I request Network after creating a server socket on publisher, I get neither an error nor any network is received/created: // Publisher callback method when publish called. When message send success it will then call server.accept() to listen public void onMessageReceived(PeerHandle peer, byte[] message, Context mContext) { ...... try{ publisherSocket = new ServerSocket(0); publisherPort = publisherSocket.getLocalPort(0); } catch(...) { .... } NetworkSpecifier networkSpecifier = new WifiAwareNetworkSpecifier.Builder(currentDiscoverySession,currentPublisherConnectedPeer).setPskPassphrase("some8Letter@").setPort(publisherPort).build; NetworkRequest publisherNetworkRequest = new NetworkRequest.Builder().addTransportType(NetworkCapabilities.TRANSPORT_WIFI_AWARE).setNetworkSpecifier(networkSpecifier).build(); ConnectivityManager.NetworkCallback callback = new ConnectivityManager.NetworkCallback(){ public void onAvailable(Network network) { // Problem is it was never called which means network not there currentDiscoverySession.sendMessage(currentPublisherConnectedPeer, 0, someDummyMessage); } .....}; // Here ConnectivityManager is obatined using this.getSystemService(this.CONNECTIVITY_SERVICE) on onCreate of activity connectivityManager.requestNetwork(publisherNetworkRequest, callback); // Subscribe callback method when subscribe called public void onMessageReceived(PeerHandle peer, byte[] message, Context mContext) { NetworkSpecifier networkSpecifier = new WifiAwareNetworkSpecifier.Builder(currentDiscoverySession,peer).setPskPassphrase("some8Letter@").build(); NetworkRequest subscriberNetworkRequest = new NetworkRequest.Builder().addTransportType(NetworkCapabilities.TRANSPORT_WIFI_AWARE).setNetworkSpecifier(networkSpecifier).build(); ConnectivityManager.NetworkCallback callback = new ConnectivityManager.NetworkCallback(){ public void onCapabilitiesChanged(Network network, NetworkCapabilities networkCapabilities){ ..... WifiAwareNetworkInfo subscriberNetworkInfo = (WifiAwareNetworkInfo) networkCapabilities.getTransportInfo(); subscriberSocket = network.getSocketFactory().createSocket(subscriberNetworkInfo.getPeerIpv6Addr(), subscriberNetworkInfo.getPort()); // WRite some message using outputStream .....}; My manifests file : <uses-permission android:name="android.permission.ACCESS_WIFI_STATE" /> <uses-permission android:name="android.permission.CHANGE_WIFI_STATE" /> <uses-permission android:name="android.permission.ACCESS_NETWORK_STATE" /> <uses-permission android:name="android.permission.INTERNET" /> <uses-permission android:name="android.permission.NEARBY_WIFI_DEVICES" /> <uses-permission android:name="android.permission.ACCESS_FINE_LOCATION" android:maxSdkVersion="34"/> <uses-permission android:name="android.permission.ACCESS_COARSE_LOCATION" /> minSdk : 34 targetSdk : 34 I do not know why it is not returning any network over Wifi-Aware when I call requestNetworkMethod. I tried passing time in ms and saw that onUnavailable() method was called which basically meant that no such network is there/created. I want to know if is it even possible to create such a socket connection over Wifi-Aware and if that is the case then is there something wrong with my code or is it because of any hardware limitation and if so how can I know that? My ultimate goal is to create a video call app for the 2 apps to communicate over WIFI-Aware an my goal is to send and receive raw video bytes over this network without using any pre-existing protocols like WebRTC. If you find any problem with this approach then also please let me know. New contributor Alankrit Verma is a new contributor to this site. Take care in asking for clarification, commenting, and answering. Check out our Code of Conduct. LEAVE A COMMENT
__label__pos
0.813585
获取我们在 Firebase 峰会上发布的所有信息,了解 Firebase 可如何帮助您加快应用开发速度并满怀信心地运行应用。了解详情 开始使用 Cloud Firestore 安全规则 借助 Cloud Firestore 安全规则,您可以专注于构建出色的用户体验,而无需管理基础架构或编写服务器端身份验证和授权代码。 安全规则以简单而富有表现力的格式提供访问控制和数据验证。要构建基于用户和基于角色的访问系统以确保用户数据安全,您需要将Firebase 身份验证与 Cloud Firestore 安全规则结合使用。 安全规则版本 2 截至 2019 年 5 月,Cloud Firestore 安全规则第 2 版现已可用。规则的版本 2 更改了递归通配符{name=**}的行为。如果您计划使用集合组查询,则必须使用版本 2。您必须通过rules_version = '2';安全规则的第一行: rules_version = '2'; service cloud.firestore { match /databases/{database}/documents { 编写规则 所有 Cloud Firestore 安全规则都包含match语句,用于识别数据库中的文档,并allow表达式,用于控制对这些文档的访问: service cloud.firestore { match /databases/{database}/documents { match /<some_path>/ { allow read, write: if <some_condition>; } } } 在读取或写入任何数据之前,来自 Cloud Firestore 移动/Web 客户端库的每个数据库请求都会根据您的安全规则进行评估。如果规则拒绝访问任何指定的文档路径,则整个请求将失败。 以下是基本规则集的一些示例。虽然这些规则有效,但不建议将它们用于生产应用程序: 需要授权 // Allow read/write access on all documents to any user signed in to the application service cloud.firestore { match /databases/{database}/documents { match /{document=**} { allow read, write: if request.auth != null; } } } 全部拒绝 // Deny read/write access to all users under any conditions service cloud.firestore { match /databases/{database}/documents { match /{document=**} { allow read, write: if false; } } } 允许全部 // Allow read/write access to all users under any conditions // Warning: **NEVER** use this rule set in production; it allows // anyone to overwrite your entire database. service cloud.firestore { match /databases/{database}/documents { match /{document=**} { allow read, write: if true; } } } 上面示例中使用的{document=**}路径匹配整个数据库中的任何文档。继续阅读构建安全规则的指南,了解如何匹配特定数据路径和使用分层数据。 测试规则 Cloud Firestore 提供了一个规则模拟器,您可以使用它来测试您的规则集。您可以从 Firebase 控制台 Cloud Firestore 部分的规则选项卡访问模拟器。 规则模拟器允许您模拟经过身份验证和未经身份验证的读取、写入和删除。当您模拟经过身份验证的请求时,您可以构建和预览来自各种提供商的身份验证令牌。模拟请求针对您编辑器中的规则集运行,而不是您当前部署的规则集。 部署规则 在开始通过移动应用使用 Cloud Firestore 之前,您需要部署安全规则。您可以在 Firebase 控制台中或使用 Firebase CLI 部署规则。 Cloud Firestore 安全规则的更新最多可能需要一分钟才能影响新的查询和侦听器。但是,最多可能需要 10 分钟才能完全传播更改并影响任何活动的侦听器。 使用 Firebase 控制台 要设置和部署您的第一组规则,请打开 Firebase 控制台 Cloud Firestore 部分中的规则选项卡 在在线编辑器中编写您的规则,然后点击发布 使用 Firebase CLI 您还可以使用Firebase CLI部署规则。使用 CLI 允许您使用应用程序代码将规则置于版本控制之下,并将规则部署为现有部署过程的一部分。 // Set up Firestore in your project directory, creates a .rules file firebase init firestore // Edit the generated .rules file to your desired security rules // ... // Deploy your .rules file firebase deploy --only firestore:rules 增强云存储的安全性 您的应用将受益于 Cloud Firestore 强大的数据库功能以及 Cloud Storage 的文件存储和管理功能。结合使用,这些产品还可以增强应用程序安全性,因为 Cloud Firestore 可以捕获 Firebase 安全规则可用于这两种产品的授权要求。有关更多信息,请参阅云存储指南 下一步
__label__pos
0.9975
BlobDataURLcanvasimage的相互转换 函数都比较简单,直接看就ok了 /*-----------------------------------------------------------------------*/ // canvas转dataURL:canvas对象、转换格式、图像品质 function canvasToDataURL(canvas, format, quality){ return canvas.toDataURL(format||‘image/jpeg‘, quality||1.0); } // DataURL转canvas function dataURLToCanvas(dataurl, cb){ var canvas = document.createElement(‘CANVAS‘); var ctx = canvas.getContext(‘2d‘); var img = new Image(); img.onload = function(){ canvas.width = img.width; canvas.height = img.height; ctx.drawImage(img, 0, 0); cb(canvas); }; img.src = dataurl; } /*-----------------------------------------------------------------------*/ // image转canvas:图片地址 function imageToCanvas(src, cb){ var canvas = document.createElement(‘CANVAS‘); var ctx = canvas.getContext(‘2d‘); var img = new Image(); img.src = src; img.onload = function (){ canvas.width = img.width; canvas.height = img.height; ctx.drawImage(img, 0, 0); cb(canvas); }; } // canvas转image function canvasToImage(canvas){ var img = new Image(); img.src = canvas.toDataURL(‘image/jpeg‘, 1.0); return img; } /*-----------------------------------------------------------------------*/ // File/Blob对象转DataURL function fileOrBlobToDataURL(obj, cb){ var a = new FileReader(); a.readAsDataURL(obj); a.onload = function (e){ cb(e.target.resu< ); }; } // DataURL转Blob对象 function dataURLToBlob(dataurl){ var arr = dataurl.split(‘,‘); var mime = arr[0].match(/:(.*?);/)[1]; var bstr = atob(arr[1]); var n = bstr.len >h; var u8arr = new Uint8Array(n); while(n--){ u8arr[n] = bstr.charCodeAt(n); } return new Blob([u8arr], {type:mime}); } /*-----------------------------------------------------------------------*/ // Blob转image function blobToImage(blob, cb){ fileOrBlobToDataURL(blob, function (dataurl){ var img = new Image(); img.src = dataurl; cb(img); }); } // image转Blob function imageToBlob(src, cb){ imageToCanvas(src, function (canvas){ cb(dataURLToBlob(canvasToDataURL(canvas))); }); } /*-----------------------------------------------------------------------*/ // Blob转canvas function BlobToCanvas(blob, cb){ fileOrBlobToDataURL(blob, function (dataurl){ dataURLToCanvas(dataurl, cb); }); } // canvas转Blob function canvasToBlob(canvas, cb){ cb(dataURLToBlob(canvasToDataURL(canvas))); } /*-----------------------------------------------------------------------*/ // image转dataURL function imageToDataURL(src, cb){ imageToCanvas(src, function (canvas){ cb(canvasToDataURL(canvas)); }); } // dataURL转image,这个不需要转,直接给了src就能用 function dataURLToImage(dataurl){ var img = new Image(); img.src = d; return img; } /*-----------------------------------------------------------------------*/ Blob/DataURL/canvas/image的相互转换 原文地址:http://www.cnblogs.com/jyuf/p/7251591.html 最新回复(0) /jishu7OvI9BLGwL3hWdr10aPO2No3wIh0Hzynu8dsxQ_3D_3D4719438 8 简首页
__label__pos
0.999362
Fix: 0xc00001 error code in Windows 10, 8.1 Milan Stanojevic by Milan Stanojevic Deputy Editor Loading Comments Affiliate Disclosure Windows-10-version-1903 We’ve talked recently about issues that Windows 8 users are experiencing and provided a few ways to fix them. One such example was the Taskbar not responding problem in which we mentioned that in some cases, Windows 8 is not allowing users to properly repair their computer by returning a 0xc00001 error. Today we’re taking a look at how to resolve this Windows 8 problem. Usually, the 0xc00001 error code stands for a missing file in the installation media of Windows 10/8.1. There is no need to worry about this problem, as it can be easily bypassed. In some cases, this error is accompanied by BSOD (blue screen of death), but again, there is no need to worry, as it can be easily fixed. In some cases, you will need a Windows 8 installation disk or USB flash drive in order for this fix to work. If the following Windows 10/8.1 error is rendering your computer unusable: Your PC needs to be repaired The application or operating system couldn’t be loaded because a required file is missing or contains errors. File: windowssystem32windload.efi File:windowsSyse32windowsload.efi Error code:0xc0000001 Then the following guide will give you some information about the error and the steps necessary to fix this issue. How to fix Windows 10 error code 0xc00001 1. Create a recovery drive 2. Advanced startup 3. Copy the SAM File from the Repair Folder 4. Run the SFC scan 5. Check the hard drive 6. Run the DISM Solution 1 – Create a recovery drive If you don’t have your Windows 8 install drive, you can easily create a new one on a USB drive from any Windows 8 computer. To do so, open the Search charm and search for “recovery drive“. From the results, select “Create Recovery Drive“. When the wizard opens, make sure your USB drive is plugged in and tick the “Copy the recovery partition from the PC to the recovery drive” checkbox and follow the wizard. More information on creating a system restore USB drive can be found in this Microsoft help article. Solution 2 – Advanced startup Now that you have your USB recovery drive / Windows 8 installation media, you can move on to the next step. Plug in or insert your media and restart your computer. When you see the Post BIOS message press the Shirt and F8 keys to enter the Recovery mode. From here, select “See advanced repair option“. From here, select “Troubleshoot” and then go to “Advanced options“. From here, you can proceed in one of two ways: Method 1 Select “Automatic Repair” and let the install wizard take care of your issues by scanning the system files and replacing them. This option will work for more errors, not just the 0xc00001 Windows 8 error, so it’s good to take notice of it and keep it in mind when repairing Windows 8 computers. Method 2 Select “Command Prompt” which will launch the all too familiar CMD. In the console, type in the following commands (keep in mind to press the “Return” key after each command): • Bootrec /fixmbr0xc00001 Error bootrec • Bootrec /fixboot • Bootrec /rebuildbcd • exit Now restart your computer and see if the issue is resolved. This should take care of your issues, but if not, open the system in Recovery Mode again and perform another automatic repair. Solution 3 – Copy the SAM File from the Repair Folder If you’re still unable to login to Windows, try with copying the SAM file C:WINDOWSrepair to C:WINDOWSsystem32config. Here’s how to do that: 1. Insert your recovery drive and boot from it 2. Open the Command Prompt (as shown above) 3. If prompted whether you want to overwrite the original file, confirm it (enter “Y”) Solution 4 – Run the SFC scan The SFC scan is Microsoft’s built-in tool for resolving various problems in Windows. And it can be helpful when dealing with the 0xc00001 error, as well. Here’s how to run the SFC scan: 1. Open the Command Prompt 2. Enter the following line and press Enter: sfc/scannow0xc00001 Error sfc 3. Wait for the process to finish (it’s a lengthy one) 4. Restart your computer Solution 5 – Check the hard drive If something’s wrong with your hard drive, you may encounter the error code 0xc00001. So, we’re going to check if your hard drive is okay. And here’s how to do that: 1. Insert your Windows installation USB or DVD drive 2. Click Repair your computer, after selecting proper language 3. Select the drive you have Windows installed on (usually C:), and click Next 4. Choose Command Prompt when the System Recovery Options box appears 5. Enter the following line, and press Enter: chkdsk C: /f0xc00001 Error chkdsk 6. Wait for the process to finish 7. Restart your computer Solution 6 – Run the DISM Similarly to the SFC scan, DISM (Microsoft Windows Deployment Image Servicing and Management) is a built-in tool for dealing with various system errors in Windows. However, DISM is more powerful than the SFC scan, which means your chances of resolving this problem are bigger with DISM. You can easily run DISM even if you’re unable to boot your system. If you’re not sure how to do that, just follow these instructions: 1. Insert your installation USB or DVD and type following command: • DISM.exe /Online /Cleanup-Image /RestoreHealth /Source:C:RepairSourceWindows /LimitAccess0xc00001 Error DISM 2. Be sure to replace ”C:RepairSourceWindows” path of your DVD or USB. That’s about it for this article, we hope at least one of these solutions helped you deal with this annoying problem. If you have any comments, questions or suggestions, just let us know in the comments down below. Editor’s Note: This post was originally published in June 2014 and has been since completely revamped and updated for freshness, accuracy, and comprehensiveness. RELATED STORIES YOU NEED TO CHECK OUT:
__label__pos
0.562503
Skip site navigation (1)Skip section navigation (2) FreeBSD Manual Pages         home | help Log::Contextual::Role:UsertContributed Perl DoLog::Contextual::Role::Router(3) NAME Log::Contextual::Role::Router - Abstract interface between loggers and logging code blocks VERSION version 0.007000 SYNOPSIS package MyApp::Log::Router; use Moo; use Log::Contextual::SimpleLogger; with 'Log::Contextual::Role::Router'; has logger => (is => 'lazy'); sub _build_logger { return Log::Contextual::SimpleLogger->new({ levels_upto => 'debug' }); } sub before_import { my ($self, %export_info) = @_; my $exporter = $export_info{exporter}; my $target = $export_info{target}; print STDERR "Package '$target' will import from '$exporter'\n"; } sub after_import { my ($self, %export_info) = @_; my $exporter = $export_info{exporter}; my $target = $export_info{target}; print STDERR "Package '$target' has imported from '$exporter'\n"; } sub handle_log_request { my ($self, %message_info) = @_; my $log_code_block = $message_info{message_sub}; my $args = $message_info{message_args}; my $log_level_name = $message_info{message_level}; my $logger = $self->logger; my $is_active = $logger->can("is_${log_level_name}"); return unless defined $is_active && $logger->$is_active; my $log_message = $log_code_block->(@$args); $logger->$log_level_name($log_message); } package MyApp::Log::Contextual; use Moo; use MyApp::Log::Router; extends 'Log::Contextual'; #This example router is a singleton sub router { our $Router ||= MyApp::Log::Router->new } package main; use strict; use warnings; use MyApp::Log::Contextual qw(:log); log_info { "Hello there" }; DESCRIPTION Log::Contextual has three parts Export manager and logging method generator These tasks are handled by the "Log::Contextual" package. Logger selection and invocation The logging functions generated and exported by Log::Contextual call a method on an instance of a log router object which is responsible for invoking any loggers that should get an opportunity to receive the log message. The "Log::Contextual::Router" class implements the set_logger() and with_logger() functions as well as uses the arg_ prefixed functions to configure itself and provide the standard "Log::Contextual" logger selection API. Log message formatting and output The logger objects themselves accept or reject a log message at a certain log level with a guard method per level. If the logger is going to accept the log message the router is then responsible for executing the log message code block and passing the generated message to the logging object's log method. METHODS before_import($self, %import_info) after_import($self, %import_info) These two required methods are called with identical arguments at two different places during the import process. The before_import() method is invoked prior to the logging subroutines being exported into the target package and after_import() is called when the export is completed but before control returns to the package that imported the API. The arguments are passed as a hash with the following keys: exporter This is the name of the package that has been imported. It can also be 'Log::Contextual' itself. In the case of the synopsis the value for exporter would be 'MyApp::Log::Contextual'. target This is the package name that is importing the logging API. In the case of the synopsis the value would be 'main'. arguments This is a hash reference containing the configuration values that were provided for the import. The key is the name of the configuration item that was specified without the leading hyphen ('-'). For instance if the logging API is imported as follows use Log::Contextual qw( :log ), -logger => Custom::Logger->new({ levels => [qw( debug )] }); then $import_info{arguments}->{logger} would contain that instance of Custom::Logger. handle_log_request($self, %message_info) This method is called by "Log::Contextual" when a log event happens. The arguments are passed as a hash with the following keys exporter This is the name of the package that created the logging methods used to generate the log event. caller_package This is the name of the package that the log event has happened inside of. caller_level This is an integer that contains the value to pass to caller() that will provide information about the location the log event was created at. log_level This is the name of the log level associated with the log event. message_sub This is the message generating code block associated with the log event passed as a subref. If the logger accepts the log request the router should execute the subref to create the log message and then pass the message as a string to the logger. message_args This is an array reference that contains the arguments given to the message generating code block. When invoking the message generator it will almost certainly be expecting these argument values as well. AUTHOR Arthur Axel "fREW" Schmidt <[email protected]> COPYRIGHT AND LICENSE This software is copyright (c) 2016 by Arthur Axel "fREW" Schmidt. This is free software; you can redistribute it and/or modify it under the same terms as the Perl 5 programming language system itself. perl v5.32.0 2016-04-12 Log::Contextual::Role::Router(3) NAME | VERSION | SYNOPSIS | DESCRIPTION | METHODS | AUTHOR | COPYRIGHT AND LICENSE Want to link to this manual page? Use this URL: <https://www.freebsd.org/cgi/man.cgi?query=Log::Contextual::Role::Router&sektion=3&manpath=FreeBSD+12.2-RELEASE+and+Ports> home | help
__label__pos
0.918284
Solving systems Essay Solving systems of equations can be applied to many state of affairss. For illustration. systems of equations can be used to happen the optimum figure of points to bring forth to guarantee the highest profitableness of those peculiar points. Systems of equations can be solved by four methods: graphing. permutation. riddance. or with matrices. Which method do you prefer when work outing a system of equations? Why? What fortunes would do you to utilize a different method? Personally. the method a usage for any peculiar set of equations depends mostly on the dimension of the set of equations. That is. my pick depends on how many unknown variables there are and how many equations are in the set ; which would interpret to how many columns and how many rows. severally. when in matrix notation. When the figure of equations is merely approximately two or three. I prefer to utilize riddance. Elimination works best for these as it is really fast. because one time you pair off equations that look similar you can happen a variable you can insulate instantly. For more than three equations nevertheless. riddance does non work so really good ; I tend to travel around in circles and retrace my stairss and seek different attacks. and sometimes I even switch to permutation after I reach a dead terminal. For more or less the same grounds. I use riddance for when there are merely two or three unknown variables because you merely need a few stairss to insulate each one. and most times you do non necessitate to utilize all the given equations. For sets of equations with more than three equations and/or unknown variables though. I prefer to work out these utilizing matrices. We Will Write a Custom Essay Specifically For You For Only $13.90/page! order now Solving a set of equations with several variables and equations can be really simple when you convert them to matrix signifier and usage row decrease. It takes a small long sometimes but it ne’er gets confounding. all you need is to make is consistently work on column after column and by the clip you reach the terminal you can ever be certain that you have your solution set. Page TWO Deliverable Length: 250 words minimal. Detailss: The usage of sets is of import in many countries. such as market research. political relations. and medical specialty. For illustration. a college pupil may be in a peculiar set depending on a degree plan: Degree = { Associate. Bachelor. Master. Ph. D. } . A 2nd related set might be a listing of big leagues: Major = { Accounting. English. History. Math. Psychology. … } . Fit operations can besides be performed on these sets. For illustration. the brotherhood of the sets would be a listing of all grades and big leagues ; an intersection might be an Accounting PhD pupil ; the complement would be a member non in the set. The complement of the major set could be a Geology major. Supply a real-world illustration that shows 2 related sets. List all the members of your sets in set notation. A set can dwell of persons. objects. etc. Then. for that illustration show the brotherhood and intersection for those two sets. Show the complement of both single sets. I really stumbled across an article merely recently that was about a instance survey of first respondents. firemans specifically. who helped at the World Trade Center after the onslaught. shortly after and during the prostration. The survey was a biomonitoring ( proving of the internal dosage of chemicals or a metabolite ion organic structure matrices. which include blood and piss ) of the topics to see how much harmful chemicals were in their organic structures due to all the burning merchandises from the initial prostration and the fires that kept traveling for months on after. The survey tried to qualify the sum of chemical dose harmonizing to certain parametric quantities such as normal demographics ( age. gender ) and. more significantly. assigned undertakings and arrival clip. The assigned undertakings were to see whether what the firemans were making affected the internal doses. and this can be listed as the set: { deliverance. squad. marine units. ladder. engine. hose } . Each firefighter’s arrival clip. measured comparative to the existent prostration. was noted to see if the exposure to burning atoms got worse or better in the following twosome of yearss and these can be represented by the set: { nowadays at WTC prostration. reaching on yearss 1 or 2 postcollapse. reaching on yearss 3-7… } . The elements of this set travel on until the clip of the sampling. which was 3 hebdomads subsequently. The brotherhood of these sets would be the set of all firemans who responded to the scene of the WTC prostration. An intersection would be a fireman who worked the hosiery on the twenty-four hours of the prostration. A complement of the assigned undertakings set would be a fireman who arrived at the scene but possibly got injured and had to go forth without finishing a undertaking. and a complement of the arrival clip set would be a fireman who. for one ground or another. ne’er got to get on the scene. x Hi! I'm James! Would you like to get a custom essay? How about receiving a customized one? Check it out
__label__pos
0.914813
July 2000 Kernel Parameters v2.4.0 ~~~~~~~~~~~~~~~~~ The following is a consolidated list of the kernel parameters as implemented by the __setup() macro and sorted into English Dictionary order (defined as ignoring all punctuation and sorting digits before letters in a case insensitive manner), and with descriptions where known. The text in square brackets at the beginning of the description state the restrictions on the kernel for the said kernel parameter to be valid. The restrictions referred to are that the relevant option is valid if: ACPI ACPI support is enabled. APIC APIC support is enabled. APM Advanced Power Management support is enabled. AX25 Appropriate AX.25 support is enabled. CD Appropriate CD support is enabled. DEVFS devfs support is enabled. DRM Direct Rendering Management support is enabled. EFI EFI Partitioning (GPT) is enabled EIDE EIDE/ATAPI support is enabled. FB The frame buffer device is enabled. HW Appropriate hardware is enabled. IA-32 IA-32 aka i386 architecture is enabled. IA-64 IA-64 architecture is enabled. IP_PNP IP DCHP, BOOTP, or RARP is enabled. ISAPNP ISA PnP code is enabled. ISDN Appropriate ISDN support is enabled. JOY Appropriate joystick support is enabled. LP Printer support is enabled. LOOP Loopback device support is enabled. M68k M68k architecture is enabled. MCA MCA bus support is enabled. MDA MDA console support is enabled. MOUSE Appropriate mouse support is enabled. NET Appropriate network support is enabled. NFS Appropriate NFS support is enabled. PARIDE The ParIDE subsystem is enabled. PCI PCI bus support is enabled. PCMCIA The PCMCIA subsystem is enabled. PNP Plug & Play support is enabled. PPT Parallel port support is enabled. PS2 Appropriate PS/2 support is enabled. RAM RAM disk support is enabled. SCSI Appropriate SCSI support is enabled. SERIAL Serial support is enabled. SMP The kernel is an SMP kernel. SOUND Appropriate sound system support is enabled. V4L Video For Linux support is enabled. VGA The VGA console has been enabled. VT Virtual terminal support is enabled. XT IBM PC/XT MFM hard disk support is enabled. In addition, the following text indicates that the option: BUGS= Relates to possible processor bugs on the said processor. KNL Is a kernel start-up parameter. BOOT Is a boot loader parameter. Parameters denoted with BOOT are actually interpreted by the boot loader, and have no meaning to the kernel directly. Note that ALL kernel parameters listed below are CASE SENSITIVE, and that a trailing = on the name of any parameter states that that parameter will be entered as an environment variable, whereas its absence indicates that it will appear as a kernel argument readable via /proc/cmdline by programs running once the system is up. 53c7xx= [HW,SCSI] Amiga SCSI controllers. acpi= [HW,ACPI] Advanced Configuration and Power Interface acpismp=force [IA-32] Early setup parse and use ACPI SMP table. ad1816= [HW,SOUND] ad1848= [HW,SOUND] adb_buttons= [HW,MOUSE] adlib= [HW,SOUND] advansys= [HW,SCSI] aedsp16= [HW,SOUND] aha152x= [HW,SCSI] aha1542= [HW,SCSI] aic7xxx= [HW,SCSI] AM53C974= [HW,SCSI] amijoy= [HW,JOY] Amiga joystick support apm= [APM] Advanced Power Management. applicom= [HW] arcrimi= [HW,NET] ataflop= [HW,M68k] atarimouse= [HW,MOUSE] Atari Mouse. atascsi= [HW,SCSI] Atari SCSI. awe= [HW,SOUND] aztcd= [HW,CD] Aztec CD driver. baycom_epp= [HW,AX25] baycom_par= [HW,AX25] BayCom Parallel Port AX.25 Modem. baycom_ser_fdx= [HW,AX25] BayCom Serial Port AX.25 Modem in Full Duplex Mode. baycom_ser_hdx= [HW,AX25] BayCom Serial Port AX.25 Modem in Half Duplex Mode. bmouse= [HW,MOUSE,PS2] Bus mouse. bttv.card= [HW,V4L] bttv (bt848 + bt878 based grabber cards), most bttv.radio= important insmod options are available as kernel args too. bttv.pll= see Documentation/video4linux/bttv/Insmod-options bttv.tuner= and Documentation/video4linux/bttv/CARDLIST BusLogic= [HW,SCSI] cdu31a= [HW,CD] chandev= [HW,NET] cm206= [HW,CD] com20020= [HW,NET] com90io= [HW,NET] com90xx= [HW,NET] condev= [HW] console= [KNL] output console + comm spec (speed, control, parity). cpia_pp= [HW,PPT] cs4232= [HW,SOUND] cs89x0_dma= [HW,NET] ctc= [HW,NET] cyclades= [HW,SERIAL] Cyclades multi-serial port adapter. dasd= [HW,NET] db9= [HW,JOY] db9_2= [HW,JOY] db9_3= [HW,JOY] debug [KNL] Enable kernel debugging (events log level). decnet= [HW,NET] devfs= [DEVFS] digi= [HW,SERIAL] io parameters + enable/disable command. digiepca= [HW,SERIAL] dmascc= [HW,AX25,SERIAL] AX.25 Z80SCC driver with DMA support available. dmasound= [HW,SOUND] (sound subsystem buffers). dtc3181e= [HW,SCSI] eata= [HW,SCSI] eda= [HW,PS2] edb= [HW,PS2] eicon= [HW,ISDN] es1370= [HW,SOUND] es1371= [HW,SOUND] ether= [HW,NET] Ethernet cards parameters (irq, base_io_addr, mem_start, mem_end, name. (mem_start is often overloaded to mean something different and driver-specific). fd_mcs= [HW,SCSI] fdomain= [HW,SCSI] floppy= [HW] ftape= [HW] Floppy Tape subsystem debugging options. gamma= [HW,DRM] gc= [HW,JOY] gc_2= [HW,JOY] gc_3= [HW,JOY] gdth= [HW,SCSI] gpt [EFI] Forces disk with valid GPT signature but invalid Protective MBR to be treated as GPT. gscd= [HW,CD] gus= [HW,SOUND] gvp11= [HW,SCSI] hd= [EIDE] (E)IDE hard drive subsystem geometry (Cyl/heads/sectors) or tune parameters. hfmodem= [HW,AX25] hisax= [HW,ISDN] i810= [HW,DRM] ibmmcascsi= [HW,MCA,SCSI] IBM MicroChannel SCSI adapter. icn= [HW,ISDN] ide?= [HW] (E)IDE subsystem : config (iomem/irq), tuning or debugging (serialize,reset,no{dma,tune,probe}) or chipset specific parameters. idebus= [HW] (E)IDE subsystem : VLB/PCI bus speed. idle= [HW] in2000= [HW,SCSI] init= [KNL] initrd= [BOOT] Specify the location of the initial ramdisk. ip= [IP_PNP] isapnp= [ISAPNP] Specify RDP, reset, pci_scan and verbosity. isapnp_reserve_irq= [ISAPNP] Exclude IRQs for the autoconfiguration. isapnp_reserve_dma= [ISAPNP] Exclude DMAs for the autoconfiguration. isapnp_reserve_io= [ISAPNP] Exclude I/O ports for the autoconfiguration. Ranges are in pairs (I/O port base and size). isapnp_reserve_mem= [ISAPNP] Exclude memory regions for the autoconfiguration. Ranges are in pairs (memory base and size). isp16= [HW,CD] iucv= [HW,NET] js= [HW,JOY] Analog joystick kbd-reset [VT] keepinitrd [HW, ARM] load_ramdisk= [RAM] List of ramdisks to load from floppy. lockd.udpport= [NFS] lockd.tcpport= [NFS] logi_busmouse= [HW, MOUSE] lp=0 [LP] Specify parallel ports to use, e.g, lp=port[,port...] lp=none,parport0 (lp0 not configured, lp1 uses lp=reset first parallel port). 'lp=0' disables the lp=auto printer driver. 'lp=reset' (which can be specified in addition to the ports) causes attached printers to be reset. Using lp=port1,port2,... specifies the parallel ports to associate lp devices with, starting with lp0. A port specification may be 'none' to skip that lp device, or a parport name such as 'parport0'. Specifying 'lp=auto' instead of a port specification list means that device IDs from each port should be examined, to see if an IEEE 1284-compliant printer is attached; if so, the driver will manage that printer. ltpc= [HW] mac5380= [HW,SCSI] mac53c9x= [HW,SCSI] mad16= [HW,SOUND] maui= [HW,SOUND] max_loop=[0-255] [LOOP] Set the maximum number of loopback devices that can be mounted. maxcpus= [SMP] States the maximum number of processors that an SMP kernel should make use of. max_scsi_luns= [SCSI] mca-pentium [BUGS=IA-32] mcd= [HW,CD] mcdx= [HW,CD] md= [HW] RAID subsystems devices and level. mdisk= [HW] mdacon= [MDA] megaraid= [HW,SCSI] mem=exactmap [KNL,BOOT,IA-32] enable setting of an exact e820 memory map, as specified by the user. Such mem=exactmap lines can be constructed based on BIOS output or other requirements. mem=nn[KMG] [KNL,BOOT] force use of a specific amount of memory; to be used when the kernel is not able to see the whole system memory or for test. memfrac= [KNL] mga= [HW,DRM] mpu401= [HW,SOUND] msmouse= [HW,MOUSE] Microsoft Mouse. ncr5380= [HW,SCSI] ncr53c400= [HW,SCSI] ncr53c400a= [HW,SCSI] ncr53c406a= [HW,SCSI] ncr53c8xx= [HW,SCSI] netdev= [NET] Ethernet cards parameters (irq, base_io_addr, mem_start, mem_end, name. (mem_start is often overloaded to mean something different and driver-specific). (cf: ether=) nfsaddrs= [NFS] nfsroot= [NFS] nfs root filesystem for disk-less boxes. nmi_watchdog= [KNL,BUGS=IA-32] debugging features for SMP kernels. no387 [BUGS=IA-32] Tells the kernel to use the 387 maths emulation library even if a 387 maths coprocessor is present. noalign [KNL,ARM] noapic [SMP,APIC] Tells the kernel not to make use of any APIC that may be present on the system. noasync [HW, M68K] Disables async and sync negotiation for all devices. nocache [ARM] nodisconnect [HW,SCSI, M68K] Disables SCSI disconnects. nohlt [BUGS=ARM] no-hlt [BUGS=IA-32] Tells the kernel that the hlt instruction doesn't work correctly and not to use it. noht [SMP,IA-32] Disables P4 Xeon(tm) HyperThreading. noisapnp [ISAPNP] Disables ISA PnP code. noinitrd [RAM] Tells the kernel not to load any configured initial RAM disk. nointroute [IA-64] no-scroll [VGA] nosmp [SMP] Tells an SMP kernel to act as a UP kernel. nosync [HW, M68K] Disables sync negotiation for all devices. notsc [BUGS=IA-32] Disable Time Stamp Counter nowb [ARM] opl3= [HW,SOUND] opl3sa= [HW,SOUND] opl3sa2= [HW,SOUND] optcd= [HW,CD] panic= [KNL] kernel behaviour on panic. parport=0 [HW,PPT] Specify parallel ports. 0 disables. parport=auto Use 'auto' to force the driver to use parport=0xBBB[,IRQ[,DMA]] any IRQ/DMA settings detected (the default is to ignore detected IRQ/DMA settings because of possible conflicts). You can specify the base address, IRQ, and DMA settings; IRQ and DMA should be numbers, or 'auto' (for using detected settings on that particular port), or 'nofifo' (to avoid using a FIFO even if it is detected). Parallel ports are assigned in the order they are specified on the command line, starting with parport0. pas2= [HW,SOUND] pas16= [HW,SCSI] pcbit= [HW,ISDN] pcd. [PARIDE] pci=option[,option...] [PCI] various PCI subsystem options: off [IA-32] don't probe for the PCI bus bios [IA-32] force use of PCI BIOS, don't access the hardware directly. Use this if your machine has a non-standard PCI host bridge. nobios [IA-32] disallow use of PCI BIOS, only direct hardware access methods are allowed. Use this if you experience crashes upon bootup and you suspect they are caused by the BIOS. conf1 [IA-32] Force use of PCI Configuration Mechanism 1. conf2 [IA-32] Force use of PCI Configuration Mechanism 2. nosort [IA-32] Don't sort PCI devices according to order given by the PCI BIOS. This sorting is done to get a device order compatible with older kernels. biosirq [IA-32] Use PCI BIOS calls to get the interrupt routing table. These calls are known to be buggy on several machines and they hang the machine when used, but on other computers it's the only way to get the interrupt routing table. Try this option if the kernel is unable to allocate IRQs or discover secondary PCI buses on your motherboard. rom [IA-32] Assign address space to expansion ROMs. Use with caution as certain devices share address decoders between ROMs and other resources. irqmask=0xMMMM [IA-32] Set a bit mask of IRQs allowed to be assigned automatically to PCI devices. You can make the kernel exclude IRQs of your ISA cards this way. lastbus=N [IA-32] Scan all buses till bus #N. Can be useful if the kernel is unable to find your secondary buses and you want to tell it explicitly which ones they are. assign-busses [IA-32] Always assign all PCI bus numbers ourselves, overriding whatever the firmware may have done. pd. [PARIDE] pf. [PARIDE] pg. [PARIDE] pirq= [SMP,APIC] mp-table. plip= [PPT,NET] Parallel port network link. profile= [KNL] enable kernel profiling via /proc/profile (param:log level). prompt_ramdisk= [RAM] List of RAM disks to prompt for floppy disk before loading. pss= [HW,SOUND] pt. [PARIDE] quiet= [KNL] Disable log messages. r128= [HW,DRM] raid= [HW,RAID] ramdisk= [RAM] Sizes of RAM disks in kilobytes [deprecated]. ramdisk_blocksize= [RAM] ramdisk_size= [RAM] New name for the ramdisk parameter. ramdisk_start= [RAM] Starting block of RAM disk image (so you can place it after the kernel image on a boot floppy). reboot= [BUGS=IA-32] reserve= [KNL,BUGS] force the kernel to ignore some iomem area. riscom8= [HW,SERIAL] ro [KNL] Mount root device read-only on boot. root= [KNL] root filesystem. rootflags= [KNL] set root filesystem mount option string rootfstype= [KNL] set root filesystem type rw [KNL] Mount root device read-write on boot. S [KNL] run init in single mode. sb= [HW,SOUND] sbpcd= [HW,CD] Soundblaster CD adapter. scsi_logging= [SCSI] scsihosts= [SCSI] sg_def_reserved_size= [SCSI] sgalaxy= [HW,SOUND] sim710= [SCSI,HW] sjcd= [HW,CD] smart2= [HW] sonicvibes= [HW,SOUND] sonycd535= [HW,CD] sound= [SOUND] soundmodem= [HW,AX25,SOUND] Use sound card as packet radio modem. specialix= [HW,SERIAL] Specialix multi-serial port adapter. sscape= [HW,SOUND] st= [HW,SCSI] SCSI tape parameters (buffers, etc.). st0x= [HW,SCSI] stram_swap= [HW] swiotlb= [IA-64] Number of I/O TLB slabs. switches= [HW, M68K] sym53c416= [HW,SCSI] sym53c8xx= [HW,SCSI] t128= [HW,SCSI] tdfx= [HW,DRM] tgfx= [HW,JOY] tgfx_2= [HW,JOY] tgfx_3= [HW,JOY] tmc8xx= [HW,SCSI] tmscsim= [HW,SCSI] tp720= [HW,PS2] trix= [HW,SOUND] u14-34f= [HW,SCSI] uart401= [HW,SOUND] uart6850= [HW,SOUND] usbfix [BUGS=IA-64] video= [FB] frame buffer configuration. vga= [BOOT] on ix386, select a particular video mode (use vga=ask for menu). This is actually a boot loader parameter; the value is passed to the kernel using a special protocol. See linux/Documentation/i386/boot.txt for information. vmhalt= [KNL,S390] vmpoff= [KNL,S390] waveartist= [HW,SOUND] wd33c93= [HW,SCSI] wd7000= [HW,SCSI] wdt= [HW] xd= [HW,XT] Original XT pre-IDE (RLL encoded) disks. xd_geo= [HW,XT]
__label__pos
0.952462
One-Way ANOVA - Variance Analysis     Where Independent Samples T-test was used to compare a sample across two groups, there might be situations when a dependent variable might be categorized on more than two variables and then the sample is to be compared across three groups. for instance Comparing Work Stress in Junior, Middle and Senior level employees, comparing Job Satisfaction in College, Graduate and Postgraduate Education level employees or making comparison of Commitment to Change in professions as Doctors, Engineers, Teachers, Bankers and Marketers. It is important to understand that we have one variable that is categorized/divided into various groups/samples and those samples are then compared with each other. This is the ultimate objective of One Way ANOVA (Analysis of Variance).Example of research question: Is there a difference in optimism scores for young, middle-aged and old participants? What you need: Two variables: • One categorical independent variable with three or more distinct categories. This can also be a continuous variable that has been recoded to give three equal groups (e.g. age group: participants divided into three age categories, 29 and younger, between 30 and 44, 45 or above) • One continuous dependent variable (e.g. optimism scores). A few example scenarios/hypothesis in which we would use One-Way ANOVA are identified for the understanding of the readers 1. The average sale of the new brand of gasoline is same in all the 3 metro cities. 2. There are differences in Work Morale across 4 occupations. 3. Is there a change in confidence scores over the 3 time periods? It is important to note that in each of the above hypothesis, there is one continuous variable (Average Sale, Work Morale and Confidence Scores) that is compared across different groups (3 Metro Cities, 4 Occupations and 3 Time Periods). Now to run the One Way ANOVA, follow the following steps Click on the Analysis Tab, Select Compare Means > One-Way ANOVA Select the variable, that you would want to compare across different groups, In this case we would select stress with Intrinsic_Factors from the variable window and put it in the dependent list and would compare the variable across different occupations. The suggessted hypothesis for this test is that "The are differece in Stress with intrinsic factors across the 4 occupations". Adding the Variable the dialog box should look like Now Click on Option and Select Homogeneity of variance test and press continue Two Tables are shown in the output window, Here each of the tables are explained, the first table is the Test of Homogeneity of Variances, this table shows, if the Variances in the Data across the groups are similar or not, to explain it further, in this test we are checking Stress with intrinsic factors across the 4 occupations, Now the test would check if the Variances in the Data for Intrinsic Stress are same for each of the occupation groups i-e Banker, Teacher, Marketer and Engineer, if the value of Sig is greater than 0.05 we would say that Equal Variances are assumed, otherwise Equal Variances not Assumed. In this case we would say that variance in the data for stress are similar in the 4 occupations The Next table of ANOVA (Also shown below) shows that if differeces exist in the Stress with Intrinsic Job Factors across the four occupations or not, Sig value of 0.50 shows that there are differences across the four occupations, if the value would have been greater than 0.50, we would have inferred that there exist no differences in Stress with Intrinsic Factors across occupations. meaning all occupants feel similar kind of stress pertinent to the intrinsic job factors. Reporting ANOVA Table The ANOVA summary table suggests, the Stress relating to Intrinsic Job Factors across the four occupations under study differed significantly (F3,138 = 2.665, p = .050) Since now we know that differences do exist, we need to evaluate that between which occupations does the differences exist, and for this we would conduct a Post Hoc Analysis. for this purpose, Select One-Way ANOVA from the Menu and after selecting the Continuous variable add grouping variable, press Post Hoc button, you will see the following dialog box There are two groups, Equal Variances Assumed and Equal Variances Not Assumed, In this case the Test of Homogeneity of Variances revealed Equal Variance Assumed, so we select a test from Equal Variances Assumed, in this case we select LSD, you can select any, mostly LSD, Bonferroni, TUkey or Tukey's-b are used. After selection of LSD press continue, then press OK, apart from the other table new table of Multiple Comparisons is also displayed, that would make comparisons for Intrinsic Factors between each of the occupations. The above table shows the differences prevalent between two occupations, The Table Shows that No Differences exist in Stress with Intrinsic Factors between Banker and Teacher since the Sig. value is greater tha .05, however there are significant differences how Stresst with intrinsic factors affects Banker and Teacher, Since the Significance  (Sig.) value is less than .05. Reporting Multiple Comparisons Table Post-hoc analysis [LSD] were conducted to explore differences pertinent to Stresst with Intrinsic Job Factors among the four occupations groups. There was a significant difference between Banker and Marketing Job [mean difference = .47636, p < .01]. however no differences were recorded between anyother occupations. Finally we would reject the null hypothesis and accept the alternate hypothesis. Category: 
__label__pos
0.897599
nn_triplet_margin_with_distance_loss: Triplet margin with distance loss nn_triplet_margin_with_distance_lossR Documentation Triplet margin with distance loss Description Creates a criterion that measures the triplet loss given input tensors a, p, and n (representing anchor, positive, and negative examples, respectively), and a nonnegative, real-valued function ("distance function") used to compute the relationship between the anchor and positive example ("positive distance") and the anchor and negative example ("negative distance"). Usage nn_triplet_margin_with_distance_loss( distance_function = NULL, margin = 1, swap = FALSE, reduction = "mean" ) Arguments distance_function (callable, optional): A nonnegative, real-valued function that quantifies the closeness of two tensors. If not specified, nn_pairwise_distance() will be used. Default: None margin (float, optional): A non-negative margin representing the minimum difference between the positive and negative distances required for the loss to be 0. Larger margins penalize cases where the negative examples are not distant enough from the anchors, relative to the positives. Default: 1. swap (bool, optional): Whether to use the distance swap described in the paper Learning shallow convolutional feature descriptors with triplet losses by V. Balntas, E. Riba et al. If TRUE, and if the positive example is closer to the negative example than the anchor is, swaps the positive example and the anchor in the loss computation. Default: FALSE. reduction (string, optional): Specifies the (optional) reduction to apply to the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, 'mean': the sum of the output will be divided by the number of elements in the output, 'sum': the output will be summed. Default: 'mean' Details The unreduced loss (i.e., with reduction set to 'none') can be described as: \ell(a, p, n) = L = \{l_1,…,l_N\}^\top, \quad l_i = \max \{d(a_i, p_i) - d(a_i, n_i) + {\rm margin}, 0\} where N is the batch size; d is a nonnegative, real-valued function quantifying the closeness of two tensors, referred to as the distance_function; and margin is a non-negative margin representing the minimum difference between the positive and negative distances that is required for the loss to be 0. The input tensors have N elements each and can be of any shape that the distance function can handle. If reduction is not 'none' (default 'mean'), then: \ell(x, y) = \begin{array}{ll} \mbox{mean}(L), & \mbox{if reduction} = \mbox{`mean';}\\ \mbox{sum}(L), & \mbox{if reduction} = \mbox{`sum'.} \end{array} See also nn_triplet_margin_loss(), which computes the triplet loss for input tensors using the l_p distance as the distance function. Shape • Input: (N, *) where * represents any number of additional dimensions as supported by the distance function. • Output: A Tensor of shape (N) if reduction is 'none', or a scalar otherwise. Examples if (torch_is_installed()) { # Initialize embeddings embedding <- nn_embedding(1000, 128) anchor_ids <- torch_randint(1, 1000, 1, dtype = torch_long()) positive_ids <- torch_randint(1, 1000, 1, dtype = torch_long()) negative_ids <- torch_randint(1, 1000, 1, dtype = torch_long()) anchor <- embedding(anchor_ids) positive <- embedding(positive_ids) negative <- embedding(negative_ids) # Built-in Distance Function triplet_loss <- nn_triplet_margin_with_distance_loss( distance_function = nn_pairwise_distance() ) output <- triplet_loss(anchor, positive, negative) # Custom Distance Function l_infinity <- function(x1, x2) { torch_max(torch_abs(x1 - x2), dim = 1)[[1]] } triplet_loss <- nn_triplet_margin_with_distance_loss( distance_function = l_infinity, margin = 1.5 ) output <- triplet_loss(anchor, positive, negative) # Custom Distance Function (Lambda) triplet_loss <- nn_triplet_margin_with_distance_loss( distance_function = function(x, y) { 1 - nnf_cosine_similarity(x, y) } ) output <- triplet_loss(anchor, positive, negative) } torch documentation built on Oct. 24, 2022, 5:08 p.m.
__label__pos
0.99934
$image) // see if files exist in content - we don't want to upload non-used selected files. if( strpos($_REQUEST['content'], $image) !== false ) { $upload = media_sideload_image($image, $post_ID, $_REQUEST['photo_description'][$key]); // Replace the POSTED content with correct uploaded ones. // escape quote for matching $quoted = preg_quote2($image); if( !is_wp_error($upload) ) $content = preg_replace('/]*)src=(\"|\')'.$quoted.'(\2)([^>\/]*)\/*>/is', $upload, $content); } // set the post_content and status $quick['post_status'] = isset($_REQUEST['publish']) ? 'publish' : 'draft'; $quick['post_content'] = $content; // error handling for $post if ( is_wp_error($post_ID)) { wp_die($id); wp_delete_post($post_ID); // error handling for media_sideload } elseif ( is_wp_error($upload)) { wp_die($upload); wp_delete_post($post_ID); } else { $quick['ID'] = $post_ID; wp_update_post($quick); } return $post_ID; } // For submitted posts. if ( 'post' == $_REQUEST['action'] ) { check_admin_referer('press-this'); $post_ID = press_it(); $posted = $post_ID; } // Set Variables $title = wp_specialchars(aposfix(stripslashes($_GET['t']))); $selection = trim( aposfix( stripslashes($_GET['s']) ) ); if ( ! empty($selection) ) { $selection = preg_replace('/(\r?\n|\r)/', ' ', $selection); $selection = ' '.str_replace(' ', '', $selection).' '; } $url = clean_url($_GET['u']); $image = $_GET['i']; if($_REQUEST['ajax']) { switch ($_REQUEST['ajax']) { case 'video': ?> <?php echo attribute_escape(__('Click to insert.')); ?> | ]*)src=(\"|\')([^<>]+?\.(png|jpeg|jpg|jpe|gif))[^<>\'\"]*(\2)([^>\/]*)\/*>/is'; preg_match_all($pattern, $content, $matches); if ( empty($matches[0]) ) return ''; $sources = array(); foreach ($matches[3] as $src) { // if no http in url if(strpos($src, 'http') === false) // if it doesn't have a relative uri if( strpos($src, '../') === false && strpos($src, './') === false && strpos($src, '/') === 0) $src = 'http://'.str_replace('//','/', $host['host'].'/'.$src); else $src = 'http://'.str_replace('//','/', $host['host'].'/'.dirname($host['path']).'/'.$src); $sources[] = clean_url($src); } return "'" . implode("','", $sources) . "'"; } $url = urldecode($url); $url = str_replace(' ', '%20', $url); echo 'new Array('.get_images_from_uri($url).')'; break; case 'photo_js': ?> // gather images and load some default JS var last = null var img, img_tag, aspect, w, h, skip, i, strtoappend = ""; var my_src = eval( jQuery.ajax({ type: "GET", url: "", cache : false, async : false, data: "ajax=photo_images&u=", dataType : "script" }).responseText ); if(my_src.length == 0) { var my_src = eval( jQuery.ajax({ type: "GET", url: "", cache : false, async : false, data: "ajax=photo_images&u=", dataType : "script" }).responseText ); if(my_src.length == 0) { strtoappend = ''; } } for (i = 0; i < my_src.length; i++) { img = new Image(); img.src = my_src[i]; img_attr = 'id="img' + i + '"'; skip = false; maybeappend = ''; if (img.width && img.height) { if (img.width >= 30 && img.height >= 30) { aspect = img.width / img.height; scale = (aspect > 1) ? (71 / img.width) : (71 / img.height); w = img.width; h = img.height; if (scale < 1) { w = parseInt(img.width * scale); h = parseInt(img.height * scale); } img_attr += ' style="width: ' + w + 'px; height: ' + h + 'px;"'; strtoappend += maybeappend; } } else { strtoappend += maybeappend; } } function pick(img, desc) { if (img) { if('object' == typeof jQuery('.photolist input') && jQuery('.photolist input').length != 0) length = jQuery('.photolist input').length; if(length == 0) length = 1; jQuery('.photolist').append(''); jQuery('.photolist').append(''); insert_editor( "\n\n" + encodeURI(' ' + desc + ' ')); } return false; } function image_selector() { tb_remove(); desc = jQuery('#this_photo_description').val(); src = jQuery('#this_photo').val(); pick(src, desc); jQuery('#extra_fields').hide(); jQuery('#extra_fields').html(''); return false; } jQuery(document).ready(function() { jQuery('#extra_fields').html(' Photo () '); jQuery('.close').click(function() { jQuery('#extra_fields').hide(); jQuery('#extra_fields').html(''); }); jQuery('.refresh').click(function() { show('photo'); }); jQuery('#img_container').html(strtoappend); jQuery('#photo_add_url').attr('href', '?ajax=photo_thickbox_url&height=200&width=500'); tb_init('#extra_fields .thickbox'); }); > <?php _e('Press This') ?> ID, false, false, $popular_ids) ?> 0, 'name' => 'newcat_parent', 'orderby' => 'name', 'hierarchical' => 1, 'show_option_none' => __('Parent category'), 'tab_index' => 3 ) ); ?> | | • Add: <?php _e('Insert an Image'); ?> • <?php _e('Embed a Video'); ?>
__label__pos
0.996513
JEP 452: Key Encapsulation Mechanism API OwnerWeijun Wang TypeFeature ScopeSE StatusClosed / Delivered Release21 Componentsecurity-libs / javax.crypto Discussionsecurity dash dev at openjdk dot org EffortM DurationM Reviewed byAlan Bateman, Sean Mullan Endorsed bySean Mullan Created2023/01/25 03:48 Updated2024/01/03 20:08 Issue8301034 Summary Introduce an API for key encapsulation mechanisms (KEMs), an encryption technique for securing symmetric keys using public key cryptography. Goals Non-Goals Motivation Key encapsulation is a modern cryptographic technique that secures symmetric keys using asymmetric or public key cryptography. The traditional technique for doing so is to encrypt a randomly generated symmetric key with a public key, but that requires padding and can be difficult to prove secure. A key encapsulation mechanism (KEM) instead uses properties of the public key to derive a related symmetric key, which requires no padding. The concept of a KEM was introduced by Crammer and Shoup in §7.1 of Design and Analysis of Practical Public-Key Encryption Schemes Secure against Adaptive Chosen Ciphertext Attack. Shoup later proposed it as an ISO standard in §3.1 of A Proposal for an ISO Standard for Public Key Encryption. It was accepted as ISO 18033-2 and published in May 2006. KEMs are a building block of Hybrid Public Key Encryption (HPKE). The NIST Post-Quantum Cryptography (PQC) standardization process explicitly calls for KEMs and digital signature algorithms to be evaluated as candidates for the next generation of standard public key cryptography algorithms. The Diffie-Hellman key exchange step in TLS 1.3 can also be modeled as a KEM. KEMs will be an important tool for defending against quantum attacks. None of the existing cryptographic APIs in the Java Platform is capable of representing KEMs in a natural way (see below). Implementors of third-party security providers have already expressed a need for a standard KEM API. It is time to add one to the Java Platform. Description A KEM consists of three functions: The key pair generation function is covered by the existing KeyPairGenerator API. We define a new class, KEM, for the encapsulation and decapsulation functions: package javax.crypto; public class DecapsulateException extends GeneralSecurityException; public final class KEM { public static KEM getInstance(String alg) throws NoSuchAlgorithmException; public static KEM getInstance(String alg, Provider p) throws NoSuchAlgorithmException; public static KEM getInstance(String alg, String p) throws NoSuchAlgorithmException, NoSuchProviderException; public static final class Encapsulated { public Encapsulated(SecretKey key, byte[] encapsulation, byte[] params); public SecretKey key(); public byte[] encapsulation(); public byte[] params(); } public static final class Encapsulator { String providerName(); int secretSize(); // Size of the shared secret int encapsulationSize(); // Size of the key encapsulation message Encapsulated encapsulate(); Encapsulated encapsulate(int from, int to, String algorithm); } public Encapsulator newEncapsulator(PublicKey pk) throws InvalidKeyException; public Encapsulator newEncapsulator(PublicKey pk, SecureRandom sr) throws InvalidKeyException; public Encapsulator newEncapsulator(PublicKey pk, AlgorithmParameterSpec spec, SecureRandom sr) throws InvalidAlgorithmParameterException, InvalidKeyException; public static final class Decapsulator { String providerName(); int secretSize(); // Size of the shared secret int encapsulationSize(); // Size of the key encapsulation message SecretKey decapsulate(byte[] encapsulation) throws DecapsulateException; SecretKey decapsulate(byte[] encapsulation, int from, int to, String algorithm) throws DecapsulateException; } public Decapsulator newDecapsulator(PrivateKey sk) throws InvalidKeyException; public Decapsulator newDecapsulator(PrivateKey sk, AlgorithmParameterSpec spec) throws InvalidAlgorithmParameterException, InvalidKeyException; } The getInstance methods create a new KEM object that implements the specified algorithm. The sender calls one of the newEncapsulator methods. These methods take the receiver's public key and return an Encapsulator object. The sender can then call one of that object's two encapsulate methods to get an Encapsulated object, which contains a SecretKey and a key encapsulation message. The encapsulate() method returns a key containing the full shared secret, with an algorithm name of "Generic". This key is usually passed to a key derivation function. The encapsulate(from, to, algorithm) method returns a key whose key material is a sub-array of the shared secret, with the given algorithm name. The receiver calls one of the newDecapsulator methods. These methods take the receiver's private key and return a Decapsulator object. The receiver can then call one of that object's two decapsulate methods, which take the received key encapsulation message and return the shared secret. The decapsulate(encapsulation) method returns the full shared secret with a "Generic" algorithm, while the decapsulate(encapsulation, from, to, algorithm) method returns a key with the user-specified key material and algorithm. A KEM algorithm can define an AlgorithmParameterSpec subclass to provide additional information to the full newEncapsulator method. This is especially useful if the same key can be used to derive shared secrets in different ways. Instances of an AlgorithmParameterSpec subclass should be immutable. If any of the information inside an AlgorithmParameterSpec object needs to be transmitted along with the key encapsulation message so that the receiver is able to create a matching decapsulator then it will be included as a byte array in the params field inside the Encapsulated result. In that case, the security provider should provide an AlgorithmParameters implementation using the same algorithm name as the KEM. The receiver can initiate such an AlgorithmParameters instance with the received params byte array and recover an AlgorithmParameterSpec object to be used when it calls the newDecapsulator method. Multiple concurrent invocations of the encapsulate or decapsulate methods of a particular Encapsulator or Decapsulator object, respectively, should be safe. Each invocation of an encapsulate method should generate a new shared secret and encapsulation. Here is an example using a hypothetical "ABC" KEM. Before the key encapsulation and decapsulation, the receiver generates an "ABC" key pair and publishes the public key. // Receiver side KeyPairGenerator g = KeyPairGenerator.getInstance("ABC"); KeyPair kp = g.generateKeyPair(); publishKey(kp.getPublic()); // Sender side KEM kemS = KEM.getInstance("ABC-KEM"); PublicKey pkR = retrieveKey(); ABCKEMParameterSpec specS = new ABCKEMParameterSpec(...); KEM.Encapsulator e = kemS.newEncapsulator(pkR, specS, null); KEM.Encapsulated enc = e.encapsulate(); SecretKey secS = enc.key(); sendBytes(enc.encapsulation()); sendBytes(enc.params()); // Receiver side byte[] em = receiveBytes(); byte[] params = receiveBytes(); KEM kemR = KEM.getInstance("ABC-KEM"); AlgorithmParameters algParams = AlgorithmParameters.getInstance("ABC-KEM"); algParams.init(params); ABCKEMParameterSpec specR = algParams.getParameterSpec(ABCKEMParameterSpec.class); KEM.Decapsulator d = kemR.newDecapsulator(kp.getPrivate(), specR); SecretKey secR = d.decapsulate(em); // secS and secR will be identical KEM configurations A single KEM algorithm can have multiple configurations. Each configuration can accept different types of public or private keys, use different methods to derive the shared secrets, and emit different key encapsulation messages. Each configuration should map to a specific algorithm that creates a fixed size shared secret and a fixed size key encapsulation message. The configuration should be unambiguously determined by three pieces of information: For example, the Kyber family of KEMs could have a single algorithm named "Kyber", but the implementation could support different configurations based on key types, e.g., Kyber-512, Kyber-768, and Kyber-1024. Another example is the RSA-KEM family of KEMs. The algorithm name could simply be "RSA-KEM", but the implementation could support different configurations based on different RSA key sizes and different key derivation function (KDF) settings. The different KDF settings could be conveyed via an RSAKEMParameterSpec object. In both cases, the configuration can only be determined after one of the newEncapsulator or newDecapsulator methods is called. Delayed provider selection The provider chosen for a given KEM algorithm can depend not only upon the name of the algorithm passed to a getInstance method but also upon the key passed to a newEncapsulator or newDecapsulator method. The selection of the provider is thus delayed until one of those methods is called, just as in other cryptographic APIs such as Cipher and KeyAgreement. Each call of a newEncapsulator or newDecapsulator method can select a different provider. You can discover which provider is selected via the providerName() methods of the Encapsulator and Decapsulator classes. The encapsulationSize() methods Some higher-level protocols concatenate key encapsulation messages with other data directly, without providing any length information. For example, Hybrid TLS Key Exchange concatenates two key encapsulation messages into a single key_exchange field, and RSA-KEM concatenates the key encapsulation message with the wrapped keying data. These protocols assume that the length of the key encapsulation message is fixed and well-known once the KEM configuration is fixed. We provide the encapsulationSize() methods to retrieve the size of the key encapsulation message in case an application needs to extract the key encapsulation message from such concatenated data. Shared secrets might not be extractable All existing KEM implementations return shared secrets in a byte array. However, a Java security provider might be backed by a native-code implementation and the shared secret might not be extractable. Therefore it is not always possible to return the shared secret in a byte array. For that reason, the encapsulate and decapsulate methods always return the shared secret in a SecretKey object. If the key is extractable, the format of the key must be "RAW" and its getEncoded() method must return either the full shared secret or the slice of the shared secret specified by the from and to parameters of an extended encapsulate or decapsulate method. If the key is not extractable, the key's getFormat() and getEncoded() methods must return null even though internally the key material is either the full shared secret or a slice of the shared secret. The KEM service provider interface (SPI) A KEM implementation must implement the KEMSpi interface: package javax.crypto; public interface KEMSpi { interface EncapsulatorSpi { int engineSecretSize(); int engineEncapsulationSize(); KEM.Encapsulated engineEncapsulate(int from, int to, String algorithm); } interface DecapsulatorSpi { int engineSecretSize(); int engineEncapsulationSize(); SecretKey engineDecapsulate(byte[] encapsulation, int from, int to, String algorithm) throws DecapsulateException; } EncapsulatorSpi engineNewEncapsulator(PublicKey pk, AlgorithmParameterSpec spec, SecureRandom sr) throws InvalidAlgorithmParameterException, InvalidKeyException; DecapsulatorSpi engineNewDecapsulator(PrivateKey sk, AlgorithmParameterSpec spec) throws InvalidAlgorithmParameterException, InvalidKeyException; } An implementation must implement the EncapsulatorSpi and DecapsulatorSpi interfaces, and return objects of these types from the engineNewEncapsulator and engineNewDecapsulator methods of its KEMSpi implementation. Calls to the secretSize, encapsulationSize, encapsulate, and decapsulate methods of Encapsulator and Decapsulator objects are delegated to the engineSecretSize, engineEncapsulationSize, engineEncapsulate, and engineDecapsulate methods in the EncapsulatorSpi and DecapsulatorSpi implementations. An implementation of the engineEncapsulate and engineDecapsulate methods must be able to encapsulate or decapsulate keys with a "Generic" algorithm, a from value of 0, and a to value of the shared secret’s length. Otherwise, it can throw an UnsupportedOperationException if the combination of arguments is not supported because, e.g., the algorithm name cannot be mapped to an internal key type, the size of the key does not match the algorithm, or the implementation does not support slicing the shared secret freely. Future Work Encryption options ISO 18033-2 defines an encryption option for the encapsulate function because some asymmetric ciphers allow scheme-specific options to be passed to the encryption algorithm. However, this option is not mentioned in either RFC 9180 or NIST's PQC KEM API Notes, so we do not include it here. If a compelling case for an algorithm that requires this option arises then a future enhancement could introduce another overload of the encapsulate method that allows the inclusion of algorithm-specific parameters. AuthEncap and AuthDecap functions RFC 9180 defines two optional KEM functions, AuthEncap and AuthDecap, which allow the sender to provide its own private key during the encapsulation process so that the receiver can be assured that the shared secret was generated by the holder of that private key. However, these two functions do not appear in any other KEM definitions, so we do not include them here. Support for these functions could be added in a future enhancement. Alternatives Use existing APIs We considered using the existing KeyGenerator, KeyAgreement, and Cipher APIs to represent KEMs, but each of them has significant issues. Either they don't support the required feature set, or the API does not match the KEM functions. In short, each of these alternatives would be a hack to work around an API that was not designed to represent a KEM. Extra classes and methods would be required, and the implementations would be complex and fragile. Without a standard KEM API, security providers are likely to implement KEMs in inconsistent and awkward ways which will be difficult for developers to use. Include a key pair generation function All KEM definitions contain a key pair generation function. We could have included such a function in the KEM API, but we chose not to do so since the existing KeyPairGenerator API was specifically designed for this purpose. Including an identical function in the KEM API could lead to confusion for provider implementors and for developers. Testing We will add conformance tests on input, output, and exceptions, and the DHKEM known-answer tests from RFC 9180.
__label__pos
0.97343
6 4 2015 0 Thusc T3: bzoj 4104[Thu Summer Camp 2015]解密运算 首先对于一个按行向量排序的循环矩阵,任意两列都是一一对应的关系,于是我们只需要找到每行第一列和最后一列的对应关系就行了 结论1 : 第一行就是最后一行排序的结果 结论2 :这个关系是一一对应的,而且对应后的逆结果便是原序列按照字典序第一关键字,位置第二关键字排序之后的位置 这样暴力sort可以解决,然后还可以高大上的用基数排序搞到O(n) //coder: davidwang #include <cstdio> #include <cstdlib> #include <cstring> #include <algorithm> #define X first #define Y second using namespace std; #define LL long long inline int read(){ int ret=0,f=1; char ch; for (ch=getchar();ch<'0' || ch>'9';ch=getchar()) if (ch=='-') f=-f; for (;'0'<=ch && ch<='9';ch=getchar()) ret=ret*10+ch-48; return ret*f; } const int N = 200010; pair<int,int>a[N]; int b[N],n,m; int main() { n=read(); m=read(); n++; for (int i=1;i<=n;i++) b[i]=read(),a[i]=make_pair(b[i],i); sort(a+1,a+n+1); for (int now=a[a[1].Y].Y;b[now];now=a[now].Y){ printf("%d ",b[now]); } } //coder: davidwang #include <cstdio> #include <cstring> #include <cstdlib> #include <algorithm> #define X first #define Y second using namespace std; #define LL long long inline int read(){ int ret=0,f=1; char ch; for (ch=getchar();ch<'0' || ch>'9';ch=getchar()) if (ch=='-') f=-f; for (;'0'<=ch && ch<='9';ch=getchar()) ret=ret*10+ch-48; return ret*f; } const int N = 200010; int n,m,a[N],cnt[N],next[N],now; int main() { #ifndef ONLINE_JUDGE freopen("a.in","r",stdin); freopen("a.out","w",stdout); #endif n=read(); m=read(); for (int i=1;i<=n+1;i++){ a[i]=read(); cnt[a[i]]++; if (a[i]==0) now=i; } for (int i=1;i<=m;i++) cnt[i]+=cnt[i-1]; for (int i=m;i;i--) cnt[i]=cnt[i-1]; cnt[0]=0; for (int i=1;i<=n+1;i++) next[++cnt[a[i]]]=i; int i=1; for (now=next[now],i=1;i<=n;i++,now=next[now]) printf("%d ",a[now]); puts(""); return 0; }   发现刚才头文件比代码要长好多不科学,然后就缩减了一下……当时这么简单的题目都没能做出来....... Category: 未分类 | Tags: bzoj THOI | Read Count: 435 登录 * loading captcha image... (输入验证码) or Ctrl+Enter Host by is-Programmer.com | Power by Chito 1.3.3 beta | Theme: Aeros 2.0 by TheBuckmaker.com
__label__pos
0.950852
Número romano MMMDCCLXXXVI en número arábigo ¿Qué valor tiene MMMDCCLXXXVI en números romanos? El número romano MMMDCCLXXXVI equivale al 3786. Por lo tanto, si se desea escribir mediante símbolos romanos el dígito 3786 debe usarse la combinación de letras MMMDCCLXXXVI. Esta combinación de letras equivale en escritura romana al número arábigo tres mil setecientos ochenta y seis. MMMDCCLXXXVI = 3786 ¿Cómo se debe leer el número romano MMMDCCLXXXVI? Se debe tener en cuenta que las letras romanas que simbolizan números deben ser leídas y escritas de izquierda a derecha y en el orden de mayor a menor. En el caso de que número romano MMMDCCLXXXVI se encuentre en un texto este debería leerse como un número natural, es decir, en este caso debe ser leído como “Tres mil setecientos ochenta y seis”. ¿Calculadora de números romanos a arábigos? Si deseas calcular el valor de alguna cifra diferente a MMMDCCLXXXVI escrita en nomenclatura romana puedes utilizar nuestra calculadora online de números romanas a cifras arábigas de forma sencilla. MMMDCCLXXXVI Subir
__label__pos
0.865734
Comment résoudre UnhandledPromiseRejectionWarning chez la mongoose? J’essaie de récupérer des données en utilisant de la mongoose. Donc, chaque fois que je dois récupérer les messages de l’API – localhost: 3000 / api / posts – j’obtiens l’erreur suivante que je suis incapable de décrypter. (node: 12100) UnhandledPromiseRejectionWarning: rejet de promesse non gérée (r ID d’éjection: 1): [MongoError: connect ETIMEDOUT xxxx] Le foll est mon code dans le fichier api.js. J’apprécierais si vous pouviez me guider sur les points qui me dérangent. const express = require('express'); const router = express.Router(); const mongoose = require('mongoose'); const post = require('../models/post'); const db = "mongodb://:@xxxxxxx.mlab.com:xxxxxx/xxxxxx"; mongoose.Promise = global.Promise; mongoose.connect(db, function(err) { if(err) { console.log('Connection error'); } }); router.get('/posts', function(req, res) { console.log('Requesting posts'); post.find({}) .exec(function(err, posts) { if (err) { console.log('Error getting the posts'); } else { res.json(posts); console.log(posts); } }); }); //in order for server.js file to access the api, we have to add the foll line module.exports = router; 23 mai 2017 Maintenant, je reçois également un avertissement de dépréciation alors qu’en fait, j’ai inclus le loc suivant: mongoose.Promise = global.Promise; //we add this because if we dont, you may get a warning that mongoose's default promise library is deprecated J’apprécierais si je pouvais obtenir des conseils sur cette question. Merci Vous avez besoin d’un gestionnaire de rejet pour votre code, par exemple: router.get('/posts', function(req, res) { console.log('Requesting posts'); post.find({}) .exec() .then(function(posts) { res.json(posts); console.log(posts); }) .catch(function(error){ console.log('Error getting the posts'); }); }); Ou n’utilisez pas de chaîne de promesse, utilisez simplement la fonction de rappel: router.get('/posts', function(req, res) { console.log('Requesting posts'); post.find({}, function(err, posts){ if (err) { console.log('Error getting the posts'); } else { res.json(posts); console.log(posts); } }) }); La bonne façon de le gérer est d’append une clause catch. const mongoose = require('mongoose'); mongoose.connect(process.env.MONGODB_URI).catch(function (reason) { console.log('Unable to connect to the mongodb instance. Error: ', reason); }); Ajouter ma réponse car les autres ne donnent pas une image claire. Puisque vous rendez la mongoose disponible en tant que promesse globale mongoose.Promise = global.Promise vous devrez gérer la promesse en utilisant .then() et .catch() Voici comment: ... mongoose.Promise = global.Promise; mongoose.connect(db) .then(res => console.log("Connected to DB")) .catch(err => console.log(err)) ...
__label__pos
0.923349
Dominance When considering  functions made up of the sums, differences, products or quotients of  different sorts of functions (polynomials, exponentials and logarithms), or different powers of the same sort of function we  say that one function dominates the other. This means that as x approaches infinity or negative infinity, the graph will eventually look like the dominating function. • Exponentials dominate polynomials, • Polynomials dominate logarithms, • Among exponentials, larger bases dominate smaller, • Among polynomials, higher powers dominate lower, For example, consider the function x{{e}^{x}}. The exponential function dominates the polynomial. As x\to \infty , the graph looks like an exponential approaching infinity; that is, \displaystyle \underset{x\to \infty }{\mathop{\lim }}\,x{{e}^{x}}=\infty . As  x\to -\infty the graph looks like an exponential with very small but still positive values; that is \displaystyle \underset{x\to -\,\infty }{\mathop{\lim }}\,x{{e}^{x}}=0. Another example, consider a rational function (the quotient of two polynomials). If the numerator is of higher degree than the denominator, as x\to \pm \infty  the numerator dominates and the limit is infinite. If the denominator is of higher degree, the denominator dominates and the limit is zero. (And if they are of the same degree, then the limit is the ratio of the leading coefficients. Dominance does not apply.) Dominance works in other ways as well. Consider the graphs of y=3{{x}^{2}} and y={{2}^{x}}. In a standard graphing window the graphs appear to intersect twice. But on the right side the exponential function is lower, than the polynomial. Look farther out and farther up, the exponential dominates and will eventually lie above the polynomial   (after x = 7.334). Here’s an example that pretty much has to be done using the dominance approach. \displaystyle \underset{x\to \infty }{\mathop{\lim }}\,\frac{\ln \left( {{x}^{5}} \right)}{{{x}^{0.02}}}=0 The polynomial function in the denominator, even with the very small exponent, will dominate the logarithm function. The denominator will eventually get larger than the numerator and drive the quotient towards zero. We will return to this function when we know about finding maximums and points of inflection and find where it starts decreasing. For more on this see my post Far Out! 4 thoughts on “Dominance 1. You actually make it seem so easy with your presentation but I find this matter to be really something that I think I would never understand. It seems too complex and very broad for me. I’m looking forward for your next post, I will try to get the hang of it! Like 2. Pingback: Far Out! | Teaching Calculus 3. It’s a pity you don’t have a donate button! I’d definitely donate to this fantastic blog! I guess for now i’ll settle for bookmarking and adding your RSS feed to my Google account. I look forward to brand new updates and will share this blog with my Facebook group. Talk soon! Like Leave a Reply Fill in your details below or click an icon to log in: WordPress.com Logo You are commenting using your WordPress.com account. Log Out /  Change ) Google photo You are commenting using your Google account. Log Out /  Change ) Twitter picture You are commenting using your Twitter account. Log Out /  Change ) Facebook photo You are commenting using your Facebook account. Log Out /  Change ) Connecting to %s This site uses Akismet to reduce spam. Learn how your comment data is processed.
__label__pos
0.622666
Complete Communications Engineering The SHA-1 Hashing algorithm specifies a Secure Hash Algorithm, which can be used to generate a condensed representation of a message called a message digest. The algorithm is required for use with the Digital Signature Algorithm (DSA) as specified in the Digital Signature Standard (DSS) and whenever a secure hash algorithm is required. Both the transmitter and intended receiver of a message in computing and verifying a digital signature use this method. The four hash functions that comprise SHA-2 are SHA-224, SHA-256 (SHA 256 Algorithm), SHA-384, and SHA-512, with the numeric portion of the name indicating the number of bits in the key. SHA-2 functions are more secure than SHA-1 although not as widely used currently. SHA-1 Algorithm SHA-1 Hash is used for computing a condensed representation of a message or a data file. When a message of any length < 2^64 bits is input, the Hash algorithm produces a 160-bit output called a message digest. The message digest can then be input to the Digital Signature Algorithm (DSA), which generates or verifies the signature for the message. Signing the message digest rather than the message often improves the efficiency of the process because the message digest is usually much smaller in size than the message. The same hash algorithm must be used by the verifier of a digital signature as was used by the creator of the digital signature. SHA-1 is no longer considered secure.  As designed, it was thought to be computationally infeasible to find a message which corresponds to a given message digest, or to find two different messages which produce the same message digest. Any change to a message in transit would, with very high probability, result in a different message digest, and the signature would fail to verify. It has since been found to have meaningful weaknesses.  SHA-1 is a technical revision of SHA (FIPS 180). A circular left shift operation has been added to the SHA (FIPS 180). SHA-1 improves the security provided by the SHA standard. The SHA-1 is based on principles similar to those used by the MD4 message digest algorithm. Features More Information supported-platforms
__label__pos
0.992263
Combining dynamic text boxes based on their width Hey everyone. I need to be able to combine two or more dynamic text boxes in my flash template, based on information sent from the cgData when playing the template. Information is sent in (i.e. first name in regular font & last name in bold font), and I would like based on the width of each text box to display the first name aligned left, and the last name immediately next to it, so as to appear as the full name. I have managed to make the text boxes expandable so as to grow or shrink to the contents (by double clicking on the bottom right of the text boxes so as to unlock the fixed width handle). I then inside an .as file assign the x position of the last_name to first_name.x + first_name.textWidth and works fine using dummy text inside the .fla. When I try to call this template inserting it with cgData however, the calculations are wrong since it is still calculating based on the dummy text in the .fla, and not the width of the data that is sent in. I know this is more of an actionscript question, but is there somewhere i.e. a function or something that I can override in Caspar, where I can call my resize methods after the data is sent in and the values are the ones sent in? I have tried overriding the SetData method, but still no use: public class Main extends CasparTemplate { override public function SetData(xmlData:XML):void { super.SetData(xmlData); Text.last_name.x = Text.first_name.x + Text.first_name.textWidth + 10; } } I don’t think that works. Also override postInitialize and set the autoSize property: Text.last_name.autoSize = TextFieldAutoSize.LEFT; That makes the field adapt it’s width to the text. Be sure to NOT set the line to wrap. Then use width instead of textWidth: Text.last_name.x = Text.first_name.x + Text.first_name.width + 10; That should do it. Dear didikunz, When I add this line of code to one template: text1.f0.autoSize = TextFieldAutoSize.LEFT; when I test inside Animate, it works great. But when I try to generate the template with template generator, I get the following Error: 1120: Access of Undefined Property TextFieldAutoSize. Any suggestions? Thanks in Advance. If I understood correctly you wish to have different styles in the same text box. With Actionscript you can put HTML formatted text and so change styles in the middle of the text with tags. Unfortunately I cannot find the working prototype, so I made a little search and should be something like this var myText:String = "<p>This is <b>some</b> content to <i>render</i> as <u>HTML</u> text.</p>"; myTextBox.htmlText = myText; well I do remember the .htmlText but not much more I hope it helps Adobe Help Text Reference Is text1.f0 a movieClip with a textfied inside? text1 would be the movieClip and f0the TextField, right? If yes, I don’t understand, why it should not work. Hi Didikunz, Thanks a lot for your prompt reply and the help, I finally figured it out, I forgot to import flash.textFieldAutoSize. Now it’s working fine, but something is messing with the mask that I’ve added, when timeline reaches the first stop() action, when trying to play out animation, it just jumps to the last frame instead of playing all the frames. I’ll upload also the .fla file tomorrow .
__label__pos
0.992924
Difference Between Similar Terms and Objects Difference Between AES and Twofish AES vs Twofish The Advanced Encryption Standard, or AES, is currently the latest standard that has been adopted by United States government for encrypting top secret information. The label of AES was not initially meant for a single encryption method; instead it was a competition between many. Among the five finalists are Rijndael and Twofish. Rjindael won and was adopted as AES while Twofish obviously did not. AES is a block cipher and uses a substitution-permutation network for encrypting the data. On the other hand, Twofish uses a Feistel network to accomplish the same task. This means that Twofish is very similar, albeit much more complex than the older standards DES (Data Encryption Standard) and 3DES (Triple DES). Despite being similar to the older DES encryption, Twofish is unbreakable; even in theoretical perspective. AES is also a very robust encryption standard especially with very long key lengths. There are instances though where AES encryption is breakable. It is not very alarming though as the break was done on the 8 rounds version, which is not in use. There is still no proven attack where data was actually gathered by breaking the AES encryption. Depending on the length of the key, AES implements different number of round of encryption. For key sizes of 128 bits, 192 bits, and 256 bits, the number of rounds is 10, 12, and 14 respectively. Twofish does not vary the number of rounds for any key size. Instead, it uses a fixed number of 16, regardless. Probably the primary reason why Rijndael was chosen for AES rather than Twofish is the fact that it is very efficient when it comes to hardware. It requires less memory and fewer cycles in order to encrypt data. Although the impact is smaller on high-end devices, for low end devices the gap can be very significant. Although it might seem like breaking the cipher is the only way to break into a secure system. It is actually the most difficult way to do it; especially when you are facing a very tough encryption standard like both AES and Twofish are. Summary: 1. AES is actually Rijndael while Twofish is one of the AES finalist 2. AES uses the substitution-permutation network while Twofish uses the Feistel network 3. AES is breakable in some forms while Twofish is 4. AES implements fewer rounds than Twofish 5. AES is more efficient than Twofish Search DifferenceBetween.net : Email This Post Email This Post : If you like this article or our site. Please spread the word. Share it with your friends/family. Leave a Response Please note: comment moderation is enabled and may delay your comment. There is no need to resubmit your comment. Articles on DifferenceBetween.net are general information, and are not intended to substitute for professional advice. The information is "AS IS", "WITH ALL FAULTS". User assumes all risk of use, damage, or injury. You agree that we have no liability for any damages. See more about : , Protected by Copyscape Plagiarism Finder
__label__pos
0.894554
Sign up × Mathematics Stack Exchange is a question and answer site for people studying math at any level and professionals in related fields. It's 100% free, no registration required. Let $U\subset \mathbb{R}^k$ be an open set, $n>k$ and $\varphi_1,\varphi_2 : U\to \mathbb{R}^n$ be immersions, meaning continuously differentiable such that the differential taken in any point of $U$ is injective. Also, let $\varphi_1(U)=\varphi_2(U)=:U'$ and $\varphi_1$ and $\varphi_2$ be injective (and homeomorphisms $U\to U'$). Then $\varphi_1^{-1} \circ \varphi_2 : U\to U$ is a diffeomorphism. My problem is that $\varphi_1^{-1}$ need not be differentiable in the usual analysis sense, because $U'$ is not an open subset of $\mathbb{R}^n$. I am guessing that $d (\varphi_1^{-1}\circ \varphi_2)(x)$ should be $d(\varphi_1)(\varphi_1^{-1}(\varphi_2(x)))^{-1}\cdot d(\varphi_2)(x)$, because this is what happens if $k=n$ and it is still defined for $k<n$. However, I would need a way to apply the theorem on inverse functions to $\varphi_1$. share|cite|improve this question 2 Answers 2 up vote 1 down vote accepted I managed to solve this after all now (which is in part thanks to Sam's post, but I use a different idea). Let $x\in U$ and $a:=\varphi_1(x)\in U'$. Choose $y\in U$ with $\varphi_2(y)=a$. It is enough to show that there is an open neighbourhood $V$ of $a$ in $U'$ such that $\varphi_1^{-1} \circ \varphi_2 : \varphi_2^{-1}(V)\to U$ is differentiable (by symmetry and because differentiability is a local property). Without loss of generality, let the first $k$ rows of $d\varphi_1(x)$ be independent. I want to use the fact that for suitably small $V$ as above, $V=W\cap U'$ with $W$ open in $\mathbb{R}^n$, there is a diffeomorphism $\Phi:W\to W'\subseteq \mathbb{R}^n$ such that $\Phi (V) = W'\cap \{ (x_1,..,x_n)\in W'\, |\, x_{k+1},..,x_n = 0 \}$. In fact, one can be chosen as $\Phi: (x_1,..,x_k,x_{k+1},..,x_n)\mapsto (x_1,..,x_k,x_{k+1}-\varphi_{1,k+1}(\psi(x_1,..,x_k)),..,x_{n}-\varphi_{1,n}(\psi(x_1,..,x_k)))$ where, using the inverse function theorem, $\psi$ is the locally defined inverse function of $(\varphi_{1,1},..,\varphi_{1,k})$. (The differential of $\Phi$ is everywhere lower triangular with determinant 1.) Let $p:\mathbb{R}^n\to\mathbb{R}^k$ be the projection to the first $k$ coordinates. The map $f:=p\circ \Phi \circ \varphi_1$ (defined on a suitable open set in $U$ containing $x$ and mapping to $\mathbb{R}^k$) is differentiable and injective. For any $x'$ close to $x$, $df(x') = dp(\Phi(\varphi_1 (x'))) \cdot d\Phi (\varphi_1 (x')) \cdot d\varphi_1 (x')$. Now the first $k$ rows of $d\varphi_1(x')$ are independent (since $x'$ is close to $x$ and this is the case for $d\varphi_1(x)$) and for $1\leq i\leq k$, the $i$th row of $d \Phi$ taken at any point is simply a $1$ at the $i$th coordinate and zeroes otherwise. The same goes for $dp$. So this composition of three matrices still has its first $k$ rows independent, so it is an invertible $k\times k$ matrix. It follows that $f$ is a diffeomorphism onto its image. $\varphi_1^{-1}\circ \varphi_2$ equals the composition $f^{-1} \circ p \circ \Phi \circ \varphi_2$ on a suitably small neighbourhood of $y$ and all these maps are differentiable. share|cite|improve this answer Your maps $\phi_1, \phi_2$ are usually called embeddings (= injective proper immersions). Are you familiar with abstract manifolds? This is in some sense the appropriate language to discribe the map $\phi^{-1}: U' \rightarrow U$ and its differential $d \phi^{-1}(p)$ (this would be a map from the tangent space of $U'$ at $p$ onto $\mathbb{R}^k$). For an elementary treatment, try to extend the map $\phi_1$ to a map $$\tilde{\phi}_1 : U \times \mathbb{R}^{n-k},\,\,\,\, \tilde{\phi}_1(x,y) = \phi_1(x) + A(x)y$$ where $A(x) \in \mathbb{R}^{(n-k)\times k}$ is a smooth map of matrices, such that $d\tilde{\phi_1}(x,0) = d\phi_1(x) + A(x)$ is an isomorphism for every $x \in U$. Do the same for $\phi_2$ and then argue by the inverse function theorem. share|cite|improve this answer Your Answer   discard By posting your answer, you agree to the privacy policy and terms of service. Not the answer you're looking for? Browse other questions tagged or ask your own question.
__label__pos
0.964283
A toolkit for working with phylogenetic data. v0.20.0  All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages utils/text/table.cpp Go to the documentation of this file. 1 /* 2  Genesis - A toolkit for working with phylogenetic data. 3  Copyright (C) 2014-2017 Lucas Czech 4  5  This program is free software: you can redistribute it and/or modify 6  it under the terms of the GNU General Public License as published by 7  the Free Software Foundation, either version 3 of the License, or 8  (at your option) any later version. 9  10  This program is distributed in the hope that it will be useful, 11  but WITHOUT ANY WARRANTY; without even the implied warranty of 12  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13  GNU General Public License for more details. 14  15  You should have received a copy of the GNU General Public License 16  along with this program. If not, see <http://www.gnu.org/licenses/>. 17  18  Contact: 19  Lucas Czech <[email protected]> 20  Exelixis Lab, Heidelberg Institute for Theoretical Studies 21  Schloss-Wolfsbrunnenweg 35, D-69118 Heidelberg, Germany 22 */ 23  32  34  35 #include <algorithm> 36 #include <assert.h> 37 #include <iomanip> 38 #include <sstream> 39 #include <stdexcept> 40 #include <string> 41 #include <vector> 42  43 namespace genesis { 44 namespace utils { 45  46 // ================================================================================================= 47 // Text Table 48 // ================================================================================================= 49  50 // --------------------------------------------------------------------- 51 // Accessors 52 // --------------------------------------------------------------------- 53  54 size_t Table::length() const 55 { 56  if( columns_.size() == 0 ) { 57  return 0; 58  } 59  60  size_t len = columns_[0].length(); 61  for( auto const& c : columns_ ) { 62  if( len != c.length() ) { 63  throw std::length_error("Table columns are unevenly filled."); 64  } 65  } 66  67  // If we are here, no exception was thrown. Thus, all columns are evenly filled. The current 68  // marker for inserting new values therefore needs to be at the first column, too. If not, 69  // we failed to set it correctly somewhere. 70  assert( current_col_ == 0 ); 71  72  return len; 73 } 74  75 // --------------------------------------------------------------------- 76 // Modifiers 77 // --------------------------------------------------------------------- 78  83 { 84  current_col_ = 0; 85  columns_.clear(); 86 } 87  92 { 93  current_col_ = 0; 94  for( auto& c : columns_ ) { 95  c.clear_content(); 96  } 97 } 98  105 Table::Column& Table::add_column( std::string label ) 106 { 107  auto const len = length(); 108  columns_.push_back( Column(label) ); 109  for( size_t i = 0; i < len; ++i ) { 110  columns_.back().append(""); 111  } 112  return columns_.back(); 113 } 114  115 Table& Table::operator << ( std::string value ) 116 { 117  return append( value ); 118 } 119  120 // Table& Table::operator << ( Style const& value ) 121 // { 122 // return append( value ); 123 // } 124  125 Table& Table::append ( std::string value ) 126 { 127  columns_[ current_col_ ].append( value ); 128  129  ++current_col_; 130  if( current_col_ >= columns_.size() ) { 131  current_col_ = 0; 132  } 133  134  return *this; 135 } 136  137 Table& Table::append ( Style const& style, std::string value ) 138 { 139  columns_[ current_col_ ].append( style, value ); 140  141  ++current_col_; 142  if( current_col_ >= columns_.size() ) { 143  current_col_ = 0; 144  } 145  146  return *this; 147 } 148  157 { 158  while( current_col_ < columns_.size() ) { 159  columns_[ current_col_ ].append(""); 160  ++current_col_; 161  } 162  163  current_col_ = 0; 164  return *this; 165 } 166  167 // --------------------------------------------------------------------- 168 // Output 169 // --------------------------------------------------------------------- 170  171 void Table::write( std::ostream& out ) const 172 { 173  // Write labels. 174  for( auto const& c : columns_ ) { 175  c.write_label(out); 176  out << " "; 177  } 178  out << "\n"; 179  180  // Write data. 181  for( size_t i = 0; i < length(); ++i ) { 182  for( auto const& c : columns_ ) { 183  c.write_row(out, i); 184  out << " "; 185  } 186  out << "\n"; 187  } 188 } 189  190 void Table::write( std::ostream& out, TableLayout const& layout ) const 191 { 192  // Take a TableLayout Line and print it according to the table data. 193  auto write_line = [&] (TableLayout::Line const& line) { 194  if( line.enabled ) { 195  out << line.left_border; 196  for( size_t ci = 0; ci < columns_.size(); ++ci ) { 197  // out << std::string( columns_[ci].width(), line.filler ); 198  for( size_t i = 0; i < columns_[ci].width(); ++i ) { 199  out << line.filler; 200  } 201  if( ci < columns_.size() - 1 ) { 202  out << line.separator; 203  } 204  } 205  out << line.right_border << "\n"; 206  } 207  }; 208  209  // Write line above header. 210  write_line(layout.top); 211  212  // Write labels. 213  out << layout.header.left_border; 214  for( size_t ci = 0; ci < columns_.size(); ++ci ) { 215  columns_[ci].write_label(out); 216  if( ci < columns_.size() - 1 ) { 217  out << layout.header.separator; 218  } 219  } 220  out << layout.header.right_border << "\n"; 221  222  // Write line between header and content. 223  write_line(layout.separator); 224  225  // Write data. 226  for( size_t i = 0; i < length(); ++i ) { 227  out << layout.row.left_border; 228  for( size_t ci = 0; ci < columns_.size(); ++ci ) { 229  columns_[ci].write_row(out, i); 230  if( ci < columns_.size() - 1 ) { 231  out << layout.row.separator; 232  } 233  } 234  out << layout.row.right_border << "\n"; 235  } 236  237  // Write line below content. 238  write_line(layout.bottom); 239 } 240  241 std::string Table::to_string() const 242 { 243  std::stringstream ss; 244  write(ss); 245  return ss.str(); 246 } 247  248 std::string Table::to_string( TableLayout const& layout ) const 249 { 250  std::stringstream ss; 251  write(ss, layout); 252  return ss.str(); 253 } 254  255 std::ostream& operator << (std::ostream& out, Table const& table) 256 { 257  table.write(out); 258  return out; 259 } 260  261 // ================================================================================================= 262 // Table Column 263 // ================================================================================================= 264  265 // --------------------------------------------------------------------- 266 // Properties 267 // --------------------------------------------------------------------- 268  269 void Table::Column::label( std::string value ) 270 { 271  width_ = std::max( width_, value.size() ); 272  label_ = value; 273 } 274  275 std::string Table::Column::label() const 276 { 277  return label_; 278 } 279  281 { 282  just_ = value; 283 } 284  286 { 287  return just_; 288 } 289  297 void Table::Column::width( size_t value ) 298 { 299  if( value > width_ ) { 300  width_ = value; 301  } 302 } 303  304 size_t Table::Column::width() const 305 { 306  return width_; 307 } 308  318 { 319  size_t mx = label_.size(); 320  for( auto const& v : data_ ) { 321  mx = std::max( mx, v.second.size() ); 322  } 323  width_ = mx; 324 } 325  326 // --------------------------------------------------------------------- 327 // Accessors 328 // --------------------------------------------------------------------- 329  330 size_t Table::Column::length() const 331 { 332  return data_.size(); 333 } 334  335 std::string Table::Column::row( size_t i ) const 336 { 337  // Throws out of range if neccessary. 338  return data_.at( i ).second; 339 } 340  341 // --------------------------------------------------------------------- 342 // Modifiers 343 // --------------------------------------------------------------------- 344  346 { 347  width_ = label_.size(); 348  data_.clear(); 349 } 350  351 void Table::Column::append( std::string value ) 352 { 353  width_ = std::max( width_, value.size() ); 354  data_.push_back({ Style(), value }); 355 } 356  357 void Table::Column::append( Style const& style, std::string value ) 358 { 359  width_ = std::max( width_, value.size() ); 360  data_.push_back({ style, value }); 361 } 362  363 // --------------------------------------------------------------------- 364 // Output 365 // --------------------------------------------------------------------- 366  367 void Table::Column::write_row( std::ostream& stream, size_t row ) const 368 { 369  // Throws out_of_range if neccessary. 370  auto data = data_.at(row); 371  write( stream, data.first, data.second ); 372 } 373  374 void Table::Column::write_label( std::ostream& stream ) const 375 { 376  write( stream, Style(), label_ ); 377 } 378  379 void Table::Column::write( std::ostream& stream, Style style, std::string text ) const 380 { 381  assert( text.size() <= width_ ); 382  383  if( just_ == Justification::kLeft ) { 384  text = text + std::string(width_ - text.size(), ' '); 385  } 386  if( just_ == Justification::kCentered ) { 387  const size_t pad = (width_ - text.size()) / 2; 388  text = std::string(pad, ' ') + text + std::string(width_ - text.size() - pad, ' '); 389  } 390  if( just_ == Justification::kRight ) { 391  text = std::string(width_ - text.size(), ' ') + text; 392  } 393  394  stream << style( text ); 395 } 396  397 // ================================================================================================= 398 // TableLayout 399 // ================================================================================================= 400  401 // --------------------------------------------------------------------- 402 // Binding 403 // --------------------------------------------------------------------- 404  405 std::ostream& operator << (std::ostream& out, TableLayout::Binder const& binder) 406 { 407  binder.table.write(out, binder.layout); 408  return out; 409 } 410  431 { 432  return Binder(*this, table); 433 } 434  435 // --------------------------------------------------------------------- 436 // Default TableLayouts 437 // --------------------------------------------------------------------- 438  440 { 441  // TableLayout already has minimal settings (just a space as separator, nothing else). 442  return TableLayout(); 443 } 444  445 TableLayout simple_layout( bool condensed ) 446 { 447  auto f = TableLayout(); 448  449  f.header.left_border = (condensed ? "" : " "); 450  f.header.separator = (condensed ? " " : " "); 451  f.header.right_border = (condensed ? "" : " "); 452  453  f.separator.enabled = true; 454  f.separator.left_border = (condensed ? "" : "-"); 455  f.separator.filler = "-"; 456  f.separator.separator = (condensed ? " " : "- -"); 457  f.separator.right_border = (condensed ? "" : "-"); 458  459  f.row = f.header; 460  461  return f; 462 } 463  464 TableLayout simple_grid( bool condensed ) 465 { 466  auto f = TableLayout(); 467  468  f.header.left_border = (condensed ? "" : " "); 469  f.header.separator = (condensed ? "|" : " | "); 470  f.header.right_border = (condensed ? "" : " "); 471  472  f.separator.enabled = true; 473  f.separator.left_border = (condensed ? "" : "-"); 474  f.separator.filler = "-"; 475  f.separator.separator = (condensed ? "+" : "-+-"); 476  f.separator.right_border = (condensed ? "" : "-"); 477  478  f.row = f.header; 479  480  return f; 481 } 482  483 TableLayout simple_frame( bool condensed ) 484 { 485  auto f = TableLayout(); 486  487  f.top.enabled = true; 488  f.top.left_border = (condensed ? "+" : "+-"); 489  f.top.filler = "-"; 490  f.top.separator = (condensed ? "+" : "-+-"); 491  f.top.right_border = (condensed ? "+" : "-+"); 492  493  f.header.left_border = (condensed ? "|" : "| "); 494  f.header.separator = (condensed ? "|" : " | "); 495  f.header.right_border = (condensed ? "|" : " |"); 496  497  f.separator = f.top; 498  f.row = f.header; 499  f.bottom = f.top; 500  501  return f; 502 } 503  504 TableLayout extended_grid( bool condensed ) 505 { 506  auto f = TableLayout(); 507  508  f.header.left_border = (condensed ? "" : " "); 509  f.header.separator = (condensed ? "│" : " │ "); 510  f.header.right_border = (condensed ? "" : " "); 511  512  f.separator.enabled = true; 513  f.separator.left_border = (condensed ? "" : "─"); 514  f.separator.filler = "─"; 515  f.separator.separator = (condensed ? "┼" : "─┼─"); 516  f.separator.right_border = (condensed ? "" : "─"); 517  518  f.row = f.header; 519  520  return f; 521 } 522  523 TableLayout extended_frame( bool condensed ) 524 { 525  auto f = TableLayout(); 526  527  f.top.enabled = true; 528  f.top.left_border = (condensed ? "┌" : "┌─"); 529  f.top.filler = "─"; 530  f.top.separator = (condensed ? "┬" : "─┬─"); 531  f.top.right_border = (condensed ? "┐" : "─┐"); 532  533  f.header.left_border = (condensed ? "│" : "│ "); 534  f.header.separator = (condensed ? "│" : " │ "); 535  f.header.right_border = (condensed ? "│" : " │"); 536  537  f.separator.enabled = true; 538  f.separator.left_border = (condensed ? "├" : "├─"); 539  f.separator.filler = "─"; 540  f.separator.separator = (condensed ? "┼" : "─┼─"); 541  f.separator.right_border = (condensed ? "┤" : "─┤"); 542  543  f.row = f.header; 544  545  f.bottom.enabled = true; 546  f.bottom.left_border = (condensed ? "└" : "└─"); 547  f.bottom.filler = "─"; 548  f.bottom.separator = (condensed ? "┴" : "─┴─"); 549  f.bottom.right_border = (condensed ? "┘" : "─┘"); 550  551  return f; 552 } 553  554 TableLayout double_grid( bool condensed ) 555 { 556  auto f = TableLayout(); 557  558  f.header.left_border = (condensed ? "" : " "); 559  f.header.separator = (condensed ? "║" : " ║ "); 560  f.header.right_border = (condensed ? "" : " "); 561  562  f.separator.enabled = true; 563  f.separator.left_border = (condensed ? "" : "═"); 564  f.separator.filler = "═"; 565  f.separator.separator = (condensed ? "╬" : "═╬═"); 566  f.separator.right_border = (condensed ? "" : "═"); 567  568  f.row = f.header; 569  570  return f; 571 } 572  573 TableLayout double_frame( bool condensed ) 574 { 575  auto f = TableLayout(); 576  577  f.top.enabled = true; 578  f.top.left_border = (condensed ? "╔" : "╔═"); 579  f.top.filler = "═"; 580  f.top.separator = (condensed ? "╦" : "═╦═"); 581  f.top.right_border = (condensed ? "╗" : "═╗"); 582  583  f.header.left_border = (condensed ? "║" : "║ "); 584  f.header.separator = (condensed ? "║" : " ║ "); 585  f.header.right_border = (condensed ? "║" : " ║"); 586  587  f.separator.enabled = true; 588  f.separator.left_border = (condensed ? "╠" : "╠═"); 589  f.separator.filler = "═"; 590  f.separator.separator = (condensed ? "╬" : "═╬═"); 591  f.separator.right_border = (condensed ? "╣" : "═╣"); 592  593  f.row = f.header; 594  595  f.bottom.enabled = true; 596  f.bottom.left_border = (condensed ? "╚" : "╚═"); 597  f.bottom.filler = "═"; 598  f.bottom.separator = (condensed ? "╩" : "═╩═"); 599  f.bottom.right_border = (condensed ? "╝" : "═╝"); 600  601  return f; 602 } 603  604 } // namespace utils 605 } // namespace genesis TableLayout minimal_layout() void write_label(std::ostream &stream) const TableLayout double_grid(bool condensed) void shrink_width() Shrinks the column width to the minmal width that suffices to fit in all values of the column (i... TableLayout simple_layout(bool condensed) void clear() Clears all columns and their data from the table. std::string to_string() const One line of the TableLayout. Simple text style class for colorized and bold output to a terminal. Definition: style.hpp:81 void clear_content() Clears the data contents of all columns. Their labels etc stay unchanged. Column & add_column(std::string label="") Add a column to the table. Binder operator()(Table const &table) Functional operator that allows to bind a TableLayout to a Table so that they can be used in one ostr... std::string row(size_t i) const Helper struct to bind a layout to a table. void append(std::string value) Table & line_break() Finish the currently line and move to the next one. std::ostream & operator<<(std::ostream &os, const Matrix< T > &matrix) Print the elements of a Matrix to a stream, using operator << for each element. TableLayout extended_frame(bool condensed) Provides some commonly used string utility functions. TableLayout extended_grid(bool condensed) TableLayout simple_grid(bool condensed) void write(std::ostream &out) const Table & operator<<(std::string value) void write_row(std::ostream &stream, size_t row) const Justification justify() const Table & append(std::string value) TableLayout simple_frame(bool condensed) TableLayout double_frame(bool condensed)
__label__pos
0.995583
Cody Solution 1613066 Submitted on 20 Aug 2018 by halleyhit This solution is locked. To view this solution, you need to provide a solution of the same size or smaller. Test Suite Test Status Code Input and Output 1   Pass n = 2; p_correct = 1; assert(isequal(factor_digits(n),p_correct)) f = 2 2   Pass n = 26; p_correct = [1 0 0 0 0 1]; assert(isequal(factor_digits(n),p_correct)) f = 2 13 3   Pass n = 168; p_correct = [1 0 1 3]; assert(isequal(factor_digits(n),p_correct)) f = 2 2 2 3 7 4   Pass n = 999; p_correct = [1 0 0 0 0 0 0 0 0 0 3 0]; assert(isequal(factor_digits(n),p_correct)) f = 3 3 3 37
__label__pos
0.959675
Post a New Question college algebra posted by on . Solve for 3x + 6Y - 9z = 3 5x - 7y + 10z = -4 11x +4y - 6z = 2 • college algebra - , take the first equation, and the third equation. mulitiply the first by 2, and the third by 3. 6x+12y-18z=6 33x+12y-18z=6 that implies that x is zero. then the first and second 6y-9z=3 -7y+10z=-4 multply the first by 10, the second by 9 60y-90z=30 -63y+90z=-36 add, -3y=6 or y=-2 solve for z in any equation, check your answers. • college algebra - , Since you don't specify which method, let's use good ol' elimination. #1x2 ---> 6x+12y-18z=6 #3x3 ---> 33x+12y-18z=6 subtract, by luck both z and y disappear x = 0 #2x3 ---> 15x-21y+30z=-12 #3x5 ---> 55x+20y-30z=10 add 70x-y=-2 but x=0, so y = 2 sub back into #3 to get z = 1 x=0 y=2 z=1 • college algebra - , 2x^3(2x^2+4x+3) Answer This Question First Name: School Subject: Answer: Related Questions More Related Questions Post a New Question
__label__pos
0.977005
Finite State Machine Sometimes we need to apply a state in some cases when we want to modify a status of an object. A basic example is water state: gaseous, liquid and solid and the events responsible for change it: Object: Water Event: Fusion States: From solid to liquid Event: Evaporation States: From liquid to gaseous Event: Condensation States: From gaseous to liquid Event: Solidification States: From liquid to solid Look at this as a simple example that happens around us. As in many other cases, we can apply it in programming using something called state machine. In the above example, we have different states and events responsible for change these states. Proposed, accepted, rejected and pending are possible states for a object and the state of an object can change when an event occurs. Pretty similar to the example of water states, right? RailsGyn — RGSoC 2017 Written by We are two women(Amanda and Juliana), developers from Brazil and we are participating in this AMAZING initiative that is Rails Girls Summer of Code. Welcome to a place where words matter. On Medium, smart voices and original ideas take center stage - with no ads in sight. Watch Follow all the topics you care about, and we’ll deliver the best stories for you to your homepage and inbox. Explore Get unlimited access to the best stories on Medium — and support writers while you’re at it. Just $5/month. Upgrade
__label__pos
0.606752
BBCode tutorial View previous topic View next topic Go down BBCode tutorial Post  Admin on Mon Dec 26, 2011 9:17 pm BBCode is a special implementation of HTML. Whether you can actually use BBCode in your posts on the forum is determined by the administrator. In addition, you can disable BBCode on a per post basis via the posting form. BBCode itself is similar in style to HTML: tags are enclosed in square brackets [ and ] rather than < and > and it offers greater control over what and how something is displayed. Depending on the template you are using you may find adding BBCode to your posts is made much easier through a clickable interface above the message area on the posting form. Even with this you may find the following guide useful. There are funny looking faces on one side of the page. These faces are called Emoticons. There are many types of them, and the admin's of the forum will decide what Emoticons you can use. Those aren't ll the Emocations on the forum, there is a button just above all of the Emoticons you see. It should say "Veiw more Emoticons" Then a list will appear, you can select watever selection you fancy. Now you will be wondering How you use the Emoticons, well it's really basic. Just click on the Emoticon you want in your message and a code will apper. This is a shortend IMG code. They can very. For this example I will use the king Emoticon. The code is: Code: :king: When your posting, altought wen you click preveiw it will show as king, cool right. You can use Emoticons for many things. So you want something to pop out more? Then why not bold it? Bolding is basic. hit the button that is right on top of the postal area. Again a code shall appear, it will look like so: Code:  [b] Then you hit the butto again. Code: [b][/b] You then type between the code like this: Code:  [b] Respect the rules [/b] It will then apear like so: Respect the rules So you want to slant something? Well you can. There is a button for it right above the postal zone. The button looks like this: . It will slant it like this When you click the button some coding will appear like so: Code: [i] Then you hit the button a second time so this is what is there: Code: [i][/i] Then you type what ou want inside the text: Code: [i]respect the rules[/i] and it will appear like this: respect the rules Do you want something underlined? If you do then you'll love this. There is an underline tool. There is a button that looks like so: . This is the same as the bold and under line. When you click the button some coding will appear like so: Code: [u] Then you hit the button a second time so this is what is there: Code: [u][/u] Then you type what ou want inside the text: Code: [u]respect the rules[/u] and it will appear like this: respect the rules Can you read this? Do you want to do that? You can do intesting things with the cross out tool. It is located right above the postal zone and looks like this: pretty lame cool right? Well heres how you use it. When you click the button some coding will appear like so: Code: [strike] Then you hit the button a second time so this is what is there: Code: [strike][/strike] Then you type what ou want inside the text: Code: [strike]break[/strike] follow the rules and it will appear like this: break follow the rules If you wonder what the button does, it does nothing... I don't know why they even have it there. You can also make things so there in the center of the page. like this Very Happy To do this by clicking he button right above the postal zone area. The button looks like this: A BBCode will apear. The code will look like this: Code: you can then type watever you feel like inside of the code. like this: Code: [center]respect the rules[/center] It will then apear like this: respect the rules If you type beside it like i am right now, it will appear like this Wink . Do you want to flip what your writing to the other side of the page? it will look like this then you can, with ease. All you have to do is click the button on located right above the postal area. A code will appear like this: Code: [right][/right] Then you type wa you want inbetween the code, like so: Code: [right]respect the rule[/right] It will then appear like this: respect the rules I will complete later. Last edited by Admin on Wed Dec 28, 2011 10:07 am; edited 2 times in total (Reason for editing : added to the tutorial.) Admin Admin Posts: 50 Join date: 2011-12-24 Age: 15 Location: North Ameica View user profile http://www.wix.com/kingmitt/pokemon-galaxy Back to top Go down Re: BBCode tutorial Post  Pyro on Fri Dec 30, 2011 12:27 am Thanks for the information.I used to forum and I know what most of the buttons do.Wait...I don't know we can choose what font to use! Let me try. I'm trying it...Is it work? Pyro Posts: 30 Join date: 2011-12-29 Age: 17 View user profile Back to top Go down Re: BBCode tutorial Post  Admin on Fri Dec 30, 2011 11:13 am Pyro wrote:Thanks for the information.I used to forum and I know what most of the buttons do.Wait...I don't know we can choose what font to use! Let me try. I'm trying it...Is it work? Yep, its workng Admin Admin Posts: 50 Join date: 2011-12-24 Age: 15 Location: North Ameica View user profile http://www.wix.com/kingmitt/pokemon-galaxy Back to top Go down View previous topic View next topic Back to top - Similar topics Permissions in this forum: You cannot reply to topics in this forum
__label__pos
0.849734
blob: 796f38a73b176786e120601d126fa32c54009ebf [file] [log] [blame] // Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Check that we can traverse very deep stacks of ConsStrings using // StringCharacterStram. Check that Get(int) works on very deep stacks // of ConsStrings. These operations may not be very fast, but they // should be possible without getting errors due to too deep recursion. #include <stdlib.h> #include "src/init/v8.h" #include "src/api/api-inl.h" #include "src/base/platform/elapsed-timer.h" #include "src/execution/messages.h" #include "src/heap/factory.h" #include "src/heap/heap-inl.h" #include "src/objects/objects-inl.h" #include "src/strings/unicode-decoder.h" #include "test/cctest/cctest.h" #include "test/cctest/heap/heap-utils.h" // Adapted from http://en.wikipedia.org/wiki/Multiply-with-carry class MyRandomNumberGenerator { public: MyRandomNumberGenerator() { init(); } void init(uint32_t seed = 0x5688C73E) { static const uint32_t phi = 0x9E3779B9; c = 362436; i = kQSize-1; Q[0] = seed; Q[1] = seed + phi; Q[2] = seed + phi + phi; for (unsigned j = 3; j < kQSize; j++) { Q[j] = Q[j - 3] ^ Q[j - 2] ^ phi ^ j; } } uint32_t next() { uint64_t a = 18782; uint32_t r = 0xFFFFFFFE; i = (i + 1) & (kQSize-1); uint64_t t = a * Q[i] + c; c = (t >> 32); uint32_t x = static_cast<uint32_t>(t + c); if (x < c) { x++; c++; } return (Q[i] = r - x); } uint32_t next(int max) { return next() % max; } bool next(double threshold) { CHECK(threshold >= 0.0 && threshold <= 1.0); if (threshold == 1.0) return true; if (threshold == 0.0) return false; uint32_t value = next() % 100000; return threshold > static_cast<double>(value)/100000.0; } private: static const uint32_t kQSize = 4096; uint32_t Q[kQSize]; uint32_t c; uint32_t i; }; namespace v8 { namespace internal { namespace test_strings { static const int DEEP_DEPTH = 8 * 1024; static const int SUPER_DEEP_DEPTH = 80 * 1024; class Resource: public v8::String::ExternalStringResource { public: Resource(const uc16* data, size_t length): data_(data), length_(length) {} ~Resource() override { i::DeleteArray(data_); } const uint16_t* data() const override { return data_; } size_t length() const override { return length_; } private: const uc16* data_; size_t length_; }; class OneByteResource : public v8::String::ExternalOneByteStringResource { public: OneByteResource(const char* data, size_t length) : data_(data), length_(length) {} ~OneByteResource() override { i::DeleteArray(data_); } const char* data() const override { return data_; } size_t length() const override { return length_; } private: const char* data_; size_t length_; }; static void InitializeBuildingBlocks(Handle<String>* building_blocks, int bb_length, bool long_blocks, MyRandomNumberGenerator* rng) { // A list of pointers that we don't have any interest in cleaning up. // If they are reachable from a root then leak detection won't complain. Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); for (int i = 0; i < bb_length; i++) { int len = rng->next(16); int slice_head_chars = 0; int slice_tail_chars = 0; int slice_depth = 0; for (int j = 0; j < 3; j++) { if (rng->next(0.35)) slice_depth++; } // Must truncate something for a slice string. Loop until // at least one end will be sliced. while (slice_head_chars == 0 && slice_tail_chars == 0) { slice_head_chars = rng->next(15); slice_tail_chars = rng->next(12); } if (long_blocks) { // Generate building blocks which will never be merged len += ConsString::kMinLength + 1; } else if (len > 14) { len += 1234; } // Don't slice 0 length strings. if (len == 0) slice_depth = 0; int slice_length = slice_depth*(slice_head_chars + slice_tail_chars); len += slice_length; switch (rng->next(4)) { case 0: { uc16 buf[2000]; for (int j = 0; j < len; j++) { buf[j] = rng->next(0x10000); } building_blocks[i] = factory->NewStringFromTwoByte( Vector<const uc16>(buf, len)).ToHandleChecked(); for (int j = 0; j < len; j++) { CHECK_EQ(buf[j], building_blocks[i]->Get(j)); } break; } case 1: { char buf[2000]; for (int j = 0; j < len; j++) { buf[j] = rng->next(0x80); } building_blocks[i] = factory->NewStringFromOneByte(OneByteVector(buf, len)) .ToHandleChecked(); for (int j = 0; j < len; j++) { CHECK_EQ(buf[j], building_blocks[i]->Get(j)); } break; } case 2: { uc16* buf = NewArray<uc16>(len); for (int j = 0; j < len; j++) { buf[j] = rng->next(0x10000); } Resource* resource = new Resource(buf, len); building_blocks[i] = v8::Utils::OpenHandle( *v8::String::NewExternalTwoByte(CcTest::isolate(), resource) .ToLocalChecked()); for (int j = 0; j < len; j++) { CHECK_EQ(buf[j], building_blocks[i]->Get(j)); } break; } case 3: { char* buf = NewArray<char>(len); for (int j = 0; j < len; j++) { buf[j] = rng->next(0x80); } OneByteResource* resource = new OneByteResource(buf, len); building_blocks[i] = v8::Utils::OpenHandle( *v8::String::NewExternalOneByte(CcTest::isolate(), resource) .ToLocalChecked()); for (int j = 0; j < len; j++) { CHECK_EQ(buf[j], building_blocks[i]->Get(j)); } break; } } for (int j = slice_depth; j > 0; j--) { building_blocks[i] = factory->NewSubString( building_blocks[i], slice_head_chars, building_blocks[i]->length() - slice_tail_chars); } CHECK(len == building_blocks[i]->length() + slice_length); } } class ConsStringStats { public: ConsStringStats() { Reset(); } void Reset(); void VerifyEqual(const ConsStringStats& that) const; int leaves_; int empty_leaves_; int chars_; int left_traversals_; int right_traversals_; private: DISALLOW_COPY_AND_ASSIGN(ConsStringStats); }; void ConsStringStats::Reset() { leaves_ = 0; empty_leaves_ = 0; chars_ = 0; left_traversals_ = 0; right_traversals_ = 0; } void ConsStringStats::VerifyEqual(const ConsStringStats& that) const { CHECK_EQ(this->leaves_, that.leaves_); CHECK_EQ(this->empty_leaves_, that.empty_leaves_); CHECK_EQ(this->chars_, that.chars_); CHECK_EQ(this->left_traversals_, that.left_traversals_); CHECK_EQ(this->right_traversals_, that.right_traversals_); } class ConsStringGenerationData { public: static const int kNumberOfBuildingBlocks = 256; explicit ConsStringGenerationData(bool long_blocks); void Reset(); inline Handle<String> block(int offset); inline Handle<String> block(uint32_t offset); // Input variables. double early_termination_threshold_; double leftness_; double rightness_; double empty_leaf_threshold_; int max_leaves_; // Cached data. Handle<String> building_blocks_[kNumberOfBuildingBlocks]; String empty_string_; MyRandomNumberGenerator rng_; // Stats. ConsStringStats stats_; int early_terminations_; private: DISALLOW_COPY_AND_ASSIGN(ConsStringGenerationData); }; ConsStringGenerationData::ConsStringGenerationData(bool long_blocks) { rng_.init(); InitializeBuildingBlocks( building_blocks_, kNumberOfBuildingBlocks, long_blocks, &rng_); empty_string_ = ReadOnlyRoots(CcTest::heap()).empty_string(); Reset(); } Handle<String> ConsStringGenerationData::block(uint32_t offset) { return building_blocks_[offset % kNumberOfBuildingBlocks ]; } Handle<String> ConsStringGenerationData::block(int offset) { CHECK_GE(offset, 0); return building_blocks_[offset % kNumberOfBuildingBlocks]; } void ConsStringGenerationData::Reset() { early_termination_threshold_ = 0.01; leftness_ = 0.75; rightness_ = 0.75; empty_leaf_threshold_ = 0.02; max_leaves_ = 1000; stats_.Reset(); early_terminations_ = 0; rng_.init(); } void AccumulateStats(ConsString cons_string, ConsStringStats* stats) { int left_length = cons_string.first().length(); int right_length = cons_string.second().length(); CHECK(cons_string.length() == left_length + right_length); // Check left side. bool left_is_cons = cons_string.first().IsConsString(); if (left_is_cons) { stats->left_traversals_++; AccumulateStats(ConsString::cast(cons_string.first()), stats); } else { CHECK_NE(left_length, 0); stats->leaves_++; stats->chars_ += left_length; } // Check right side. if (cons_string.second().IsConsString()) { stats->right_traversals_++; AccumulateStats(ConsString::cast(cons_string.second()), stats); } else { if (right_length == 0) { stats->empty_leaves_++; CHECK(!left_is_cons); } stats->leaves_++; stats->chars_ += right_length; } } void AccumulateStats(Handle<String> cons_string, ConsStringStats* stats) { DisallowHeapAllocation no_allocation; if (cons_string->IsConsString()) { return AccumulateStats(ConsString::cast(*cons_string), stats); } // This string got flattened by gc. stats->chars_ += cons_string->length(); } void AccumulateStatsWithOperator(ConsString cons_string, ConsStringStats* stats) { ConsStringIterator iter(cons_string); int offset; for (String string = iter.Next(&offset); !string.is_null(); string = iter.Next(&offset)) { // Accumulate stats. CHECK_EQ(0, offset); stats->leaves_++; stats->chars_ += string.length(); } } void VerifyConsString(Handle<String> root, ConsStringGenerationData* data) { // Verify basic data. CHECK(root->IsConsString()); CHECK_EQ(root->length(), data->stats_.chars_); // Recursive verify. ConsStringStats stats; AccumulateStats(ConsString::cast(*root), &stats); stats.VerifyEqual(data->stats_); // Iteratively verify. stats.Reset(); AccumulateStatsWithOperator(ConsString::cast(*root), &stats); // Don't see these. Must copy over. stats.empty_leaves_ = data->stats_.empty_leaves_; stats.left_traversals_ = data->stats_.left_traversals_; stats.right_traversals_ = data->stats_.right_traversals_; // Adjust total leaves to compensate. stats.leaves_ += stats.empty_leaves_; stats.VerifyEqual(data->stats_); } static Handle<String> ConstructRandomString(ConsStringGenerationData* data, unsigned max_recursion) { Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); // Compute termination characteristics. bool terminate = false; bool flat = data->rng_.next(data->empty_leaf_threshold_); bool terminate_early = data->rng_.next(data->early_termination_threshold_); if (terminate_early) data->early_terminations_++; // The obvious condition. terminate |= max_recursion == 0; // Flat cons string terminate by definition. terminate |= flat; // Cap for max leaves. terminate |= data->stats_.leaves_ >= data->max_leaves_; // Roll the dice. terminate |= terminate_early; // Compute termination characteristics for each side. bool terminate_left = terminate || !data->rng_.next(data->leftness_); bool terminate_right = terminate || !data->rng_.next(data->rightness_); // Generate left string. Handle<String> left; if (terminate_left) { left = data->block(data->rng_.next()); data->stats_.leaves_++; data->stats_.chars_ += left->length(); } else { data->stats_.left_traversals_++; } // Generate right string. Handle<String> right; if (terminate_right) { right = data->block(data->rng_.next()); data->stats_.leaves_++; data->stats_.chars_ += right->length(); } else { data->stats_.right_traversals_++; } // Generate the necessary sub-nodes recursively. if (!terminate_right) { // Need to balance generation fairly. if (!terminate_left && data->rng_.next(0.5)) { left = ConstructRandomString(data, max_recursion - 1); } right = ConstructRandomString(data, max_recursion - 1); } if (!terminate_left && left.is_null()) { left = ConstructRandomString(data, max_recursion - 1); } // Build the cons string. Handle<String> root = factory->NewConsString(left, right).ToHandleChecked(); CHECK(root->IsConsString() && !root->IsFlat()); // Special work needed for flat string. if (flat) { data->stats_.empty_leaves_++; String::Flatten(isolate, root); CHECK(root->IsConsString() && root->IsFlat()); } return root; } static Handle<String> ConstructLeft( ConsStringGenerationData* data, int depth) { Factory* factory = CcTest::i_isolate()->factory(); Handle<String> answer = factory->NewStringFromStaticChars(""); data->stats_.leaves_++; for (int i = 0; i < depth; i++) { Handle<String> block = data->block(i); Handle<String> next = factory->NewConsString(answer, block).ToHandleChecked(); if (next->IsConsString()) data->stats_.leaves_++; data->stats_.chars_ += block->length(); answer = next; } data->stats_.left_traversals_ = data->stats_.leaves_ - 2; return answer; } static Handle<String> ConstructRight( ConsStringGenerationData* data, int depth) { Factory* factory = CcTest::i_isolate()->factory(); Handle<String> answer = factory->NewStringFromStaticChars(""); data->stats_.leaves_++; for (int i = depth - 1; i >= 0; i--) { Handle<String> block = data->block(i); Handle<String> next = factory->NewConsString(block, answer).ToHandleChecked(); if (next->IsConsString()) data->stats_.leaves_++; data->stats_.chars_ += block->length(); answer = next; } data->stats_.right_traversals_ = data->stats_.leaves_ - 2; return answer; } static Handle<String> ConstructBalancedHelper( ConsStringGenerationData* data, int from, int to) { Factory* factory = CcTest::i_isolate()->factory(); CHECK(to > from); if (to - from == 1) { data->stats_.chars_ += data->block(from)->length(); return data->block(from); } if (to - from == 2) { data->stats_.chars_ += data->block(from)->length(); data->stats_.chars_ += data->block(from+1)->length(); return factory->NewConsString(data->block(from), data->block(from+1)) .ToHandleChecked(); } Handle<String> part1 = ConstructBalancedHelper(data, from, from + ((to - from) / 2)); Handle<String> part2 = ConstructBalancedHelper(data, from + ((to - from) / 2), to); if (part1->IsConsString()) data->stats_.left_traversals_++; if (part2->IsConsString()) data->stats_.right_traversals_++; return factory->NewConsString(part1, part2).ToHandleChecked(); } static Handle<String> ConstructBalanced( ConsStringGenerationData* data, int depth = DEEP_DEPTH) { Handle<String> string = ConstructBalancedHelper(data, 0, depth); data->stats_.leaves_ = data->stats_.left_traversals_ + data->stats_.right_traversals_ + 2; return string; } static void Traverse(Handle<String> s1, Handle<String> s2) { int i = 0; StringCharacterStream character_stream_1(*s1); StringCharacterStream character_stream_2(*s2); while (character_stream_1.HasMore()) { CHECK(character_stream_2.HasMore()); uint16_t c = character_stream_1.GetNext(); CHECK_EQ(c, character_stream_2.GetNext()); i++; } CHECK(!character_stream_1.HasMore()); CHECK(!character_stream_2.HasMore()); CHECK_EQ(s1->length(), i); CHECK_EQ(s2->length(), i); } static void TraverseFirst(Handle<String> s1, Handle<String> s2, int chars) { int i = 0; StringCharacterStream character_stream_1(*s1); StringCharacterStream character_stream_2(*s2); while (character_stream_1.HasMore() && i < chars) { CHECK(character_stream_2.HasMore()); uint16_t c = character_stream_1.GetNext(); CHECK_EQ(c, character_stream_2.GetNext()); i++; } s1->Get(s1->length() - 1); s2->Get(s2->length() - 1); } TEST(Traverse) { printf("TestTraverse\n"); CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); v8::HandleScope scope(CcTest::isolate()); ConsStringGenerationData data(false); Handle<String> flat = ConstructBalanced(&data); String::Flatten(isolate, flat); Handle<String> left_asymmetric = ConstructLeft(&data, DEEP_DEPTH); Handle<String> right_asymmetric = ConstructRight(&data, DEEP_DEPTH); Handle<String> symmetric = ConstructBalanced(&data); printf("1\n"); Traverse(flat, symmetric); printf("2\n"); Traverse(flat, left_asymmetric); printf("3\n"); Traverse(flat, right_asymmetric); printf("4\n"); Handle<String> left_deep_asymmetric = ConstructLeft(&data, SUPER_DEEP_DEPTH); Handle<String> right_deep_asymmetric = ConstructRight(&data, SUPER_DEEP_DEPTH); printf("5\n"); TraverseFirst(left_asymmetric, left_deep_asymmetric, 1050); printf("6\n"); TraverseFirst(left_asymmetric, right_deep_asymmetric, 65536); printf("7\n"); String::Flatten(isolate, left_asymmetric); printf("10\n"); Traverse(flat, left_asymmetric); printf("11\n"); String::Flatten(isolate, right_asymmetric); printf("12\n"); Traverse(flat, right_asymmetric); printf("14\n"); String::Flatten(isolate, symmetric); printf("15\n"); Traverse(flat, symmetric); printf("16\n"); String::Flatten(isolate, left_deep_asymmetric); printf("18\n"); } TEST(ConsStringWithEmptyFirstFlatten) { printf("ConsStringWithEmptyFirstFlatten\n"); CcTest::InitializeVM(); v8::HandleScope scope(CcTest::isolate()); Isolate* isolate = CcTest::i_isolate(); i::Handle<i::String> initial_fst = isolate->factory()->NewStringFromAsciiChecked("fst012345"); i::Handle<i::String> initial_snd = isolate->factory()->NewStringFromAsciiChecked("snd012345"); i::Handle<i::String> str = isolate->factory() ->NewConsString(initial_fst, initial_snd) .ToHandleChecked(); CHECK(str->IsConsString()); auto cons = i::Handle<i::ConsString>::cast(str); const int initial_length = cons->length(); // set_first / set_second does not update the length (which the heap verifier // checks), so we need to ensure the length stays the same. i::Handle<i::String> new_fst = isolate->factory()->empty_string(); i::Handle<i::String> new_snd = isolate->factory()->NewStringFromAsciiChecked("snd012345012345678"); cons->set_first(*new_fst); cons->set_second(*new_snd); CHECK(!cons->IsFlat()); CHECK_EQ(initial_length, new_fst->length() + new_snd->length()); CHECK_EQ(initial_length, cons->length()); // Make sure Flatten doesn't alloc a new string. DisallowHeapAllocation no_alloc; i::Handle<i::String> flat = i::String::Flatten(isolate, cons); CHECK(flat->IsFlat()); CHECK_EQ(initial_length, flat->length()); } static void VerifyCharacterStream(String flat_string, String cons_string) { // Do not want to test ConString traversal on flat string. CHECK(flat_string.IsFlat() && !flat_string.IsConsString()); CHECK(cons_string.IsConsString()); // TODO(dcarney) Test stream reset as well. int length = flat_string.length(); // Iterate start search in multiple places in the string. int outer_iterations = length > 20 ? 20 : length; for (int j = 0; j <= outer_iterations; j++) { int offset = length * j / outer_iterations; if (offset < 0) offset = 0; // Want to test the offset == length case. if (offset > length) offset = length; StringCharacterStream flat_stream(flat_string, offset); StringCharacterStream cons_stream(cons_string, offset); for (int i = offset; i < length; i++) { uint16_t c = flat_string.Get(i); CHECK(flat_stream.HasMore()); CHECK(cons_stream.HasMore()); CHECK_EQ(c, flat_stream.GetNext()); CHECK_EQ(c, cons_stream.GetNext()); } CHECK(!flat_stream.HasMore()); CHECK(!cons_stream.HasMore()); } } static inline void PrintStats(const ConsStringGenerationData& data) { #ifdef DEBUG printf("%s: [%u], %s: [%u], %s: [%u], %s: [%u], %s: [%u], %s: [%u]\n", "leaves", data.stats_.leaves_, "empty", data.stats_.empty_leaves_, "chars", data.stats_.chars_, "lefts", data.stats_.left_traversals_, "rights", data.stats_.right_traversals_, "early_terminations", data.early_terminations_); #endif } template<typename BuildString> void TestStringCharacterStream(BuildString build, int test_cases) { FLAG_gc_global = true; CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); HandleScope outer_scope(isolate); ConsStringGenerationData data(true); for (int i = 0; i < test_cases; i++) { printf("%d\n", i); HandleScope inner_scope(isolate); AlwaysAllocateScope always_allocate(isolate); // Build flat version of cons string. Handle<String> flat_string = build(i, &data); ConsStringStats flat_string_stats; AccumulateStats(flat_string, &flat_string_stats); // Flatten string. String::Flatten(isolate, flat_string); // Build unflattened version of cons string to test. Handle<String> cons_string = build(i, &data); ConsStringStats cons_string_stats; AccumulateStats(cons_string, &cons_string_stats); DisallowHeapAllocation no_allocation; PrintStats(data); // Full verify of cons string. cons_string_stats.VerifyEqual(flat_string_stats); cons_string_stats.VerifyEqual(data.stats_); VerifyConsString(cons_string, &data); String flat_string_ptr = flat_string->IsConsString() ? ConsString::cast(*flat_string).first() : *flat_string; VerifyCharacterStream(flat_string_ptr, *cons_string); } } static const int kCharacterStreamNonRandomCases = 8; static Handle<String> BuildEdgeCaseConsString(int test_case, ConsStringGenerationData* data) { Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); data->Reset(); switch (test_case) { case 0: return ConstructBalanced(data, 71); case 1: return ConstructLeft(data, 71); case 2: return ConstructRight(data, 71); case 3: return ConstructLeft(data, 10); case 4: return ConstructRight(data, 10); case 5: // 2 element balanced tree. data->stats_.chars_ += data->block(0)->length(); data->stats_.chars_ += data->block(1)->length(); data->stats_.leaves_ += 2; return factory->NewConsString(data->block(0), data->block(1)) .ToHandleChecked(); case 6: // Simple flattened tree. data->stats_.chars_ += data->block(0)->length(); data->stats_.chars_ += data->block(1)->length(); data->stats_.leaves_ += 2; data->stats_.empty_leaves_ += 1; { Handle<String> string = factory->NewConsString(data->block(0), data->block(1)) .ToHandleChecked(); String::Flatten(isolate, string); return string; } case 7: // Left node flattened. data->stats_.chars_ += data->block(0)->length(); data->stats_.chars_ += data->block(1)->length(); data->stats_.chars_ += data->block(2)->length(); data->stats_.leaves_ += 3; data->stats_.empty_leaves_ += 1; data->stats_.left_traversals_ += 1; { Handle<String> left = factory->NewConsString(data->block(0), data->block(1)) .ToHandleChecked(); String::Flatten(isolate, left); return factory->NewConsString(left, data->block(2)).ToHandleChecked(); } case 8: // Left node and right node flattened. data->stats_.chars_ += data->block(0)->length(); data->stats_.chars_ += data->block(1)->length(); data->stats_.chars_ += data->block(2)->length(); data->stats_.chars_ += data->block(3)->length(); data->stats_.leaves_ += 4; data->stats_.empty_leaves_ += 2; data->stats_.left_traversals_ += 1; data->stats_.right_traversals_ += 1; { Handle<String> left = factory->NewConsString(data->block(0), data->block(1)) .ToHandleChecked(); String::Flatten(isolate, left); Handle<String> right = factory->NewConsString(data->block(2), data->block(2)) .ToHandleChecked(); String::Flatten(isolate, right); return factory->NewConsString(left, right).ToHandleChecked(); } } UNREACHABLE(); } TEST(StringCharacterStreamEdgeCases) { printf("TestStringCharacterStreamEdgeCases\n"); TestStringCharacterStream( BuildEdgeCaseConsString, kCharacterStreamNonRandomCases); } static const int kBalances = 3; static const int kTreeLengths = 4; static const int kEmptyLeaves = 4; static const int kUniqueRandomParameters = kBalances*kTreeLengths*kEmptyLeaves; static void InitializeGenerationData( int test_case, ConsStringGenerationData* data) { // Clear the settings and reinit the rng. data->Reset(); // Spin up the rng to a known location that is unique per test. static const int kPerTestJump = 501; for (int j = 0; j < test_case*kPerTestJump; j++) { data->rng_.next(); } // Choose balanced, left or right heavy trees. switch (test_case % kBalances) { case 0: // Nothing to do. Already balanced. break; case 1: // Left balanced. data->leftness_ = 0.90; data->rightness_ = 0.15; break; case 2: // Right balanced. data->leftness_ = 0.15; data->rightness_ = 0.90; break; default: UNREACHABLE(); } // Must remove the influence of the above decision. test_case /= kBalances; // Choose tree length. switch (test_case % kTreeLengths) { case 0: data->max_leaves_ = 16; data->early_termination_threshold_ = 0.2; break; case 1: data->max_leaves_ = 50; data->early_termination_threshold_ = 0.05; break; case 2: data->max_leaves_ = 500; data->early_termination_threshold_ = 0.03; break; case 3: data->max_leaves_ = 5000; data->early_termination_threshold_ = 0.001; break; default: UNREACHABLE(); } // Must remove the influence of the above decision. test_case /= kTreeLengths; // Choose how much we allow empty nodes, including not at all. data->empty_leaf_threshold_ = 0.03 * static_cast<double>(test_case % kEmptyLeaves); } static Handle<String> BuildRandomConsString( int test_case, ConsStringGenerationData* data) { InitializeGenerationData(test_case, data); return ConstructRandomString(data, 200); } TEST(StringCharacterStreamRandom) { printf("StringCharacterStreamRandom\n"); TestStringCharacterStream(BuildRandomConsString, kUniqueRandomParameters*7); } static const int kDeepOneByteDepth = 100000; TEST(DeepOneByte) { CcTest::InitializeVM(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); v8::HandleScope scope(CcTest::isolate()); char* foo = NewArray<char>(kDeepOneByteDepth); for (int i = 0; i < kDeepOneByteDepth; i++) { foo[i] = "foo "[i % 4]; } Handle<String> string = factory->NewStringFromOneByte(OneByteVector(foo, kDeepOneByteDepth)) .ToHandleChecked(); Handle<String> foo_string = factory->NewStringFromStaticChars("foo"); for (int i = 0; i < kDeepOneByteDepth; i += 10) { string = factory->NewConsString(string, foo_string).ToHandleChecked(); } Handle<String> flat_string = factory->NewConsString(string, foo_string).ToHandleChecked(); String::Flatten(isolate, flat_string); for (int i = 0; i < 500; i++) { TraverseFirst(flat_string, string, kDeepOneByteDepth); } DeleteArray<char>(foo); } TEST(Utf8Conversion) { // Smoke test for converting strings to utf-8. CcTest::InitializeVM(); v8::HandleScope handle_scope(CcTest::isolate()); // A simple one-byte string const char* one_byte_string = "abcdef12345"; int len = v8::String::NewFromUtf8(CcTest::isolate(), one_byte_string, v8::NewStringType::kNormal, static_cast<int>(strlen(one_byte_string))) .ToLocalChecked() ->Utf8Length(CcTest::isolate()); CHECK_EQ(strlen(one_byte_string), len); // A mixed one-byte and two-byte string // U+02E4 -> CB A4 // U+0064 -> 64 // U+12E4 -> E1 8B A4 // U+0030 -> 30 // U+3045 -> E3 81 85 const uint16_t mixed_string[] = {0x02E4, 0x0064, 0x12E4, 0x0030, 0x3045}; // The characters we expect to be output const unsigned char as_utf8[11] = {0xCB, 0xA4, 0x64, 0xE1, 0x8B, 0xA4, 0x30, 0xE3, 0x81, 0x85, 0x00}; // The number of bytes expected to be written for each length const int lengths[12] = {0, 0, 2, 3, 3, 3, 6, 7, 7, 7, 10, 11}; const int char_lengths[12] = {0, 0, 1, 2, 2, 2, 3, 4, 4, 4, 5, 5}; v8::Local<v8::String> mixed = v8::String::NewFromTwoByte(CcTest::isolate(), mixed_string, v8::NewStringType::kNormal, 5) .ToLocalChecked(); CHECK_EQ(10, mixed->Utf8Length(CcTest::isolate())); // Try encoding the string with all capacities char buffer[11]; const char kNoChar = static_cast<char>(-1); for (int i = 0; i <= 11; i++) { // Clear the buffer before reusing it for (int j = 0; j < 11; j++) buffer[j] = kNoChar; int chars_written; int written = mixed->WriteUtf8(CcTest::isolate(), buffer, i, &chars_written); CHECK_EQ(lengths[i], written); CHECK_EQ(char_lengths[i], chars_written); // Check that the contents are correct for (int j = 0; j < lengths[i]; j++) CHECK_EQ(as_utf8[j], static_cast<unsigned char>(buffer[j])); // Check that the rest of the buffer hasn't been touched for (int j = lengths[i]; j < 11; j++) CHECK_EQ(kNoChar, buffer[j]); } } TEST(Utf8ConversionPerf) { // Smoke test for converting strings to utf-8. LocalContext context; v8::HandleScope handle_scope(CcTest::isolate()); v8::Local<v8::String> ascii_string = CompileRun("'abc'.repeat(1E6)").As<v8::String>(); v8::Local<v8::String> one_byte_string = CompileRun("'\\u0255\\u0254\\u0253'.repeat(1E6)").As<v8::String>(); v8::Local<v8::String> two_byte_string = CompileRun("'\\u2255\\u2254\\u2253'.repeat(1E6)").As<v8::String>(); v8::Local<v8::String> surrogate_string = CompileRun("'\\u{12345}\\u2244'.repeat(1E6)").As<v8::String>(); int size = 1E7; char* buffer = new char[4 * size]; { v8::base::ElapsedTimer timer; timer.Start(); ascii_string->WriteUtf8(CcTest::isolate(), buffer, size, nullptr); printf("ascii string %0.3f\n", timer.Elapsed().InMillisecondsF()); timer.Stop(); } { v8::base::ElapsedTimer timer; timer.Start(); ascii_string->WriteUtf8(CcTest::isolate(), buffer, size, nullptr); printf("ascii string %0.3f\n", timer.Elapsed().InMillisecondsF()); timer.Stop(); } { v8::base::ElapsedTimer timer; timer.Start(); ascii_string->WriteUtf8(CcTest::isolate(), buffer, 4 * size, nullptr); printf("ascii string %0.3f\n", timer.Elapsed().InMillisecondsF()); timer.Stop(); } { v8::base::ElapsedTimer timer; timer.Start(); one_byte_string->WriteUtf8(CcTest::isolate(), buffer, size, nullptr); printf("one byte string %0.3f\n", timer.Elapsed().InMillisecondsF()); timer.Stop(); } { v8::base::ElapsedTimer timer; timer.Start(); one_byte_string->WriteUtf8(CcTest::isolate(), buffer, size, nullptr); printf("one byte string %0.3f\n", timer.Elapsed().InMillisecondsF()); timer.Stop(); } { v8::base::ElapsedTimer timer; timer.Start(); one_byte_string->WriteUtf8(CcTest::isolate(), buffer, 4 * size, nullptr); printf("one byte string %0.3f\n", timer.Elapsed().InMillisecondsF()); timer.Stop(); } { v8::base::ElapsedTimer timer; timer.Start(); two_byte_string->WriteUtf8(CcTest::isolate(), buffer, size, nullptr); printf("two byte string %0.3f\n", timer.Elapsed().InMillisecondsF()); timer.Stop(); } { v8::base::ElapsedTimer timer; timer.Start(); two_byte_string->WriteUtf8(CcTest::isolate(), buffer, size, nullptr); printf("two byte string %0.3f\n", timer.Elapsed().InMillisecondsF()); timer.Stop(); } { v8::base::ElapsedTimer timer; timer.Start(); two_byte_string->WriteUtf8(CcTest::isolate(), buffer, 4 * size, nullptr); printf("two byte string %0.3f\n", timer.Elapsed().InMillisecondsF()); timer.Stop(); } { v8::base::ElapsedTimer timer; timer.Start(); surrogate_string->WriteUtf8(CcTest::isolate(), buffer, size, nullptr); printf("surrogate string %0.3f\n", timer.Elapsed().InMillisecondsF()); timer.Stop(); } { v8::base::ElapsedTimer timer; timer.Start(); surrogate_string->WriteUtf8(CcTest::isolate(), buffer, size, nullptr); printf("surrogate string %0.3f\n", timer.Elapsed().InMillisecondsF()); timer.Stop(); } { v8::base::ElapsedTimer timer; timer.Start(); surrogate_string->WriteUtf8(CcTest::isolate(), buffer, 4 * size, nullptr); printf("surrogate string %0.3f\n", timer.Elapsed().InMillisecondsF()); timer.Stop(); } delete[] buffer; } TEST(ExternalShortStringAdd) { LocalContext context; v8::HandleScope handle_scope(CcTest::isolate()); // Make sure we cover all always-flat lengths and at least one above. static const int kMaxLength = 20; CHECK_GT(kMaxLength, i::ConsString::kMinLength); // Allocate two JavaScript arrays for holding short strings. v8::Local<v8::Array> one_byte_external_strings = v8::Array::New(CcTest::isolate(), kMaxLength + 1); v8::Local<v8::Array> non_one_byte_external_strings = v8::Array::New(CcTest::isolate(), kMaxLength + 1); // Generate short one-byte and two-byte external strings. for (int i = 0; i <= kMaxLength; i++) { char* one_byte = NewArray<char>(i + 1); for (int j = 0; j < i; j++) { one_byte[j] = 'a'; } // Terminating '\0' is left out on purpose. It is not required for external // string data. OneByteResource* one_byte_resource = new OneByteResource(one_byte, i); v8::Local<v8::String> one_byte_external_string = v8::String::NewExternalOneByte(CcTest::isolate(), one_byte_resource) .ToLocalChecked(); one_byte_external_strings->Set(context.local(), v8::Integer::New(CcTest::isolate(), i), one_byte_external_string) .FromJust(); uc16* non_one_byte = NewArray<uc16>(i + 1); for (int j = 0; j < i; j++) { non_one_byte[j] = 0x1234; } // Terminating '\0' is left out on purpose. It is not required for external // string data. Resource* resource = new Resource(non_one_byte, i); v8::Local<v8::String> non_one_byte_external_string = v8::String::NewExternalTwoByte(CcTest::isolate(), resource) .ToLocalChecked(); non_one_byte_external_strings->Set(context.local(), v8::Integer::New(CcTest::isolate(), i), non_one_byte_external_string) .FromJust(); } // Add the arrays with the short external strings in the global object. v8::Local<v8::Object> global = context->Global(); global->Set(context.local(), v8_str("external_one_byte"), one_byte_external_strings) .FromJust(); global->Set(context.local(), v8_str("external_non_one_byte"), non_one_byte_external_strings) .FromJust(); global->Set(context.local(), v8_str("max_length"), v8::Integer::New(CcTest::isolate(), kMaxLength)) .FromJust(); // Add short external one-byte and two-byte strings checking the result. static const char* source = "function test() {" " var one_byte_chars = 'aaaaaaaaaaaaaaaaaaaa';" " var non_one_byte_chars = " "'\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1" "234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\" "u1234';" // NOLINT " if (one_byte_chars.length != max_length) return 1;" " if (non_one_byte_chars.length != max_length) return 2;" " var one_byte = Array(max_length + 1);" " var non_one_byte = Array(max_length + 1);" " for (var i = 0; i <= max_length; i++) {" " one_byte[i] = one_byte_chars.substring(0, i);" " non_one_byte[i] = non_one_byte_chars.substring(0, i);" " };" " for (var i = 0; i <= max_length; i++) {" " if (one_byte[i] != external_one_byte[i]) return 3;" " if (non_one_byte[i] != external_non_one_byte[i]) return 4;" " for (var j = 0; j < i; j++) {" " if (external_one_byte[i] !=" " (external_one_byte[j] + external_one_byte[i - j])) return " "5;" " if (external_non_one_byte[i] !=" " (external_non_one_byte[j] + external_non_one_byte[i - " "j])) return 6;" " if (non_one_byte[i] != (non_one_byte[j] + non_one_byte[i - " "j])) return 7;" " if (one_byte[i] != (one_byte[j] + one_byte[i - j])) return 8;" " if (one_byte[i] != (external_one_byte[j] + one_byte[i - j])) " "return 9;" " if (one_byte[i] != (one_byte[j] + external_one_byte[i - j])) " "return 10;" " if (non_one_byte[i] !=" " (external_non_one_byte[j] + non_one_byte[i - j])) return " "11;" " if (non_one_byte[i] !=" " (non_one_byte[j] + external_non_one_byte[i - j])) return " "12;" " }" " }" " return 0;" "};" "test()"; CHECK_EQ(0, CompileRun(source)->Int32Value(context.local()).FromJust()); } TEST(ReplaceInvalidUtf8) { LocalContext context; v8::HandleScope handle_scope(CcTest::isolate()); v8::Local<v8::String> string = CompileRun("'ab\\ud800cd'").As<v8::String>(); char buffer[7]; memset(buffer, 0, 7); int chars_written = 0; int size = string->WriteUtf8(CcTest::isolate(), buffer, 7, &chars_written, v8::String::REPLACE_INVALID_UTF8); CHECK_EQ(7, size); CHECK_EQ(5, chars_written); CHECK_EQ(0, memcmp("\x61\x62\xef\xbf\xbd\x63\x64", buffer, 7)); memset(buffer, 0, 7); chars_written = 0; size = string->WriteUtf8(CcTest::isolate(), buffer, 6, &chars_written, v8::String::REPLACE_INVALID_UTF8); CHECK_EQ(6, size); CHECK_EQ(4, chars_written); CHECK_EQ(0, memcmp("\x61\x62\xef\xbf\xbd\x63", buffer, 6)); } TEST(JSONStringifySliceMadeExternal) { if (!FLAG_string_slices) return; CcTest::InitializeVM(); // Create a sliced string from a one-byte string. The latter is turned // into a two-byte external string. Check that JSON.stringify works. v8::HandleScope handle_scope(CcTest::isolate()); v8::Local<v8::String> underlying = CompileRun( "var underlying = 'abcdefghijklmnopqrstuvwxyz';" "underlying") ->ToString(CcTest::isolate()->GetCurrentContext()) .ToLocalChecked(); v8::Local<v8::String> slice = CompileRun( "var slice = '';" "slice = underlying.slice(1);" "slice") ->ToString(CcTest::isolate()->GetCurrentContext()) .ToLocalChecked(); CHECK(v8::Utils::OpenHandle(*slice)->IsSlicedString()); CHECK(v8::Utils::OpenHandle(*underlying)->IsSeqOneByteString()); int length = underlying->Length(); uc16* two_byte = NewArray<uc16>(length + 1); underlying->Write(CcTest::isolate(), two_byte); Resource* resource = new Resource(two_byte, length); CHECK(underlying->MakeExternal(resource)); CHECK(v8::Utils::OpenHandle(*slice)->IsSlicedString()); CHECK(v8::Utils::OpenHandle(*underlying)->IsExternalTwoByteString()); CHECK_EQ(0, strcmp("\"bcdefghijklmnopqrstuvwxyz\"", *v8::String::Utf8Value(CcTest::isolate(), CompileRun("JSON.stringify(slice)")))); } TEST(JSONStringifyWellFormed) { CcTest::InitializeVM(); v8::HandleScope handle_scope(CcTest::isolate()); v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext(); // Test some leading surrogates (U+D800 to U+DBFF). { // U+D800 CHECK_EQ( 0, strcmp("\"\\ud800\"", *v8::String::Utf8Value( CcTest::isolate(), CompileRun("JSON.stringify('\\uD800')")))); v8::Local<v8::String> json = v8_str("\"\\ud800\""); v8::Local<v8::Value> parsed = v8::JSON::Parse(context, json).ToLocalChecked(); CHECK(v8::JSON::Stringify(context, parsed) .ToLocalChecked() ->Equals(context, json) .FromJust()); } { // U+DAAA CHECK_EQ( 0, strcmp("\"\\udaaa\"", *v8::String::Utf8Value( CcTest::isolate(), CompileRun("JSON.stringify('\\uDAAA')")))); v8::Local<v8::String> json = v8_str("\"\\udaaa\""); v8::Local<v8::Value> parsed = v8::JSON::Parse(context, json).ToLocalChecked(); CHECK(v8::JSON::Stringify(context, parsed) .ToLocalChecked() ->Equals(context, json) .FromJust()); } { // U+DBFF CHECK_EQ( 0, strcmp("\"\\udbff\"", *v8::String::Utf8Value( CcTest::isolate(), CompileRun("JSON.stringify('\\uDBFF')")))); v8::Local<v8::String> json = v8_str("\"\\udbff\""); v8::Local<v8::Value> parsed = v8::JSON::Parse(context, json).ToLocalChecked(); CHECK(v8::JSON::Stringify(context, parsed) .ToLocalChecked() ->Equals(context, json) .FromJust()); } // Test some trailing surrogates (U+DC00 to U+DFFF). { // U+DC00 CHECK_EQ( 0, strcmp("\"\\udc00\"", *v8::String::Utf8Value( CcTest::isolate(), CompileRun("JSON.stringify('\\uDC00')")))); v8::Local<v8::String> json = v8_str("\"\\udc00\""); v8::Local<v8::Value> parsed = v8::JSON::Parse(context, json).ToLocalChecked(); CHECK(v8::JSON::Stringify(context, parsed) .ToLocalChecked() ->Equals(context, json) .FromJust()); } { // U+DDDD CHECK_EQ( 0, strcmp("\"\\udddd\"", *v8::String::Utf8Value( CcTest::isolate(), CompileRun("JSON.stringify('\\uDDDD')")))); v8::Local<v8::String> json = v8_str("\"\\udddd\""); v8::Local<v8::Value> parsed = v8::JSON::Parse(context, json).ToLocalChecked(); CHECK(v8::JSON::Stringify(context, parsed) .ToLocalChecked() ->Equals(context, json) .FromJust()); } { // U+DFFF CHECK_EQ( 0, strcmp("\"\\udfff\"", *v8::String::Utf8Value( CcTest::isolate(), CompileRun("JSON.stringify('\\uDFFF')")))); v8::Local<v8::String> json = v8_str("\"\\udfff\""); v8::Local<v8::Value> parsed = v8::JSON::Parse(context, json).ToLocalChecked(); CHECK(v8::JSON::Stringify(context, parsed) .ToLocalChecked() ->Equals(context, json) .FromJust()); } } TEST(CachedHashOverflow) { CcTest::InitializeVM(); // We incorrectly allowed strings to be tagged as array indices even if their // values didn't fit in the hash field. // See http://code.google.com/p/v8/issues/detail?id=728 Isolate* isolate = CcTest::i_isolate(); v8::HandleScope handle_scope(CcTest::isolate()); // Lines must be executed sequentially. Combining them into one script // makes the bug go away. const char* lines[] = {"var x = [];", "x[4] = 42;", "var s = \"1073741828\";", "x[s];", "x[s] = 37;", "x[4];", "x[s];"}; Handle<Smi> fortytwo(Smi::FromInt(42), isolate); Handle<Smi> thirtyseven(Smi::FromInt(37), isolate); Handle<Object> results[] = { isolate->factory()->undefined_value(), fortytwo, isolate->factory()->undefined_value(), isolate->factory()->undefined_value(), thirtyseven, fortytwo, thirtyseven // Bug yielded 42 here. }; v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext(); for (size_t i = 0; i < arraysize(lines); i++) { const char* line = lines[i]; printf("%s\n", line); v8::Local<v8::Value> result = v8::Script::Compile(context, v8::String::NewFromUtf8(CcTest::isolate(), line, v8::NewStringType::kNormal) .ToLocalChecked()) .ToLocalChecked() ->Run(context) .ToLocalChecked(); CHECK_EQ(results[i]->IsUndefined(CcTest::i_isolate()), result->IsUndefined()); CHECK_EQ(results[i]->IsNumber(), result->IsNumber()); if (result->IsNumber()) { int32_t value = 0; CHECK(results[i]->ToInt32(&value)); CHECK_EQ(value, result->ToInt32(context).ToLocalChecked()->Value()); } } } TEST(SliceFromCons) { if (!FLAG_string_slices) return; CcTest::InitializeVM(); Factory* factory = CcTest::i_isolate()->factory(); v8::HandleScope scope(CcTest::isolate()); Handle<String> string = factory->NewStringFromStaticChars("parentparentparent"); Handle<String> parent = factory->NewConsString(string, string).ToHandleChecked(); CHECK(parent->IsConsString()); CHECK(!parent->IsFlat()); Handle<String> slice = factory->NewSubString(parent, 1, 25); // After slicing, the original string becomes a flat cons. CHECK(parent->IsFlat()); CHECK(slice->IsSlicedString()); CHECK_EQ( SlicedString::cast(*slice).parent(), // Parent could have been short-circuited. parent->IsConsString() ? ConsString::cast(*parent).first() : *parent); CHECK(SlicedString::cast(*slice).parent().IsSeqString()); CHECK(slice->IsFlat()); } class OneByteVectorResource : public v8::String::ExternalOneByteStringResource { public: explicit OneByteVectorResource(i::Vector<const char> vector) : data_(vector) {} ~OneByteVectorResource() override = default; size_t length() const override { return data_.length(); } const char* data() const override { return data_.begin(); } private: i::Vector<const char> data_; }; TEST(InternalizeExternal) { #ifdef ENABLE_MINOR_MC // TODO(mlippautz): Remove once we add support for forwarding ThinStrings in // minor MC if (FLAG_minor_mc) return; #endif // ENABLE_MINOR_MC FLAG_stress_incremental_marking = false; FLAG_thin_strings = true; CcTest::InitializeVM(); i::Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); // This won't leak; the external string mechanism will call Dispose() on it. OneByteVectorResource* resource = new OneByteVectorResource(i::Vector<const char>("prop-1234", 9)); { v8::HandleScope scope(CcTest::isolate()); v8::Local<v8::String> ext_string = v8::String::NewExternalOneByte(CcTest::isolate(), resource) .ToLocalChecked(); Handle<String> string = v8::Utils::OpenHandle(*ext_string); CHECK(string->IsExternalString()); CHECK(!string->IsInternalizedString()); CHECK(!i::Heap::InYoungGeneration(*string)); CHECK_EQ( isolate->factory()->string_table()->LookupStringIfExists_NoAllocate( isolate, string->ptr()), Smi::FromInt(ResultSentinel::kNotFound).ptr()); factory->InternalizeName(string); CHECK(string->IsExternalString()); CHECK(string->IsInternalizedString()); CHECK(!i::Heap::InYoungGeneration(*string)); } CcTest::CollectGarbage(i::OLD_SPACE); CcTest::CollectGarbage(i::OLD_SPACE); } TEST(SliceFromExternal) { if (!FLAG_string_slices) return; CcTest::InitializeVM(); Factory* factory = CcTest::i_isolate()->factory(); v8::HandleScope scope(CcTest::isolate()); OneByteVectorResource resource( i::Vector<const char>("abcdefghijklmnopqrstuvwxyz", 26)); Handle<String> string = factory->NewExternalStringFromOneByte(&resource).ToHandleChecked(); CHECK(string->IsExternalString()); Handle<String> slice = factory->NewSubString(string, 1, 25); CHECK(slice->IsSlicedString()); CHECK(string->IsExternalString()); CHECK_EQ(SlicedString::cast(*slice).parent(), *string); CHECK(SlicedString::cast(*slice).parent().IsExternalString()); CHECK(slice->IsFlat()); // This avoids the GC from trying to free stack allocated resources. i::Handle<i::ExternalOneByteString>::cast(string)->SetResource( CcTest::i_isolate(), nullptr); } TEST(TrivialSlice) { // This tests whether a slice that contains the entire parent string // actually creates a new string (it should not). if (!FLAG_string_slices) return; CcTest::InitializeVM(); Factory* factory = CcTest::i_isolate()->factory(); v8::HandleScope scope(CcTest::isolate()); v8::Local<v8::Value> result; Handle<String> string; const char* init = "var str = 'abcdefghijklmnopqrstuvwxyz';"; const char* check = "str.slice(0,26)"; const char* crosscheck = "str.slice(1,25)"; CompileRun(init); result = CompileRun(check); CHECK(result->IsString()); string = v8::Utils::OpenHandle(v8::String::Cast(*result)); CHECK(!string->IsSlicedString()); string = factory->NewSubString(string, 0, 26); CHECK(!string->IsSlicedString()); result = CompileRun(crosscheck); CHECK(result->IsString()); string = v8::Utils::OpenHandle(v8::String::Cast(*result)); CHECK(string->IsSlicedString()); CHECK_EQ(0, strcmp("bcdefghijklmnopqrstuvwxy", string->ToCString().get())); } TEST(SliceFromSlice) { // This tests whether a slice that contains the entire parent string // actually creates a new string (it should not). if (!FLAG_string_slices) return; CcTest::InitializeVM(); v8::HandleScope scope(CcTest::isolate()); v8::Local<v8::Value> result; Handle<String> string; const char* init = "var str = 'abcdefghijklmnopqrstuvwxyz';"; const char* slice = "var slice = ''; slice = str.slice(1,-1); slice"; const char* slice_from_slice = "slice.slice(1,-1);"; CompileRun(init); result = CompileRun(slice); CHECK(result->IsString()); string = v8::Utils::OpenHandle(v8::String::Cast(*result)); CHECK(string->IsSlicedString()); CHECK(SlicedString::cast(*string).parent().IsSeqString()); CHECK_EQ(0, strcmp("bcdefghijklmnopqrstuvwxy", string->ToCString().get())); result = CompileRun(slice_from_slice); CHECK(result->IsString()); string = v8::Utils::OpenHandle(v8::String::Cast(*result)); CHECK(string->IsSlicedString()); CHECK(SlicedString::cast(*string).parent().IsSeqString()); CHECK_EQ(0, strcmp("cdefghijklmnopqrstuvwx", string->ToCString().get())); } UNINITIALIZED_TEST(OneByteArrayJoin) { v8::Isolate::CreateParams create_params; // Set heap limits. create_params.constraints.set_max_young_generation_size_in_bytes(3 * MB); #ifdef DEBUG create_params.constraints.set_max_old_generation_size_in_bytes(20 * MB); #else create_params.constraints.set_max_old_generation_size_in_bytes(7 * MB); #endif create_params.array_buffer_allocator = CcTest::array_buffer_allocator(); v8::Isolate* isolate = v8::Isolate::New(create_params); isolate->Enter(); { // String s is made of 2^17 = 131072 'c' characters and a is an array // starting with 'bad', followed by 2^14 times the string s. That means the // total length of the concatenated strings is 2^31 + 3. So on 32bit systems // summing the lengths of the strings (as Smis) overflows and wraps. LocalContext context(isolate); v8::HandleScope scope(isolate); v8::TryCatch try_catch(isolate); CHECK(CompileRun( "var two_14 = Math.pow(2, 14);" "var two_17 = Math.pow(2, 17);" "var s = Array(two_17 + 1).join('c');" "var a = ['bad'];" "for (var i = 1; i <= two_14; i++) a.push(s);" "a.join(" ");").IsEmpty()); CHECK(try_catch.HasCaught()); } isolate->Exit(); isolate->Dispose(); } namespace { int* global_use_counts = nullptr; void MockUseCounterCallback(v8::Isolate* isolate, v8::Isolate::UseCounterFeature feature) { ++global_use_counts[feature]; } } TEST(CountBreakIterator) { CcTest::InitializeVM(); v8::HandleScope scope(CcTest::isolate()); LocalContext context; int use_counts[v8::Isolate::kUseCounterFeatureCount] = {}; global_use_counts = use_counts; CcTest::isolate()->SetUseCounterCallback(MockUseCounterCallback); CHECK_EQ(0, use_counts[v8::Isolate::kBreakIterator]); v8::Local<v8::Value> result = CompileRun( "(function() {" " if (!this.Intl) return 0;" " var iterator = Intl.v8BreakIterator(['en']);" " iterator.adoptText('Now is the time');" " iterator.next();" " return iterator.next();" "})();"); CHECK(result->IsNumber()); int uses = result->ToInt32(context.local()).ToLocalChecked()->Value() == 0 ? 0 : 1; CHECK_EQ(uses, use_counts[v8::Isolate::kBreakIterator]); // Make sure GC cleans up the break iterator, so we don't get a memory leak // reported by ASAN. CcTest::isolate()->LowMemoryNotification(); } TEST(StringReplaceAtomTwoByteResult) { CcTest::InitializeVM(); v8::HandleScope scope(CcTest::isolate()); LocalContext context; v8::Local<v8::Value> result = CompileRun( "var subject = 'one_byte~only~string~'; " "var replace = '\x80'; " "subject.replace(/~/g, replace); "); CHECK(result->IsString()); Handle<String> string = v8::Utils::OpenHandle(v8::String::Cast(*result)); CHECK(string->IsTwoByteRepresentation()); v8::Local<v8::String> expected = v8_str("one_byte\x80only\x80string\x80"); CHECK(expected->Equals(context.local(), result).FromJust()); } TEST(IsAscii) { CHECK(String::IsAscii(static_cast<char*>(nullptr), 0)); CHECK(String::IsOneByte(static_cast<uc16*>(nullptr), 0)); } template<typename Op, bool return_first> static uint16_t ConvertLatin1(uint16_t c) { uint32_t result[Op::kMaxWidth]; int chars; chars = Op::Convert(c, 0, result, nullptr); if (chars == 0) return 0; CHECK_LE(chars, static_cast<int>(sizeof(result))); if (!return_first && chars > 1) { return 0; } return result[0]; } #ifndef V8_INTL_SUPPORT static void CheckCanonicalEquivalence(uint16_t c, uint16_t test) { uint16_t expect = ConvertLatin1<unibrow::Ecma262UnCanonicalize, true>(c); if (expect > unibrow::Latin1::kMaxChar || expect == 0) expect = c; CHECK_EQ(expect, test); } TEST(Latin1IgnoreCase) { for (uint16_t c = unibrow::Latin1::kMaxChar + 1; c != 0; c++) { uint16_t lower = ConvertLatin1<unibrow::ToLowercase, false>(c); uint16_t upper = ConvertLatin1<unibrow::ToUppercase, false>(c); uint16_t test = unibrow::Latin1::TryConvertToLatin1(c); // Filter out all character whose upper is not their lower or vice versa. if (lower == 0 && upper == 0) { CheckCanonicalEquivalence(c, test); continue; } if (lower > unibrow::Latin1::kMaxChar && upper > unibrow::Latin1::kMaxChar) { CheckCanonicalEquivalence(c, test); continue; } if (lower == 0 && upper != 0) { lower = ConvertLatin1<unibrow::ToLowercase, false>(upper); } if (upper == 0 && lower != c) { upper = ConvertLatin1<unibrow::ToUppercase, false>(lower); } if (lower > unibrow::Latin1::kMaxChar && upper > unibrow::Latin1::kMaxChar) { CheckCanonicalEquivalence(c, test); continue; } if (upper != c && lower != c) { CheckCanonicalEquivalence(c, test); continue; } CHECK_EQ(Min(upper, lower), test); } } #endif class DummyResource: public v8::String::ExternalStringResource { public: const uint16_t* data() const override { return nullptr; } size_t length() const override { return 1 << 30; } }; class DummyOneByteResource: public v8::String::ExternalOneByteStringResource { public: const char* data() const override { return nullptr; } size_t length() const override { return 1 << 30; } }; TEST(InvalidExternalString) { CcTest::InitializeVM(); LocalContext context; Isolate* isolate = CcTest::i_isolate(); { HandleScope scope(isolate); DummyOneByteResource r; CHECK(isolate->factory()->NewExternalStringFromOneByte(&r).is_null()); CHECK(isolate->has_pending_exception()); isolate->clear_pending_exception(); } { HandleScope scope(isolate); DummyResource r; CHECK(isolate->factory()->NewExternalStringFromTwoByte(&r).is_null()); CHECK(isolate->has_pending_exception()); isolate->clear_pending_exception(); } } #define INVALID_STRING_TEST(FUN, TYPE) \ TEST(StringOOM##FUN) { \ CcTest::InitializeVM(); \ LocalContext context; \ Isolate* isolate = CcTest::i_isolate(); \ STATIC_ASSERT(String::kMaxLength < kMaxInt); \ static const int invalid = String::kMaxLength + 1; \ HandleScope scope(isolate); \ Vector<TYPE> dummy = Vector<TYPE>::New(invalid); \ memset(dummy.begin(), 0x0, dummy.length() * sizeof(TYPE)); \ CHECK(isolate->factory()->FUN(Vector<const TYPE>::cast(dummy)).is_null()); \ memset(dummy.begin(), 0x20, dummy.length() * sizeof(TYPE)); \ CHECK(isolate->has_pending_exception()); \ isolate->clear_pending_exception(); \ dummy.Dispose(); \ } INVALID_STRING_TEST(NewStringFromUtf8, char) INVALID_STRING_TEST(NewStringFromOneByte, uint8_t) #undef INVALID_STRING_TEST TEST(FormatMessage) { CcTest::InitializeVM(); LocalContext context; Isolate* isolate = CcTest::i_isolate(); HandleScope scope(isolate); Handle<String> arg0 = isolate->factory()->NewStringFromAsciiChecked("arg0"); Handle<String> arg1 = isolate->factory()->NewStringFromAsciiChecked("arg1"); Handle<String> arg2 = isolate->factory()->NewStringFromAsciiChecked("arg2"); Handle<String> result = MessageFormatter::Format(isolate, MessageTemplate::kPropertyNotFunction, arg0, arg1, arg2) .ToHandleChecked(); Handle<String> expected = isolate->factory()->NewStringFromAsciiChecked( "'arg0' returned for property 'arg1' of object 'arg2' is not a function"); CHECK(String::Equals(isolate, result, expected)); } TEST(Regress609831) { CcTest::InitializeVM(); LocalContext context; Isolate* isolate = CcTest::i_isolate(); { HandleScope scope(isolate); v8::Local<v8::Value> result = CompileRun( "String.fromCharCode(32, 32, 32, 32, 32, " "32, 32, 32, 32, 32, 32, 32, 32, 32, 32, " "32, 32, 32, 32, 32, 32, 32, 32, 32, 32)"); CHECK(v8::Utils::OpenHandle(*result)->IsSeqOneByteString()); } { HandleScope scope(isolate); v8::Local<v8::Value> result = CompileRun( "String.fromCharCode(432, 432, 432, 432, 432, " "432, 432, 432, 432, 432, 432, 432, 432, 432, " "432, 432, 432, 432, 432, 432, 432, 432, 432)"); CHECK(v8::Utils::OpenHandle(*result)->IsSeqTwoByteString()); } } TEST(ExternalStringIndexOf) { CcTest::InitializeVM(); LocalContext context; v8::HandleScope scope(CcTest::isolate()); const char* raw_string = "abcdefghijklmnopqrstuvwxyz"; v8::Local<v8::String> string = v8::String::NewExternalOneByte(CcTest::isolate(), new StaticOneByteResource(raw_string)) .ToLocalChecked(); v8::Local<v8::Object> global = context->Global(); global->Set(context.local(), v8_str("external"), string).FromJust(); char source[] = "external.indexOf('%')"; for (size_t i = 0; i < strlen(raw_string); i++) { source[18] = raw_string[i]; int result_position = static_cast<int>(i); CHECK_EQ(result_position, CompileRun(source)->Int32Value(context.local()).FromJust()); } CHECK_EQ(-1, CompileRun("external.indexOf('abcdefghijklmnopqrstuvwxyz%%%%%%')") ->Int32Value(context.local()) .FromJust()); CHECK_EQ(1, CompileRun("external.indexOf('', 1)") ->Int32Value(context.local()) .FromJust()); CHECK_EQ(-1, CompileRun("external.indexOf('a', 1)") ->Int32Value(context.local()) .FromJust()); CHECK_EQ(-1, CompileRun("external.indexOf('$')") ->Int32Value(context.local()) .FromJust()); } #define GC_INSIDE_NEW_STRING_FROM_UTF8_SUB_STRING(NAME, STRING) \ TEST(GCInsideNewStringFromUtf8SubStringWith##NAME) { \ CcTest::InitializeVM(); \ LocalContext context; \ v8::HandleScope scope(CcTest::isolate()); \ Factory* factory = CcTest::i_isolate()->factory(); \ /* Length must be bigger than the buffer size of the Utf8Decoder. */ \ const char* buf = STRING; \ size_t len = strlen(buf); \ Handle<String> main_string = \ factory \ ->NewStringFromOneByte(Vector<const uint8_t>( \ reinterpret_cast<const uint8_t*>(buf), len)) \ .ToHandleChecked(); \ CHECK(Heap::InYoungGeneration(*main_string)); \ /* Next allocation will cause GC. */ \ heap::SimulateFullSpace(CcTest::i_isolate()->heap()->new_space()); \ /* Offset by two to check substring-ing. */ \ Handle<String> s = factory \ ->NewStringFromUtf8SubString( \ Handle<SeqOneByteString>::cast(main_string), 2, \ static_cast<int>(len - 2)) \ .ToHandleChecked(); \ Handle<String> expected_string = \ factory->NewStringFromUtf8(Vector<const char>(buf + 2, len - 2)) \ .ToHandleChecked(); \ CHECK(s->Equals(*expected_string)); \ } GC_INSIDE_NEW_STRING_FROM_UTF8_SUB_STRING( OneByte, "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ" "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ" "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ" "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ" "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ" "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ" "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ" "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ" "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ") GC_INSIDE_NEW_STRING_FROM_UTF8_SUB_STRING( TwoByte, "QQ\xF0\x9F\x98\x8D\xF0\x9F\x98\x8D" "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ" "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ" "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ" "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ" "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ" "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ" "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ" "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ" "QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ" "QQ\xF0\x9F\x98\x8D\xF0\x9F\x98\x8D") #undef GC_INSIDE_NEW_STRING_FROM_UTF8_SUB_STRING TEST(HashArrayIndexStrings) { CcTest::InitializeVM(); LocalContext context; v8::HandleScope scope(CcTest::isolate()); i::Isolate* isolate = CcTest::i_isolate(); CHECK_EQ(StringHasher::MakeArrayIndexHash(0 /* value */, 1 /* length */) >> Name::kHashShift, isolate->factory()->zero_string()->Hash()); CHECK_EQ(StringHasher::MakeArrayIndexHash(1 /* value */, 1 /* length */) >> Name::kHashShift, isolate->factory()->one_string()->Hash()); } TEST(StringEquals) { v8::V8::Initialize(); v8::Isolate* isolate = CcTest::isolate(); v8::HandleScope scope(isolate); auto foo_str = v8::String::NewFromUtf8(isolate, "foo", v8::NewStringType::kNormal) .ToLocalChecked(); auto bar_str = v8::String::NewFromUtf8(isolate, "bar", v8::NewStringType::kNormal) .ToLocalChecked(); auto foo_str2 = v8::String::NewFromUtf8(isolate, "foo", v8::NewStringType::kNormal) .ToLocalChecked(); uint16_t* two_byte_source = AsciiToTwoByteString("foo"); auto foo_two_byte_str = v8::String::NewFromTwoByte(isolate, two_byte_source, v8::NewStringType::kNormal) .ToLocalChecked(); i::DeleteArray(two_byte_source); CHECK(foo_str->StringEquals(foo_str)); CHECK(!foo_str->StringEquals(bar_str)); CHECK(foo_str->StringEquals(foo_str2)); CHECK(foo_str->StringEquals(foo_two_byte_str)); CHECK(!bar_str->StringEquals(foo_str2)); } class OneByteStringResource : public v8::String::ExternalOneByteStringResource { public: // Takes ownership of |data|. OneByteStringResource(char* data, size_t length) : data_(data), length_(length) {} ~OneByteStringResource() override { delete[] data_; } const char* data() const override { return data_; } size_t length() const override { return length_; } private: char* data_; size_t length_; }; TEST(Regress876759) { v8::V8::Initialize(); Isolate* isolate = CcTest::i_isolate(); Factory* factory = isolate->factory(); HandleScope handle_scope(isolate); const int kLength = 30; uc16 two_byte_buf[kLength]; char* external_one_byte_buf = new char[kLength]; for (int j = 0; j < kLength; j++) { char c = '0' + (j % 10); two_byte_buf[j] = c; external_one_byte_buf[j] = c; } Handle<String> parent; { Handle<SeqTwoByteString> raw = factory->NewRawTwoByteString(kLength).ToHandleChecked(); DisallowHeapAllocation no_gc; CopyChars(raw->GetChars(no_gc), two_byte_buf, kLength); parent = raw; } CHECK(parent->IsTwoByteRepresentation()); Handle<String> sliced = factory->NewSubString(parent, 1, 20); CHECK(sliced->IsSlicedString()); factory->InternalizeString(parent); CHECK(parent->IsThinString()); Handle<String> grandparent = handle(ThinString::cast(*parent).actual(), isolate); CHECK_EQ(*parent, SlicedString::cast(*sliced).parent()); OneByteStringResource* resource = new OneByteStringResource(external_one_byte_buf, kLength); grandparent->MakeExternal(resource); // The grandparent string becomes one-byte, but the child strings are still // two-byte. CHECK(grandparent->IsOneByteRepresentation()); CHECK(parent->IsTwoByteRepresentation()); CHECK(sliced->IsTwoByteRepresentation()); // The *Underneath version returns the correct representation. CHECK(String::IsOneByteRepresentationUnderneath(*sliced)); } } // namespace test_strings } // namespace internal } // namespace v8
__label__pos
0.986937
Jump to content • Checkout • Login • Get in touch osCommerce The e-commerce. manufacturer infobox list.. hashed together, but not quite there. help? adibranch Recommended Posts Hi all, i'm replacing the dropdown manufacturer filter on product list pages with one which produces a list instead of the form drop down. I've nearly got it, but are not quite there with the sort option. The current filter produces a url such as "index.php?cPath=42_1449_45&sort=3a&filter_id=58" , but mine only returns "/index.php?cPath=42_1449_45?sort=&filter_id=58" , ie its missing the sort I.D . I'm not much a coder, more of a bodger, but have hashed together the following based on the original form filter... <?php if (PRODUCT_LIST_FILTER > 0) { if (isset($HTTP_GET_VARS['manufacturers_id'])) { $filterlist_sql = "select distinct c.categories_id as id, cd.categories_name as name from " . TABLE_PRODUCTS . " p, " . TABLE_PRODUCTS_TO_CATEGORIES . " p2c, " . TABLE_CATEGORIES . " c, " . TABLE_CATEGORIES_DESCRIPTION . " cd where p.products_status = '1' and p.products_id = p2c.products_id and p2c.categories_id = c.categories_id and p2c.categories_id = cd.categories_id and cd.language_id = '" . (int)$languages_id . "' and p.manufacturers_id = '" . (int)$HTTP_GET_VARS['manufacturers_id'] . "' order by cd.categories_name"; } else { $filterlist_sql= "select distinct m.manufacturers_id as id, m.manufacturers_name as name from " . TABLE_PRODUCTS . " p, " . TABLE_PRODUCTS_TO_CATEGORIES . " p2c, " . TABLE_MANUFACTURERS . " m where p.products_status = '1' and p.manufacturers_id = m.manufacturers_id and p.products_id = p2c.products_id and p2c.categories_id = '" . (int)$current_category_id . "' order by m.manufacturers_name"; } $filterlist_query = tep_db_query($filterlist_sql); if (tep_db_num_rows($filterlist_query) > 1) { echo '<div>'; while ($filterlist = tep_db_fetch_array($filterlist_query)) { $filterid = $filterlist['id']; $manuname = $filterlist['name']; echo '<a href="' . tep_href_link(FILENAME_DEFAULT, 'cPath=' . substr(tep_get_path($category_id), 6)) . '?sort=' . $HTTP_GET_VARS['sort'] . '&filter_id=' . $filterid . '">' . $manuname . '</a><br>'; } } echo '</div>'; } ?> Link to comment Share on other sites if (isset($HTTP_GET_VARS['filter_id']) && !empty($HTTP_GET_VARS['filter_id'])) { // manufacturer list $manufacturers_list = '<ul>'; while ($filterlist = tep_db_fetch_array($filterlist_query)) { $manufacturers_name = ((strlen($filterlist['name']) > MAX_DISPLAY_MANUFACTURER_NAME_LEN) ? substr($filterlist['name'], 0, MAX_DISPLAY_MANUFACTURER_NAME_LEN) . '..' : $filterlist['name']); if (isset($HTTP_GET_VARS['filter_id']) && ($HTTP_GET_VARS['filter_id'] == $filterlist['id'])) $manufacturers_name = '<strong>' . $manufacturers_name .'</strong>'; $manufacturers_list .= '<li><a href="' . tep_href_link(FILENAME_DEFAULT, tep_get_all_get_params(array('filter_id')) . 'filter_id=' . $filterlist['id']) . '">' . $manufacturers_name . '</a></li>'; } $manufacturers_list .= '</ul>'; echo $manufacturers_list; // in case re-use the SQL query mysql_data_seek($filterlist_query, 0); } Link to comment Share on other sites Thanks ! that works.. kinda.. (for reference, this is being put in an infobox). Yours only showed up the list when a manufacturer had already been selected. So, I couldnt use it as I couldn't suss how to fix it. Either way, I've now removed the sort part of the URL as the resulting page canonical doesn't include, it, and it obviously has to be the same. So, I now have the following.. could still be tidied though with the URL format? Mine is a bit of a bodge.. <?php if (PRODUCT_LIST_FILTER > 0) { if (isset($HTTP_GET_VARS['manufacturers_id'])) { $filterlist_sql = "select distinct c.categories_id as id, cd.categories_name as name from " . TABLE_PRODUCTS . " p, " . TABLE_PRODUCTS_TO_CATEGORIES . " p2c, " . TABLE_CATEGORIES . " c, " . TABLE_CATEGORIES_DESCRIPTION . " cd where p.products_status = '1' and p.products_id = p2c.products_id and p2c.categories_id = c.categories_id and p2c.categories_id = cd.categories_id and cd.language_id = '" . (int)$languages_id . "' and p.manufacturers_id = '" . (int)$HTTP_GET_VARS['manufacturers_id'] . "' order by cd.categories_name"; } else { $filterlist_sql= "select distinct m.manufacturers_id as id, m.manufacturers_name as name from " . TABLE_PRODUCTS . " p, " . TABLE_PRODUCTS_TO_CATEGORIES . " p2c, " . TABLE_MANUFACTURERS . " m where p.products_status = '1' and p.manufacturers_id = m.manufacturers_id and p.products_id = p2c.products_id and p2c.categories_id = '" . (int)$current_category_id . "' order by m.manufacturers_name"; } $filterlist_query = tep_db_query($filterlist_sql); if (tep_db_num_rows($filterlist_query) > 1) { echo '<ul>'; while ($filterlist = tep_db_fetch_array($filterlist_query)) { $filterid = $filterlist['id']; $manuname = $filterlist['name']; echo '<li><a href="' . tep_href_link(FILENAME_DEFAULT, 'cPath=' . substr(tep_get_path($category_id), 6)) . '?filter_id=' . $filterid . '">' . $manuname . '</a></li>'; } } echo '</ul>'; } ?> Link to comment Share on other sites Archived This topic is now archived and is closed to further replies. × × • Create New...
__label__pos
0.874719
August, 2012 • The Old New Thing Exiting a batch file without exiting the command shell -and- batch file subroutines • 17 Comments Prepare your party hats: Batch File Week is almost over. In your batch file, you may want to exit batch file processing (say, you encountered an error and want to give up), but if you use the exit command, that will exit the entire command processor. Which is probably not what you intended. Batch file processing ends when execution reaches the end of the batch file. The trick therefore is to use the goto command to jump to a label right before the end of the file, so that execution "falls off the end". @echo off if "%1"=="" echo You must provide a file name.&goto end if NOT EXIST "\\server\backup\%USERNAME%\nul" mkdir "\\server\backup\%USERNAME%" if NOT EXIST "\\server\backup\%USERNAME%\nul" echo Unable to create output directory.&goto end copy "%1" "\\server\backup\%USERNAME%" :end Here, there are two places where we abandon batch file execution. One is on an invalid parameter, and another is if the output directory couldn't be created (or if it isn't a directory at all). The batch command interpreter provides a courtesy label to simply this technique: The special goto target goto :eof (with the colon) jumps to the end of the batch file. It's as if every batch file had a hidden goto label called :eof on the very last line. The goto :eof trick becomes even more handy when you start playing with batch file subroutines. Okay, let's back up: Batch file subroutines? By using the call command, a batch file can invoke another batch file and regain control after that other batch file returns. (If you forget the call, then control does not return. In other words, the default mode for batch file invocation is chain.) In other words, the call command lets you invoke another batch file as a subroutine. The command line parameters are received by the other batch file as the usual numbered parameters %1, %2, etc. It's annoying having to put every subroutine inside its own batch file, so the command interpreter folks added a way to call a subroutine inside the same batch file. The syntax for this is call :label parameter parameter parameter. This is logically equivalent to a batch file recursively calling itself, except that execution begins at the specified label instead of the first line of the file. (It's as if a secret goto label were added to the top of the file.) And since it is a batch file, execution of the called subroutine ends when execution falls off the end of the file. And that's where the special goto target comes in handy. At the end of your subroutine, you can jump to the end of the batch file (so that execution falls off the end) by doing a goto :eof. In other words, goto :eof is the return statement for batch file subroutines. Let's take it for a spin: @echo off call :subroutine a b c call :subroutine d e f goto :eof :subroutine echo My parameters are 1=%1, 2=%2, 3=%3 goto :eof That final goto :eof is redundant, but it's probably a good habit to get into, like putting a break; at the end of your last case. The subroutine technique is handy even if you don't really care about the subroutine, because stashing the arguments into the %n parameters lets you use the tilde operators to process the inbound parameter. @echo off call :printfilesize "C:\Program Files\Windows NT\Accessories\wordpad.exe" goto :eof :printfilesize echo The size of %1 is %~z1 goto :eof Okay, this isn't actually much of a handy trick because you can also do it without a subroutine: @echo off for %%i ^ in ("C:\Program Files\Windows NT\Accessories\wordpad.exe") ^ do echo The size of %%i is %%~zi On the other hand, the subroutine trick combines well with the FOR command, since it lets you put complex content in the loop body without having to mess with delayed expansion: @echo off setlocal set DISKSIZE=1474560 set CLUSTER=512 set DISKS=1 set TOTAL=0 for %%i in (*) do call :onefile "%%i" set /a DISKS=DISKS+1 echo Total disks required: %DISKS% endlocal goto :eof :onefile set /a SIZE=((%~z1 + CLUSTER - 1) / CLUSTER) * CLUSTER if %SIZE% GEQ %DISKSIZE% ( echo File %1 does not fit on a floppy - skipped goto :eof ) set /a TOTAL=TOTAL+SIZE if %TOTAL% GEQ %DISKSIZE% ( echo ---- need another disk set /a DISKS=DISKS+1 set /a TOTAL=SIZE ) echo copy %1 goto :eof This program calculates the number of floppy disks it would take to copy the contents of the current directory without compression. The setlocal command takes a snapshot of the environment for restoration when we perform the endlocal at the end. That will clean up our temporary variables when we're done. The first two variables are parameters for the calculation, namely the disk capacity and the cluster size. (We're assuming that the root directory can hold all the files we may ultimately copy. Hey, this is just a demonstration, not a real program.) The next two variables are our running total of the number of disks we've used so far, and how many bytes we've used on the last disk. The for command iterates over all the files in the current directory. For each one, we call :onefile with the file name. The :onefile subroutine does all the real work. First, it takes the file size %~z1 and rounds it up to the nearest cluster. It then sees if that size is larger than a floppy disk; if so, then we're doomed, so we just skip the file. Otherwise, we add the file to the current disk and see if it fits. If not, then we declare the disk full and put the file on a brand new disk. After the loop is complete, we print the number of floppy disks we calculated. (This algorithm erroneously reports that no files require one disk. Fixing that is left as an exercise.) There's your quick introduction to the secret :eof label and batch file subroutines. [Raymond is currently away; this message was pre-recorded.] • The Old New Thing FORFILES, for your fancier batch file enumeration needs • 20 Comments Crack open open the champagne: Batch File Week is finally over! Variations on the for /f %%i in ('dir /b ...') will let you repeat an operation on the contents of a directory, possibly even recursively if you add the /s option, with some basic attribute-level filtering if you add the /a or /a- flags. For your fancy recursive file operations, there's a tool called FORFILES which iterates through the contents of a directory (recursively if requested), executing a command on each item it finds. It also has additional filtering capability, like selecting files based on their last-modified time. For example, you could copy all files in the current directory which were modified today: forfiles /D +0 /c "cmd /c copy @file \\server\today" Unfortuantely, the /D option is not as flexible as one might like. For example, while it can pick files modified today, it can't pick files modified in the last week, because the relative-date-picker knows only how to pick files modified on or before a date in the past or files modified on or after a date in the future. (Who the heck wants to operate on files modified in the future? Except perhaps the Microsoft Research folks who are working on that time machine.) You can type FORFILES /? for more information on what you can do (and by seeing what's omitted, what you can't do). If the command you want to execute is rather long, you can offload it back into the batch file being executed: @echo off if "%1"=="/callback" goto callback forfiles /D +0 /c "cmd /c call "%~f0" /callback @isdir @file @fsize" goto :eof :callback rem %2 = @isdir rem %3 = @file rem %4 = @fsize if %2==TRUE echo Skipping directory %3.&goto :eof echo Copying file %3 to \\server\today (%4 bytes) One gotcha here is that since each command runs in a sub-shell, it can read environment variables, but any modifications it makes to environment variables will be lost since the command is modifying only its local environment variables. A workaround for this is to use FORFILES to select the data to operate on, but use FOR to actually perform the operation. Since FOR runs inside the main command interpreter, it can modify environment variables. set TOTALSIZE=0 for /f %%i in ('forfiles /d +0 /c "cmd /c if @isdir==FALSE echo @fsize"') ^ do set /a TOTALSIZE=TOTALSIZE + %%i Here, we use FORFILES to enumerate all the files (not directories) modified today and print their sizes. We wrap this inside a FOR which reads the sizes and adds them up. If the operation you want to perform on each file is complex, you can of course offload it into a subroutine call. for /f %%i ^ in ('forfiles /d +0 /c "cmd /c if @isdir==FALSE echo @fsize"') ^ do call :subroutine %%i I'm cheating here because I know that @fsize doesn't contain spaces. If you are processing file names, then you need to be more careful. for /f "tokens=*" %%i ^ in ('forfiles /d +0 /c "cmd /c if @isdir==FALSE echo @fname"') ^ do call :subroutine %%i • The Old New Thing How do I find the most recently created file in a directory from a batch file? • 29 Comments We've reached Hump Day of Batch File Week. Remember, nobody actually likes batch programming. You merely tolerate it. Today, we'll find the most recently-created item in a directory. (For example, we have a server that holds our daily builds, and you might want to write a batch file that automatically installs the latest build.) There may be better ways, but what I do is ask for a list sorted oldest-to-newest, and then choose the last one. for /f %%i in ('dir /b/a-d/od/t:c') do set LAST=%%i echo The most recently created file is %LAST% This trick works by asking the dir command to list just the names (/b) of just the files /a-d, sorted by date (/od), based on the creation time (/t:c). Each time a new file is reported, its name is stored in the LAST variable, overwriting the previous one. When the loop finishes, the LAST variable contains the name of the newest file, since that's the one that didn't get overwritten. You can tweak the command line to perform other queries. For example, if you want the newest file, then just ask for a reverse sort (/o-d). If you want the file sorted by modified time rather than creation time, then use /t:w. You get the idea. Limitations: The implementation above assumes that no files contain spaces in their name. Removing this limitation is left as an exercise. • The Old New Thing What is SysFader and why is it always crashing? • 23 Comments If you type SysFader into your favorite search engine, you'll find lots of hits from people asking, "What is SysFader, and why is it always crashing Internet Explorer?" SysFader: iexplore.exe - Application Error The exception unknown software exception (0xe06d7363) occurred in the application at location 0x7c812afb. When a program encounters a fatal error, the system crash dialog appears. And it needs to put somebody's name in the title of the dialog to indicate which application crashed. Sure, it has the process name (iexplore.exe), but it has this nagging feeling that it can do better. After all, not everybody will know that "AcroRd32.exe" is "The menu for my favorite restaurant that I was looking at in Adobe Acrobat Reader". So it goes looking for a window that belongs to the thread so it can steal the window's title and use that to help the user understand what it was that crashed. And if can't find any visible windows, it will go for invisible ones, on the theory that, "Well maybe the application crashed before it could show the window." Now let's see what happens when we apply this logic to SysFader. SysFader is a helper window used by Internet Explorer to perform fade-out animations. It really doesn't do much, but it is a window, albeit an invisible one when there are no animations in progress. SysFader happens to run on a shared worker thread. If that worker thread is being borrowed by some other background task, and that background task crashes, then when the crash dialog appears and looks around for a window to put in the title, it says "Rats, I don't have any visible windows, but I do have this invisible one, so I'll go ahead and put that one in the title bar. Better than nothing." It's sort of the error-reporting version of the Politician's Fallacy: 1. A window must be blamed. 2. This is a window. 3. Therefore, we must blame it. It's like your photo appearing in a newspaper article headlined Robbery at Woodgrove Bank, Suspect At Large, not because you're the suspect, but because you happen to have been in the building at the time of the robbery. Bonus chatter: You probably recognize the exception code as an unhandled C++ exception. Internet Explorer doesn't use C++ exceptions, so the exception most likely came from a plug-in. [Raymond is currently away; this message was pre-recorded.] • The Old New Thing Microspeak: planful • 20 Comments Every year, Microsoft invites its employees to fill out a poll which asks questions regarding all sorts of things. One of the things Microsoft employees are asked to evaluate is whether they think that their vice president is acting planfully. The tricky part about that question is that nobody knows exactly what the word planfully means. Merriam-Webster defines planful as "full of plans : RESOURCEFUL, SCHEMING." Is that what they're asking us? Whether our vice president is resourceful and scheming? Is that a good thing or a bad thing? The OED, on the other hand, defines it as "Full or rich in plans; characterized by planning; organized, systematic." It's not clear whether this means that the person goes into a situation with a plan ahead of time, or that once they get into a situation, they develop a plan for getting out. (Maybe both?) I caught a vice president using the word planful during a meeting, so I raised my hand and asked, "What does planful mean?" (Most people don't have the nerve to raise their hand during a meeting and ask, "Um, what does that word mean?") The vice president replied, "Actually, I don't think it's a real word. I think [name of a senior manager] made it up." I followed up: "Well, in that case, why are we asked to evaluate you every year on whether your actions are planful? How can we answer a question that uses a made-up word nobody knows the definition of?" Obligatory xpclient example: "Was it a planful decision to introduce the auto-sorting problem?" Reminder: Microspeak is not merely terms unique to Microsoft, but also terms used at Microsoft more often than in general. • The Old New Thing The shifting sands of "Run as different user" • 31 Comments A customer liaison asked the following question on behalf of his customer. When I do a shift-right-click on a shortcut to a program, I see the following: • On Windows Server 2008, shift-right-click does not offer the option Run as different user. • On Windows 7, shift-right-click does offer the option Run as different user. On Windows Server 2008 R2 (the server counterpart to Windows 7), shift-right-click does offer the option Run as different user. The option to run a program as another user (other than Administrator) was present in Windows XP, but it was lost in Windows Vista. It appears that we responded to those complaints by restoring the functionality in Windows 7. Is that right? The odd thing is that my customer has the Run as different user option available on their Windows 7 machines, but not on their Windows Server 2008 R2 machines. Does whether you have access to Run as different user depend on how you installed Windows Server 2008 R2? (If it matters, my customer installed it via the Microsoft Deployment Toolkit.) I found this question interesting for a variety of reasons. First of all, it comes dangerously close to being one of those Please tell me I'm not hallucinating type of requests. These support requests take the following peculiar form: We did X, then Y, then Z, and then Q happened. Is that right? "Um, if you say so. I wasn't there." But in this case, it started out looking like it was going to turn into a Please tell me I'm not hallucinating request, then veered into "Is X the reason the feature was added back?" This is a rather peculiar question, because knowing the answer one way or the other doesn't actually take you any closer to a solution to the problem. (And I don't know the answer, but fortunately, it wasn't relevant to solving the customer's problem.) Another interesting thing about the customer's question is that he never actually comes out and asks the question. He sort of says a few related things, and asks a tangential question, but never comes right out and asks, "How do I get the Run as different user option to work on my Windows Server 2008 R2 machine?" It's like a kid who pointedly hangs around a candy bowl, hoping that an adult will say, "Here, have a piece of candy." You're a grown-up now. You don't have to linger around the candy bowl hoping somebody will figure out that you want some candy. You should just ask, "May I have a piece of candy?" My psychic powers tell me that they have set the Require trusted path for credential entry policy. The Run as different user feature is disabled if you set this policy. The customer liaison replied, "It appears that the option for Require trusted path for credential entry is not enabled. The customer is going to do a clean install and test on a non-domain-joined machine to avoid any GPOs." Some time passed, and the customer liaison reported back with a resolution. The culprit was indeed the Require trusted path for credential entry policy. It didn't show up in a GPO search because they were setting the policy via a script rather than deploying a group policy object. It was very nice of the customer liaison to reply with confirmation that the problem was solved. This is, unfortunately, a notable event. Most of the time, people never report back if your suggestion solved their program; they only come back if your suggestion didn't help. Which means you're not sure if your suggestion solved the problem, or if it didn't solve the problem but they decided to continue the investigation somewhere else. Bonus chatter: This shows yet another reason why you should use Group Policy Objects to manage group policies rather than custom scripts that whack registry keys. In addition to the fact that registry key whacking may not work, there are tools for processing Group Policy Objects that make this sort of investigation much easier. • The Old New Thing Command line tool to manage Windows 7 Libraries, with source code • 14 Comments A customer asked if there was a command-line tool for managing Windows 7 Libraries so that they could create and deploy libraries across their organization. Not only is there such a tool, it even comes with source code. The Shell Library Command Line Sample shows how to manage Windows 7 libraries programmatically via the IShell­Library interface. And it's actually a useful program on its own. Usage: shlib.exe SUBCOMMAND Displays and modifies the attributes of Shell Libraries. Supported commands: create Creates a library at the specified path. info Prints info about the given library. enum Enumerates the folders in the library. setattrib Modifies the attributes of the library. add Adds the specified folder to the specified library. remove Removes the specified folder from the library. setsaveloc Sets the default save location of the library. resolve Resolves the specified folder in the library. resolveall Resolves all locations in the library in bulk. manage Displays the Manage Library Dialog for the library. For example, to see all the folders that are part of your Documents library, use the command shlib enum FOLDERID_DocumentsLibrary Each of the commands has further sub-options. Usage: create shlib.exe [OPTIONS] [...] Creates a library at the specified path. Options: -create[:ARG] Specifies that a new library should be created. <none> Fail if the library already exists. overwrite Overwrite any existing library. uniquename Generate a unique name in case of conflict. The library may be specified by a file system path, or by a KNOWNFOLDERID (e.g. "FOLDERID_DocumentsLibrary"). And the source code shows how to perform each of these actions programmatically. The IShellLibrary interface is also supported by the Windows API Code Pack for .NET, so you C# folks can use it too. (One thing I noticed that is missing from the sample is a seticon command. Shouldn't be hard to implement. You just call IShell­Library::Set­Icon.) • The Old New Thing What happened to the Windows 2000 "Language settings for the system" control panel? • 41 Comments In 2011, a customer had a question about migrating from Windows 2000 to Windows XP. (That's right, this customer was still using Windows 2000 in the year 2011.) Specifically, they noted that in Windows 2000, they can select multiple languages in the "Language settings for the system" portion of the Regional Options control panel, and they couldn't find the corresponding control panel setting in Windows XP. Regional Options General Numbers Currency Time Date Input Locales   Settings for the current user Many programs support international settings for numbers, currencies, times, and dates. Set the locale in order to use the standard settings. Your locale (location): English (United States) Language settings for the system Your system is configured to read and write documents in multiple languages. Arabic Armenian Baltic Central Europe Cyrillic In Windows 2000, "Language settings for the system" provides the option to install support (such as code pages, keyboard layouts, and fonts) for various language groups. In Windows XP, the big list of language groups was reduced to three categories: • Basic (Baltic, Central Europe, Cyrillic, Greek, Turkish, Western Europe) • Complex (Arabic, Armenian, Georgian, Hebrew, Indic, Vietnamese, Thai) • East Asia (Chinese Simplified, Chinese Traditional, Japanese, Korean) The Basic category is always installed. To install the Complex or East Asia categories, use the "Supplemental language support" section of the Regional and Language Options control panel. Windows XP Regional and Language Options property sheet, with a section titled "Supplemental language support" with options "Install files for complex script and right-to-left languages (including Thai)" and "Install files for East Asian languages Someday, that customer might upgrade to Windows Vista, so I may as well answer the question right now. In Windows Vista and onward, things were simplified even more: All language groups are installed at all times. The dialog box went away completely since there were no options remaining. As it turns out, the customer's problem had nothing to do with language support. Of course, they didn't come out and describe the problem they were having; rather, they reduced the problem into multiple pieces, and then asked for help on one specific piece. They tried out a solution based on this new information, but it didn't solve the problem, because as it turns out, the Language settings for the system control panel was a red herring. If they had told us what their original problem was, we could have told them "But this setting will do nothing to solve your problem. What you really need is over there." Tomorrow, we'll look at the customer's actual problem. (So please don't try to guess or you'll ruin the surprise. I can't believe I had to write that.) • The Old New Thing Of what possible legitimate use are functions like CreateRemoteThread, WriteProcessMemory, and VirtualProtectEx? • 26 Comments There are a bunch of functions that allow you to manipulate the address space of other processes, like Write­Process­Memory and Virtual­Alloc­Ex. Of what possible legitimate use could they be? Why would one process need to go digging around inside the address space of another process, unless it was up to no good? These functions exist for debuggers. For example, when you ask the debugger to inspect the memory of the process being debugged, it uses Read­Process­Memory to do it. Similarly, when you ask the debugger to update the value of a variable in your process, it uses Write­Process­Memory to do it. And when you ask the debugger to set a breakpoint, it uses the Virtual­Protect­Ex function to change your code pages from read-execute to read-write-execute so that it can patch an int 3 into your program. If you ask the debugger to break into a process, it can use the Create­Remote­Thread function to inject a thread into the process that immediately calls Debug­Break. (The Debug­Break­Process was subsequently added to make this simpler.) But for general-purpose programming, these functions don't really have much valid use. They tend to be used for nefarious purposes like DLL injection and cheating at video games. [Raymond is currently away; this message was pre-recorded.] • The Old New Thing Adventures in undefined behavior: The premature downcast • 45 Comments A customer encountered the following problem: class Shape { public: virtual bool Is2D() { return false; } }; class Shape2D : public Shape { public: virtual bool Is2D() { return true; } }; Shape *FindShape(Cookie cookie); void BuyPaint(Cookie cookie) { Shape2D *shape = static_cast<Shape2D *>(FindShape(cookie)); if (shape->Is2D()) { .. do all sorts of stuff ... } } The Buy­Paint function converts the cookie back to a Shape object, and then checks if the object is a Shape2D object by calling Is­2D. If so, then it does some more stuff to figure out what type of paint to buy. (Note to nitpickers: The actual scenario was not like this, but I presented it this way to illustrate the point. If you say "You should've used RTTI" or "You should've had a BuyPaint method on the Shape class", then you're missing the point.) The programmers figured they'd save some typing by casting the result of Find­Shape to a Shape2D right away, because after all, since Is­2D is a virtual method, it will call the right version of the function, either Shape::Is­2D or Shape2D::Is­2D, depending on the actual type of the underlying object. But when compiler optimizations were turned on, they discovered that the call to Is­2D was optimized away, and the Buy­Paint function merely assumed that it was always operating on a Shape2D object. It then ended up trying to buy paint even for one-dimensional objects like points and lines. Compiler optimization bug? Nope. Code bug due to reliance on undefined behavior. The C++ language says (9.3.1) "If a nonstatic member function of a class X is called for an object that is not of type X, or of a type derived from X, the behavior is undefined." In other words, if you are invoking a method on an object of type X, then you are promising that it really is of type X, or a class derived from it. The code above violates this rule, because it is invoking the Is­2D method on a Shape2D*, which therefore comes with the promise "This really is a Shape2D object (or something derived from it)." But this is a promise the code cannot deliver on, because the object returned by Find­Shape might be a simple Shape. The compiler ran with the (false) promise and said, "Well, since you are guaranteeing that the object is at least a Shape2D, and since I have studied your code and determined that no classes which further derive from Shape2D override the Is­2D method, I have therefore proved that the final overrider is Shape2D::Is­2D and can therefore inline that method." Result: The compiler inlines Shape2D::Is­2D, which returns true, so the "if" test can be optimized out. The compiler can assume that Buy­Paint is always called with cookies that represent two-dimensional objects. The fix is to do the annoying typing that the original authors were trying to avoid: void BuyPaint(Cookie cookie) { Shape *shape = FindShape(cookie); if (shape->Is2D()) { Shape2D *shape2d = static_cast<Shape2D *>(shape); .. do all sorts of stuff (with shape2d) ... } } Page 1 of 3 (29 items) 123
__label__pos
0.644993
3 $\begingroup$ How can I solve this? I saw Solving the recurrence $T(n)=T(n-1)*T(n-2)$ but I don't know how I can apply it to $T(n)=T(n-1)/T(n-2)$? $\endgroup$ 5 $\begingroup$ Then assume $T(0) = a, T(1)=b$ such as $a \neq 0$ and $b \neq 0$. You could write down first few terms and deduce the pattern $$T(0)= a$$ $$T(1)= b$$ $$T(2)= \frac{T(1)}{T(0)} = \frac{b}{a}$$ $$T(3)= \frac{T(2)}{T(1)} = \frac{b/a}{b} = \frac{1}{a}$$ $$T(4)= \frac{T(3)}{T(2)} = \frac{1/a}{b/a} = \frac{1}{b}$$ $$T(5)= \frac{T(4)}{T(3)} = \frac{1/b}{1/a} = \frac{a}{b}$$ $$T(6)= \frac{T(5)}{T(4)} = \frac{a/b}{1/b} = a = T(0)$$ $$T(7)= \frac{T(6)}{T(5)} = \frac{a}{a/b} = b = T(1)$$ $$...$$ Thus the values of $T(n)$ repeat with period of $6$. | cite | improve this answer | | $\endgroup$ 2 $\begingroup$ If you are dealing with a recurrence for which you have no idea how to attempt the solution, it is always a good idea to try working out the first few terms by hand (or using software such as Excel). If you try this, the solution for this recurrence will probably be quite obvious... | cite | improve this answer | | $\endgroup$ • 1 $\begingroup$ I tried this. Took me exactly 12 seconds to set up a spreadsheet that made the solution shown by fade2black absolutely obvious. $\endgroup$ – gnasher729 Nov 4 '17 at 15:01 Your Answer By clicking “Post Your Answer”, you agree to our terms of service, privacy policy and cookie policy Not the answer you're looking for? Browse other questions tagged or ask your own question.
__label__pos
0.999263
research!rsc Thoughts and links about programming, by RSS Floating Point to Decimal Conversion is Easy Posted on Friday, July 1, 2011. Floating point to decimal conversions have a reputation for being difficult. At heart, they're really very simple and straightforward. To prove it, I'll explain a working implementation. It only formats positive numbers, but expanding it to negative numbers, zero, infinities and NaNs would be very easy. An IEEE 64-bit binary floating point number is an integer v in the range [252, 253) times a power of two: f = v × 2e. Constraining the fractional part of the unpacked float64 to the range [252, 253) makes the representation unique. We could have used any range that spans a multiplicative factor of two, but that range is the first one in which all the values are integers. In Go, math.Frexp unpacks a float64 into f = fr × 2exp where fr is in the range [½, 1). (C's frexp does too.) Converting to our integer representation is easy: fr, exp := math.Frexp(f) v := int64(fr * (1<<53)) e := exp - 53 To convert the integer to decimal, we'll use strconv.Itoa64 out of laziness; you know how to write the direct code. buf := make([]byte, 1000) n := copy(buf, strconv.Itoa64(v)) The allocation of buf reserves space for 1000 digits. In general 1/2e requires about 0.7e non-zero decimal digits to write in full. For a float64, the smallest positive number is 1/21074, so 1000 digits is plenty. The second line sets n to the number of bytes copied in from the string representation of the (integer) v. Throughout, n will be the number of digits in buf. Note that we're working with ASCII decimal digits '0' to '9', not bytes 0-9. Now we've got the decimal for v stored in buf. Since f = v × 2e, all that remains is to multiply or divide buf by 2 the appropriate number of times (e or -e times). Here's the loop to handle positive e: for ; e > 0; e-- { δ := 0 if buf[0] >= '5' { δ = 1 } x := byte(0) for i := n-1; i >= 0; i-- { x += 2*(buf[i] - '0') x, buf[i+δ] = x/10, x%10 + '0' } if δ == 1 { buf[0] = '1' n++ } } dp := n Each iteration of the inner loop overwrites buf with twice buf. To start, the code determines whether there will be a new digit (δ = 1), which happens when the leading digit is at least 5. Then it runs up the number from right to left, just as you learned in grade school, multiplying each digit by two and using x to carry the result. The digit buf[i] moves into buf[i+δ]. At the end, if the code needs to insert an extra digit, it does. Run e times. After the loop finishes, we record in dp the current location of the decimal point. Since buf is still an integer (it started as an integer and we've only doubled things), the decimal point is just past all the digits. Of course, e might have started out negative, in which case we've done nothing and still need to halve buf e times: for ; e < 0; e++ { if buf[n-1]%2 != 0 { buf[n] = '0' n++ } δ, x := 0, byte(0) if buf[0] < '2' { δ, x = 1, buf[0] - '0' n-- dp-- } for i := 0; i < n; i++ { x = x*10 + buf[i+δ] - '0' buf[i], x = x/2 + '0', x%2 } } Dividing by two needs an extra digit if the last digit is odd, to store the final half; in that case we add a 0 to buf so that we'll have room to store a completely precise answer. After adding the 0, we can set up for the division itself. If the first digit is less than 2, it's going to become a zero; in the interest of avoiding leading zeros we use an initial partial value x and move digits up (δ = 1) during the division. The multiplication ran from right to left copying from buf[i] to buf[i+δ]. The division runs left to right copying from buf[i+δ] to buf[i]. Now buf[0:n] has all the non-zero digits in the exact decimal representation of our f, and dp records where to put the decimal point. We could stop now, but we might as well implement correct rounding while we're here. The interesting case is when we have more digits than requested (n > ndigit). To make the decision, just like in grade school, we look at the first digit being removed. If it's less than 5, we round down by truncating. If it's greater than 5, we round up by incrementing what's left after truncation. If it's equal to 5, we have to look at the rest of the digits being dropped. If any of the rest of the digits are nonzero, then rounding up is more accurate. Otherwise we're right on the line. In grade school I was taught to handle this case by rounding up: 0.5 rounds to 1. That's a simple rule to teach, but breaking this tie by rounding to the nearest even digit has better numerical properties, because it rounds up and down equally often, and it is the usual rule employed in real calculations. This all results in the thunderclap rounding condition: buf[prec] > '5' || buf[prec] == '5' && (nonzero(buf[prec+1:n]) || buf[prec-1]%2 == 1) You can see why children are taught buf[prec] >= '5' instead. Anyway, if we have to increment after the truncation, we're still working in decimal, so we have to handle carrying incremented 9s ourselves: i := prec-1 for i >= 0 && buf[i] == '9' { buf[i] = '0' i-- } if i >= 0 { buf[i]++ } else { buf[0] = '1' dp++ } The loop handles the 9s. The if increments what's left, or, if we turned the whole string into zeros, it simulates inserting a 1 at the beginning by changing the first digit to a 1 and moving the decimal place. Having done that and set n = prec, we can piece together the actual number: return fmt.Sprintf("%c.%se%+d", buf[0], buf[1:n], dp-1) That prints the first digit, then a decimal point, then the rest of the digits, and finally the N suffix. The exponent must adjust the number by dp−1 because we are printing all but the first digit after the decimal point. That's all there is to it. To print  f = v × 2e  you write v in decimal, multiply or divide the decimal by 2 the right number of times, and print what you're left holding. The Plan 9 C library and the Go library both use variants of the approach above. On my laptop, the code above does 100,000 conversions per second, which is plenty for most uses, and the code is easy to understand. Why are some converters more complicated than this? Because you can make them a little faster. On my laptop, glibc's sprintf(buf, "%.50e", M_PI) is about 15 times faster than an equivalent print using the code above, because the implementation of sprintf uses much more sophisticated mathematics to speed the conversion. If you have a stomach for pages of equations, there are many interesting papers that discuss how to do this conversion and its inverse more quickly: Just remember: The conversion is easy. The optimizations are hard. Code The full program is available in this Gist or you can try it using the Go playground. (Comments originally posted via Blogger.) • nicolas cellier (July 1, 2011 10:44 AM) In general 1/2e requires about 0.7e non-zero decimal digits to write in full: It requires exactly e digits after the decimal point, but removing the leading zeros is interesting... 2^10 > 10^3, thus 2^-10e has at least 3e leading zeroes. Thus 2^-e has at least trunc(0.3e) leading zeroes, thus the 0.7e digits. Very good to know :) • Jan-willem (July 1, 2011 10:47 AM) Do you simply cap prec to avoid extra decimal places? I'm worried about the 1.99999999999 problem that Steele & White discuss, where you really want the shortest decimal that reads as the number you've got. • Russ Cox (July 1, 2011 10:55 AM) http://golang.org/src/pkg/strconv/ftoa.go's roundShortest has the logic for the '1.9999999999' problem. Basically, it's easy to figure out the (v', e') for the minimum (least) number you could print to get the right answer when converted back, and also the maximum number. Then you walk all three at the same time until they start to differ. Then you can round. If a short form is a valid answer, it you'll get a digit difference very quickly. • GlacJAY (July 27, 2011 2:08 AM) Lost minus sign in the power's range.
__label__pos
0.839085
extract the geometric surface from a trimmed face I have a face cut by boolean Cut operation. I need to get the geometric surface (not the resulting face which is a topological entity) representing the result (not the original full Surface which I get if I use BRepTool::Surface option). Is there a way to do this? Pawel's picture Hi Chanaka, maybe if you query for the parameters of your face with BRepTools::UVBounds you can restrict the resulting surface. Pawel
__label__pos
0.966488
[Free] 2018(July) Ensurepass Cisco 200-125 Dumps with VCE and PDF 51-60 Ensurepass.com : Ensure you pass the IT Exams 2018 July Cisco Official New Released 200-125 100% Free Download! 100% Pass Guaranteed! CCNA Cisco Certified Network Associate CCNA (v3.0) Question No: 51 – (Topic 2) At which layer of the OSI model is RSTP used to prevent loops? 1. physical 2. data link 3. network 4. transport Answer: B Explanation: RSTP and STP operate on switches and are based on the exchange of Bridge Protocol Data Units (BPDUs) between switches. One of the most important fields in BPDUs is the Bridge Priority in which the MAC address is used to elect the Root Bridge -gt; RSTP operates at Layer 2 – Data Link layer -gt;. Question No: 52 – (Topic 2) What parameter can be different on ports within an EtherChannel? 1. speed 2. DTP negotiation settings 3. trunk encapsulation 4. duplex Answer: B Explanation: For an etherchannel to come up, the speed, duplex and the trunk encapsulation must be the same on each end. Question No: 53 – (Topic 2) Refer to the exhibit. Ensurepass 2018 PDF and VCE The output that is shown is generated at a switch. Which three statements are true? (Choose three.) 1. All ports will be in a state of discarding, learning, or forwarding. 2. Thirty VLANs have been configured on this switch. 3. The bridge priority is lower than the default value for spanning tree. 4. All interfaces that are shown are on shared media. 5. All designated ports are in a forwarding state. 6. This switch must be the root bridge for all VLANs on this switch. Answer: A,C,E Explanation: From the output, we see that all ports are in designated role (forwarding state). The command “show spanning-tree vlan 30 only shows us information about VLAN 30. We don’t know how many VLAN exists in this switch -gt;. The bridge priority of this switch is 24606 which is lower than the default value bridge priority 32768. All three interfaces on this switch have the connection type “p2p”, which means Point-to- point environment – not a shared media. The only thing we can specify is this switch is the root bridge for VLAN 3o but we cannot guarantee it is also the root bridge for other VLANs. Question No: 54 – (Topic 2) Refer to the exhibit. Ensurepass 2018 PDF and VCE Given the output shown from this Cisco Catalyst 2950, what is the reason that interface FastEthernet 0/10 is not the root port for VLAN 2? 1. This switch has more than one interface connected to the root network segment in VLAN 2. 2. This switch is running RSTP while the elected designated switch is running 802.1d Spanning Tree. 3. This switch interface has a higher path cost to the root bridge than another in the topology. 4. This switch has a lower bridge ID for VLAN 2 than the elected designated switch. Answer: C Explanation: Since the port is in the blocked status, we must assume that there is a shorter path to the root bridge elsewhere. Question No: 55 – (Topic 2) Refer to the exhibit. Ensurepass 2018 PDF and VCE Which WAN protocol is being used? 1. ATM 2. HDLC 3. Frame Relay 4. PPP Answer: C Explanation: This question is to examine the show int command. According to the information provided in the exhibit, we can know that the data link protocol used in this network is the Frame Relay protocol. “LMI enq sent…” Question No: 56 – (Topic 2) Refer to the exhibit. Ensurepass 2018 PDF and VCE Why has this switch not been elected the root bridge for VLAN1? 1. It has more than one interface that is connected to the root network segment. 2. It is running RSTP while the elected root bridge is running 802.1d spanning tree. 3. It has a higher MAC address than the elected root bridge. 4. It has a higher bridge ID than the elected root bridge. Answer: D Explanation: The root bridge is determined by the lowest bridge ID, and this switch has a bridge ID priority of 32768, which is higher than the roots priority of 20481. Question No: 57 – (Topic 2) Which command enables RSTP on a switch? 1. spanning-tree uplinkfast 2. spanning-tree mode rapid-pvst 3. spanning-tree backbonefast 4. spanning-tree mode mst Answer: B Explanation: Rapid Spanning Tree Protocol (RSTP) is an enhancement of the original STP 802.1D protocol. The RSTP 802.1w protocol is an IEEE open implementation. Cisco has its own proprietary implementation of RSTP, that includes the benefits of its Per-VLAN spanning tree protocols, called Rapid-PVST . To activate the Rapid-PVST protocol: switch(config)#spanning-tree mode rapid-pvst Question No: 58 – (Topic 2) Refer to the exhibit. Ensurepass 2018 PDF and VCE Switch-1 needs to send data to a host with a MAC address of 00b0.d056.efa4. What will Switch-1 do with this data? 1. Switch-1 will drop the data because it does not have an entry for that MAC address. 2. Switch-1 will flood the data out all of its ports except the port from which the data originated. 3. Switch-1 will send an ARP request out all its ports except the port from which the data originated. 4. Switch-1 will forward the data to its default gateway. Answer: B Explanation: This question tests the operating principles of the Layer 2 switch. Check the MAC address table of Switch1 and find that the MAC address of the host does not exist in the table. Switch1 will flood the data out all of its ports except the port from which the data originated to determine which port the host is located in. Switches work as follows: ->Switches learn the MAC addresses of PCs or workstations that are connected to their switch ports by examining the source address of frames that are received on that port. ->Machines may have been removed from a port, turned off, or moved to another port on the same switch or a different switch. ->This could cause confusion in frame forwarding. ->The MAC address entry is automatically discarded or aged out after 300 seconds ->If there is not MAC address of destination host in MAC table, switch sends broadcast to all ports except the source to find out the destination host. In output there is no MAC address of give host so switch floods to all ports except the source port. Question No: 59 – (Topic 2) What is one benefit of PVST ? 1. PVST supports Layer 3 load balancing without loops. 2. PVST reduces the CPU cycles for all the switches in the network. 3. PVST allows the root switch location to be optimized per VLAN. 4. PVST automatically selects the root bridge location, to provide optimized bandwidth usage. Answer: C Explanation: The PVST provides Layer 2 load-balancing for the VLAN on which it runs. You can create different logical topologies by using the VLANs on your network to ensure that all of your links are used but that no one link is oversubscribed. Each instance of PVST on a VLAN has a single root switch. This root switch propagates the spanning-tree information associated with that VLAN to all other switches in the network. Because each switch has the same information about the network, this process ensures that the network topology is maintained and optimized per VLAN. Reference: http://www.cisco.com/en/US/docs/switches/lan/catalyst3750x_3560x/software/release/12.2 _55_se/configuration/guide/swstp.html Question No: 60 – (Topic 2) Which three of these statements regarding 802.1Q trunking are correct? (Choose three.) 1. 802.1Q native VLAN frames are untagged by default. 2. 802.1Q trunking ports can also be secure ports. 3. 802.1Q trunks can use 10 Mb/s Ethernet interfaces. 4. 802.1Q trunks require full-duplex, point-to-point connectivity. 5. 802.1Q trunks should have native VLANs that are the same at both ends. Answer: A,C,E Explanation: By default, 802.1Q trunk defined Native VLAN in order to forward unmarked frame. Switches can forward Layer 2 frame from Native VLAN on unmarked trunks port. Receiver switches will transmit all unmarked packets to Native VLAN. Native VLAN is the default VLAN configuration of port. Note for the 802.1Q trunk ports between two devices, the same Native VLAN configuration is required on both sides of the link. If the Native VLAN in 802.1Q trunk ports on same trunk link is properly configured, it could lead to layer 2 loops. The 802.1Q trunk link transmits VLAN information through Ethernet. 100% Ensurepass Free Download! Download Free Demo:200-125 Demo PDF 100% Ensurepass Free Guaranteed! 200-125 Dumps EnsurePass ExamCollection Testking Lowest Price Guarantee Yes No No Up-to-Dated Yes No No Real Questions Yes No No Explanation Yes No No PDF VCE Yes No No Free VCE Simulator Yes No No Instant Download Yes No No Leave a Reply Your email address will not be published. Required fields are marked * This site uses Akismet to reduce spam. Learn how your comment data is processed.
__label__pos
0.968635
Responsive Web Design Responsive Web Design Responsive Web Design in Dubai UAE Responsive Web Design in Dubai UAE : Responsive Web Design: Creating a Seamless User Experience Across Devices In today’s digital age, having a strong online presence is essential for businesses and individuals alike. With the widespread use of smartphones, tablets, and computers, it’s crucial to ensure that your website looks and functions optimally across all devices. This is where responsive web design comes into play. What is Responsive Web Design? Responsive web design is an approach to creating websites that provide an optimal viewing and interaction experience across a wide range of devices. This means that the layout and content of the website automatically adjust to fit the screen size, resolution, and capabilities of the device being used. Why is Responsive Web Design Important? With the increasing number of people accessing the internet through smartphones and tablets, having a responsive website is no longer just a nice-to-have feature – it’s a necessity. According to research, mobile devices now account for over half of global web traffic. This means that if your website isn’t optimized for mobile users, you could be missing out on a significant portion of your audience. Additionally, responsive web design is also essential for search engine optimization (SEO). Google, the largest search engine in the world, has made mobile-friendliness a key factor in its ranking algorithm. This means that websites that are not mobile-friendly are likely to rank lower in search results, potentially leading to a loss of traffic and business. The Benefits of Responsive Web Design 1. Improved User Experience: A responsive website provides a seamless and consistent user experience across all devices, leading to higher engagement and satisfaction. 2. Increased Reach: By optimizing your website for mobile users, you can reach a larger audience and tap into new markets. 3. Better SEO: Responsive websites are favored by search engines, leading to higher rankings and increased organic traffic. 4. Cost-Effectiveness: Instead of creating separate websites for different devices, responsive web design allows you to maintain a single website that adapts to all screens, saving time and resources. 5. Future-Proofing: As new devices with varying screen sizes and resolutions continue to emerge, responsive web design ensures that your website remains compatible with the latest technology. Best Practices for Responsive Web Design To ensure that your website is fully responsive, here are some best practices to keep in mind: 1. Use a Mobile-First Approach: Start by designing for mobile devices and then scale up to larger screens. This ensures that the most important content and features are prioritized for mobile users. 2. Flexible Grids and Layouts: Use fluid grids and flexible layouts that can adapt to different screen sizes without compromising the design or functionality. 3. Media Queries: Use CSS media queries to apply different styles based on the screen size, orientation, and resolution of the device. 4. Optimized Images: Use responsive images that are appropriately sized for different devices to minimize load times and bandwidth usage. 5. Prioritize Performance: Optimize your website for speed and performance to ensure a smooth user experience across all devices. Responsive Web Design in Action To illustrate the impact of responsive web design, let’s take a look at an example. Imagine a user browsing a non-responsive website on their smartphone. They would have to zoom in and out to read the content, and the layout may appear distorted or inaccessible. This can lead to frustration and a high bounce rate as users abandon the site in favor of a more user-friendly experience. On the other hand, a responsive website would automatically adjust to fit the user’s screen, providing a seamless and enjoyable browsing experience. Conclusion In today’s multi-device world, responsive web design is no longer an option – it’s a requirement. By implementing responsive web design, you can ensure that your website is accessible and user-friendly across all devices, leading to improved user experience, increased reach, better SEO, and cost-effectiveness. As technology continues to evolve, responsive web design will remain essential for creating a seamless and effective online presence. Embracing responsive web design is key to staying ahead in the digital landscape and meeting the needs of today’s diverse internet users. Digital Marketing Agency in Dubai for Entrepreneurs, Startups and Small Businesses in UAE. Best Digital Marketing Agency for Small Startups.in Dubai UAE. https://internetmarketingseo2.info/ Responsive Web Design in Dubai UAE Responsive Web Design in Dubai UAE : Best Digital Marketing Agency in Dubai UAE : Digital Marketing Agency in Dubai for Entrepreneurs, Startups and Small Businesses in Dubai, United Arab Emirates Digital Marketing Agency in Dubai for Entrepreneurs, Startups and Small Businesses in UAE. Best Digital Marketing Agency for Small Startups.in Dubai UAE. https://internetmarketingseo2.info/ Responsive Web Design in Dubai UAE Best Digital Marketing Agency in Dubai UAE : Digital Marketing Agency in Dubai for Entrepreneurs, Startups and Small Businesses in Dubai, United Arab Emirates Best Digital Marketing Agency in Dubai UAE : Digital Marketing Agency in Dubai for Entrepreneurs, Startups and Small Businesses in Dubai, United Arab Emirates Social Media Links : Facebook : Twitter : Youtube  : Linkedin Instagram : Pinterest : TikTok : WordPress : Google : Blogger : Medium : All-Online-Marketing.com
__label__pos
0.970683
Rating: 4.0 ![](./images/logo.png) We're initially provided with a `.ntfs` volume called `family.ntfs`. Mounting the volume can be achived like so: ``` sudo mount family.ntfs /mnt ``` This will position the file at the `/mnt` file path and gives us the ability to explore the filesystem. On initial inspection the operating system present on the drive is Windows based. Alongside this, lots of the files on the system are empty. This explains the tiny `25mb` size. In the `/Users/Family/Documents` path is a text file containing: ``` I keep pictures of my credentials in extended attributes. ``` With my experience I know this is a well known technique for 'hiding' information is a special type of file metadata. This is called the [Extended Attributes](https://en.wikipedia.org/wiki/Extended_file_attributes). Any hidden attributes can be detected using the tool `getfattr`. Running it on the `credentials.txt` reveals a hidden attribute ``` # file: credentials.txt user.FILE0 ``` Reading through the man page of the `getfattr` tool gives the the command line option: ``` --only-values Dump out the raw extended attribute value(s) without encoding them. ``` This therefore, lets us retrieve the data. Pipping it into a file like so: ``` getfattr --only-values credentials.txt > flag.png ``` Gives us the image: ![](./images/flag.png) FLAG: ``` CTF{congratsyoufoundmycreds} ``` Original writeup (https://github.com/AidanFray/CTF_Writeups/blob/master/2019/GoogleCTF/BeginnerQuests/HomeComputer/README.md).
__label__pos
0.999693
Search Images Maps Play YouTube News Gmail Drive More » Sign in Accessible Version For Screenreader Users Patents 1. Advanced Patent Search Publication numberUS7319698 B2 Publication typeGrant Application numberUS 10/378,293 Publication dateJan 15, 2008 Filing dateMar 3, 2003 Priority dateJul 18, 2002 Fee statusLapsed Also published asCN1469581A, US20040013121 Publication number10378293, 378293, US 7319698 B2, US 7319698B2, US-B2-7319698, US7319698 B2, US7319698B2 InventorsLevan Higashigawa, Shoichi Sano, Yuzuru Koga, Tomokazu Shirakura, Makoto Hyuga Original AssigneeFujitsu Limited Export CitationBiBTeX, EndNote, RefMan External Links: USPTO, USPTO Assignment, Espacenet Recovery system for restoring preserved regeneration data US 7319698 B2 Abstract A receiver including: a data regeneration mechanism that receives and regenerates data transmitted in units of a packet; a storage device in which received data is stored; a packet ID extraction unit that extracts a packet ID indicating a packet to be re-received; and a retransmission request unit that transmits retransmission request information, which requests retransmission of a packet to be re-received, to a transmitter. The transmitter retransmits only a packet, which is identified with a packet ID specified in the retransmission request information sent from the receiver, to the receiver. The receiver overwrites data, which is stored in the storage device, with data contained in a packet retransmitted from the transmitter. There is thus provided a recovery system for restoring data, which is preserved in the storage device, while regenerating data without stopping a motion picture irrespective of whether a lost packet is present. Images(10) Previous page Next page Claims(3) 1. A receiver comprising: a received data storage device in which data received from a transmitter in accordance with a protocol including no retransmission procedure is stored; a retransmission frequency determination unit determining the number of times of retransmission, by which said transmitter is requested to retransmit a packet to be re-received, according to data received in accordance with the protocol including no retransmission procedure; a re-reception packet ID transmission unit detecting the same number of packet IDs, which indicate packets to be re-received, as the number of times determined by said retransmission frequency determination unit, and transmitting the packet IDs to said transmitter in accordance with a protocol including a retransmission procedure; a plurality of re-reception packet storage devices in which the same number of packets to be re-received and sent from said transmitter as the number of times determined by said retransmission frequency determination unit is stored after being fetched; and a data synthesizer synthesizing an output of said received data storage device and an output of said re-reception packet storage device so as to produce correct data. 2. A recovery system for restoring preserved regeneration data, comprising a transmitter that transmits data in units of a packet, and a receiver that receives data from said transmitter in units of a packet, wherein: said transmitter includes: a transmitted data storage device in which transmitted data is stored; and an interface through which data is transmitted to said receiver, and through which a packet that is identified with a packet ID indicating a packet to be re-received and being sent from said receiver and that is read from said transmitted data storage device is transmitted to said receiver a plurality of times in response to a request sent from said receiver; said receiver includes: a received data storage device in which data received in accordance with a protocol including no retransmission procedure is stored; a retransmission frequency determination unit that determines the number of times of retransmission, by which said transmitter is requested to retransmit a packet to be re-received, according to data received in accordance with the protocol including no retransmission procedure; a re-reception packet ID transmission unit detecting the same number of packet IDs, which indicate packets to be re-received, as the number of times determined by said retransmission frequency determination unit, and transmitting the packet IDs to said transmitter in accordance with a protocol including a retransmission procedure; a plurality of re-reception packet storage devices in which the same number of packets to be re-received and transmitted from said transmitter as the number of times determined by said retransmission frequency determination unit is stored; and a data synthesizer synthesizing an output of said received data storage device and an output of said re-reception packet storage device so as to produce correct data; said plurality of re-reception packet storage devices receives the last packet to be re-received in accordance with the protocol including a retransmission procedure, and receives packets to be re-received other than the last packet to be re-received in accordance with the protocol including no retransmission procedure. 3. A recovery method for restoring preserved regeneration data, comprising a transmission step of transmitting data from a transmitter in units of a packet, and a reception step of receiving data, which is transmitted at said transmission step, in units of a packet using a receiver, wherein: said transmission step includes an interface step of transmitting data to said receiver in accordance with a protocol including no retransmission procedure, and a storage step of storing transmitted data in a first storage device; said reception step includes: a packet ID extraction step of extracting a packet ID, which indicates a packet to be re-received, from data that is received from said transmitter in accordance with the protocol including no retransmission procedure; a second storage step of storing a packet ID, which indicates a packet to be re-received and is extracted by said packet ID extraction unit, in a second storage device; a regeneration step of regenerating received data; a third storage step of storing received data in a third storage device; and a retransmission setting step of transmitting a packet ID, which indicates a packet to be re-received and stored in said second storage device, to said transmitter in accordance with a protocol including a retransmission procedure after the reception of the data, at said transmission step, a packet identified with a packet ID, which indicates a packet to be re-received and is sent from said retransmission setting unit, is read from said first storage device, and transmitted to said receiver in accordance with the protocol including a retransmission procedure; at said reception step, data that is contained in a packet identified with a packet ID indicating a packet to be re-received and that is stored in said third storage device is overwritten with data contained in a packet that is identified with the packet ID indicating a packet to be re-received and that is sent from said transmitter. Description BACKGROUND OF THE INVENTION 1. Field of the Invention The present invention relates to a recovery system for restoring preserved regeneration data. More particularly, the present invention is concerned with a recovery system that restores data using a backup when a transmitter transmits, in real time, image data or the like to a receiver in the form of a packet and the data received by the receiver suffers an error or a loss. 2. Description of the Related Art A transmitter-receiver system consists mainly of a transmitter that distributes data including image data and places data created in real time, or existing data, on a network, and a receiver that receives and regenerates the data stream. In the transmitter-receiver system, the receiver includes a data reception mechanism, a data regeneration mechanism that regenerates an output of the data reception mechanism, and a storage device in which the output of the data reception mechanism is preserved. The receiver that gives priority to regeneration does not include a large-capacity intermediate buffer. This is intended to prevent the speed of received data from being delayed in a data flow transmitted from the transmitter while keeping a time stamp specified in a data stream transmitted therefrom. Moreover, the receiver does not issue a regeneration request to the transmitter despite the occurrence of a lost packet. When the receiver preserves data while regenerating other data, an error packet or data from which a packet has been lost may be preserved in the storage device included in the receiver, though it depends on the condition of a network or the performance of the receiver (the speed at which a hard disk is written or the capability of a CPU). As for conventional recovery systems for restoring preserved regeneration data, for example, one described in Japanese Unexamined Patent Publication No. 10-70523 is well known. The publication disclosed an art that, when a transmitting side retransmits data, a receiving side indicates in the form of a bit map what packet has caused an error, and transmits the bit map to the transmitting side. Compared with a method in which a transmitting side follows a retransmission procedure every time an error packet is detected, the disclosed method proves efficient. Moreover, a recovery system disclosed in Japanese Unexamined Patent Publication No. 10-308932 is also well known. The publication is analogous to Japanese Unexamined Patent Publication No. 10-70523. Namely, the publication has disclosed an art attempting to improve the efficiency in recognizing an error (in units of a block) and in retransmitting packets (in units of a block) by managing packets in units of a block, which is composed of a plurality of packets, at a receiving side. According to the art disclosed in the Japanese Unexamined Patent Publication No. 10-70523, the efficiency in retransmission is improved. However, because of the inclusion of the retransmission procedure, when motion picture data is transmitted from a transmitting side to a receiving side, an event where part of the motion picture received by the receiving side is lost occurs. This poses a problem in that the motion picture may stop or become hard to view. According to the art disclosed in the Japanese Unexamined Patent Publication No. 10-308932, similarly to the Japanese Unexamined Patent Publication No. 10-70523, when a transmitting side retransmits data, a motion picture received by a receiving side may temporarily stop. SUMMARY OF THE INVENTION Accordingly, an object of the present invention is to provide a recovery system for restoring preserved regeneration data that enables listening and viewing according to restored regeneration data. In the recovery system, a receiving side regenerates data in real time without stopping a motion picture irrespective of whether an error packet or a data from which a packet has been lost is present. During the regeneration, the receiving side compensates for an error or for a loss of a packet in data stored in a storage device in the receiving side. In order to accomplish the above object, according to the first aspect of the present invention, there is provided a receiver that recovers regeneration data using a backup. The receiver includes a retransmission request unit that transmits retransmission request information, which specifies a packet identifier (hereinafter ID) indicating a packet to be reloaded, to a transmitter. Data contained in a packet retransmitted from the transmitter is overwritten on corresponding data stored in a storage device. Consequently, the receiver can preserve regeneration data without suspending regeneration. Moreover, even if an error packet or a data from which a packet has been lost is found, the necessary packet alone is retransmitted, and correct data is stored in the storage device. Therefore, if stored data is regenerated, correct data can be regenerated. According to the second aspect of the present invention, there is provided a transmitter that transmits data to a receiver in units of a packet. The transmitter retransmits only a packet, which is identified with a packet ID specified in reload request information sent from the receiver, to the receiver. Consequently, the transmitter need not preserve all transmitted data. This leads to a saving of hardware resources. According to the third aspect of the present invention, there is provided a recovery system for restoring preserved regeneration data. The recovery system consists mainly of a receiver and a transmitter. The receiver includes: a data regeneration mechanism that receives data in units of a packet and regenerates the received data; a storage device in which the received data is stored; a packet ID extraction unit that extracts a packet ID indicating a packet to be re-received; and a retransmission request unit that transmits retransmission request information, which specifies a packet ID indicating a packet to be re-received, to the transmitter. The transmitter retransmits only a packet, which is identified with the packet ID specified in the retransmission request information sent from the receiver, to the receiver. The receiver overwrites data stored in the storage device with data contained in a packet retransmitted from the transmitter. According to the fourth aspect of the present invention, preferably, the transmitter included in the recovery system in accordance with the third aspect has a buffer memory in which part of data to be transmitted is sequentially and temporarily held. The buffer memory has a size permitting the buffer memory to keep holding one transmitted packet during a predetermined time from the transmission of the one packet to the arrival of the retransmission request information from the receiver. Each packet held in the buffer memory is overwritten with a packet that will be newly transmitted after the elapse of the predetermined time. According to the fifth aspect of the present invention, there is provided a recovery system for restoring preserved regeneration data. The recovery system consists mainly of a transmitter that transmits data in units of a packet and a receiver that receives data from the transmitter in units of a packet. The transmitter includes an interface through which data is transmitted to the receiver in conformity with a protocol stipulating no retransmission procedure, and a first storage device in which transmitted data is stored. The receiver includes: a packet ID extraction unit that extracts a packet ID indicating a packet to be retransmitted from data that is received from the transmitter in conformity with the protocol stipulating no retransmission procedure; a second storage device in which a packet ID indicating a packet to be retransmitted and being extracted by the packet ID extraction unit is stored; a regeneration unit that regenerates received data; a third storage device in which the received data is stored; and a retransmission setting unit that transmits a packet ID, which indicates a packet to be re-received and is stored in the second storage device, to the transmitter in conformity with a protocol stipulating a retransmission procedure after reception of data. The transmitter reads a packet, which is identified by the packet ID that indicates a packet to be retransmitted and that is transmitted from the retransmission setting unit, from the first storage device. The transmitter then transmits the read packet to the receiver in conformity with the protocol stipulating a retransmission procedure. The receiver overwrites data, which is identified with the packet ID indicating a packet to be re-received and is stored in the third storage device, with data identified with the packet ID indicating a packet to be re-received and being sent from the transmitter. Consequently, the receiver receives data in conformity with the protocol stipulating no retransmission procedure and regenerates the data. Consequently, data can be regenerated in real time without the necessity of stopping a motion picture. Moreover, a packet to be reloaded is received in conformity with the protocol stipulating a retransmission procedure. A packet to be re-received can be received reliably. Moreover, a data synthesizer produces correct data all the time. Consequently, listening and viewing are enabled in nearly real time according to data that includes correct image data and is devoid of an error or a loss. According to the present invention, there is provided a program that instructs a computer to implement a method and a procedure according to which a receiver operates. BRIEF DESCRIPTION OF THE DRAWINGS FIG. 1 is a block diagram showing the configuration of a recovery system for restoring preserved regeneration data in accordance with a first embodiment of the present invention; FIG. 2 shows the first example of the system configuration employed in the first embodiment; FIG. 3 shows the second example of the system configuration employed in the first embodiment; FIG. 4 is a block diagram showing the outline configuration of a recovery system for restoring preserved regeneration data in accordance with a second embodiment of the present invention; FIG. 5 is a block diagram showing the configuration of a server included in the first example of the second embodiment of the present invention; FIG. 6 is a block diagram showing the configuration of a client included in the first example of the second embodiment of the present invention; FIG. 7 is a block diagram showing the configuration of a client included in the second example of the second embodiment of the present invention; FIG. 8 is a block diagram showing the configuration of a server 41 c included in the third example of the second embodiment of the present invention; and FIG. 9 is a block diagram showing the configuration of a client 42 c included in the third example of the second embodiment of the present invention. DESCRIPTION OF THE PREFERRED EMBODIMENTS Embodiments of the present invention will be described, in conjunction with the drawings, below. FIG. 1 is a block diagram showing the configuration of a recovery system for restoring preserved regeneration data in accordance with a first embodiment of the present invention. Referring to FIG. 1, the recovery system consists mainly of a transmitter (hereinafter referred to as a server 10) and a receiver (hereinafter referred to as a client 14). The server 10 includes a data creation mechanism 11, a data transmission mechanism 12, and a storage device (hard disk HD) 13. The client 14 includes a data reception mechanism 15, a data regeneration mechanism 16, a storage device (hard disk HD) 17, a packet ID extraction unit 18, and a reload request unit 19. Next, the operation of the system shown in FIG. 1 will be described below. In the server 10, the data creation mechanism 11 creates data in units of a packet using real-time data produced by a video camera or a microphone or existing data stored on a digital versatile (or video) disk (DVD) or a tape recorder. Image data and/or audio data created by the data creation mechanism are recorded in the storage device 13. The data transmission mechanism 12 transmits the data created by the data creation mechanism 11 or the data stored in the storage device 13 in units of a packet. In the client 14, the data reception mechanism 15 receives data from the server 10 in units of a packet. The data regeneration mechanism 16 regenerates the data contained in each of packets transferred from the data reception mechanism 15, and displays an image according to the data. The output of the data reception mechanism 15 is stored as regeneration data in the storage device 17. Even if an error has occurred in a received packet or even if a packet has lost from the received data, some data is stored in an area allocated to the packet. The packet ID extraction unit 18 extracts a packet identifier (ID) indicating a packet that must be re-received because an error has occurred in the data received by the data reception mechanism 15 or because a packet has lost from the received data. The retransmission request unit 19 transmits retransmission request information, which specifies a packet ID indicating a packet which must be retransmitted, to the data transmission mechanism 12 included in the server 10. The data transmission mechanism 12 retransmits only a packet, which is identified with the packet ID specified in the retransmission (reload) request information sent from the retransmission (reload) request unit 19, to the data reception mechanism 15 included in the client 14. The data reception mechanism 15 overwrites a corresponding packet stored in the storage device 17 with a packet retransmitted from the data transmission mechanism 12. Consequently, the data regeneration mechanism 16 regenerates in real time data received from the server 10. At the same time, correct data devoid of an error or a loss stemming from transmission performed in units of a packet is stored in the storage device 17. The stored data is therefore restored efficiently. FIG. 2 shows a first example of the system configuration employed in the first embodiment. In this example, transmitted data is not recorded in the storage device 13 included in the server 10. The data reception mechanism 15 receives data in units of a packet, that is, receives packets 1 to m. In the illustrated example, packet 2 is an error packet, and packets (n+1) to (m−1) are lost packets. Null data is written in areas in the storage device 17 allocated to the error packet and the lost packets. The null data may be any data with which the areas allocated to the packets are filled. Namely, an error may be stored as it is or all-zero data may be stored. The ID of the error packet and the IDs of the lost packets are extracted by the packet ID extraction unit 18. The retransmission request unit 19 requests the data transmission mechanism 12 included in the server 10 to retransmit packets identified with the extracted packet IDs. The data transmission mechanism 12 includes a buffer memory 21 in which part of data to be transmitted is sequentially temporarily held, and a subsequent packet memory 22 in which packets to be retransmitted are accumulated. The buffer memory has a size permitting the buffer memory to keep holding one transmitted packet during a predetermined time from the transmission of the one packet to the arrival of a retransmission request issued from the retransmission request unit 19 included in the client 14. Each packet in the buffer memory 21 is overwritten with a packet to be newly transmitted after the elapse of the predetermined time. When a retransmission request arrives, a packet identified with a packet ID specified in the request is copied from the buffer memory 21 to the subsequent packet memory 22 and is thus accumulated together with the packet ID in the subsequent packet memory 22. The contents of the subsequent packet memory 22 are transmitted to the client 14 according to proper timing. The client 14 acquires the subsequent packet as a recovery packet, and overwrites the null packet, which is stored in the storage device 17, with the recovery packet. The proper timing of transmitting data to be retransmitted may be the time within an available time during data transmission or the time after transmission of one video data. Thus, restored data is stored in the storage device 17 included in the client 14. Correct data, devoid of an error or a loss, stemming from transmission performed in units of a packet, can therefore be regenerated. FIG. 3 shows the second example of the system configuration employed in the first embodiment. In this example, all of data to be transmitted from the server is preserved in the storage device 13. Moreover, if a plurality of clients X and Y is present, memories 31X and 31Y associated with the clients are included in the server. Only a packet identified with a packet ID specified in retransmission request information sent from a receiver included in the client X is stored in the memory 31X associated with the client X. The packet is read from the memory 31X and retransmitted to the client X according to proper timing in the same manner as it is shown in FIG. 2. Similarly to the client included in the example shown in FIG. 2, the client X acquires the packet as a recovery packet, and overwrites a null packet stored in the storage device 17 with the recovery packet. In response to a retransmission request issued from the client Y, only a packet identified with a packet ID specified in reload request information is stored in the memory 31Y. The packet is read from the memory 31Y and then retransmitted to the client Y. The same applies to the case where a client Z and other clients that are not shown are present in addition to the clients X and Y. Namely, memories are included to be associated with the clients. Consequently, even if a plurality of clients is present, a necessary packet should merely be copied from the storage device 13 to an associated memory. Thus, the contents of a storage device included in each client can be restored efficiently. FIG. 4 is a block diagram showing the outline configuration of a recovery system for restoring preserved regeneration data in accordance with a second embodiment of the present invention. Referring to FIG. 4, the system consists mainly of a station (ST) 41 serving as a server (hereinafter a server 41), a home-use simple Internet terminal (Web pad) or a personal computer 42 serving as a client (hereinafter a client 42), and a TV monitor 43. The server 41 acts as an access point on a wireless LAN and includes a TV tuner, a hard disk, and a motion picture encoder. The server 41 transmits a television signal to the client 42 by radio. The client 42 enjoys television broadcasting on the TV monitor 53 connected thereto. The server 41 includes a hard disk (HDD) 44. When a transmitted television signal is recorded in the hard disk 44, the server 41 is used as a personal video recorder (PVR). Likewise, the client 42 includes a hard disk 45. When a received television signal is recorded in the hard disk 45, the client 42 is used as a personal video recorder (PVR). The server 41 encodes a television video signal, and transmits the resultant signal to the client 42 by radio. The client 42 decodes the received signal and displays a television picture on the TV monitor 43. The client 42 is normally a portable machine such as a Web pad or a personal computer. The client 42 can be readily moved in order to watch television. The system has drawbacks described below. (1) As the system is a wireless system, there is a high possibility that data may be lost. (2) As a motion picture is transmitted, a certain number of frames (for example, 30 frames per sec) must be attained per unit time. (3) As a motion picture is transmitted, the amount of data to be manipulated is large. Moreover, if an attempt is made to transfer a file while watching television, it takes much time to transfer a file. In consideration of the above drawbacks, the following requirements for the system must be satisfied: (1) It should be possible to watch television in real time. (2) Video data should be preserved in the client 42. The description given in conjunction with FIG. 4 may be applied to the first embodiment described in conjunction with FIG. 1 to FIG. 3. In order to make it possible to watch television in real time, a certain number of frames must be attained per unit time. For this reason, a protocol stipulating no retransmission procedure (for example, a user datagram protocol (UDP)) should be adopted. However, in order to preserve video data in the client 42, a protocol stipulating a retransmission procedure (for example, a transfer control protocol (TCP)) should be adopted as a protocol capable of guaranteeing data. After data is transmitted in conformity with a protocol stipulating no retransmission procedure, if backup data is transmitted in conformity with a protocol stipulating a retransmission procedure, it takes much time to transfer a file. According to the present embodiment, the above problem is resolved by adopting any of techniques implemented in example 1 to example 3 of the present embodiment. FIG. 5 is a block diagram showing the configuration of a server included in example 1 of the second embodiment of the present invention. Referring to FIG. 5, a server 41 a consists mainly of a TV tuner 52, an encoder 53, a packet ID appending unit 54, a first storage device (hard disk (HD1)) 55, a lost packet ID reader 56, a retransmission setting unit 57, and a wireless interface 58. The TV tuner 52 selects a desired signal component from a signal received through a TV antenna 51. The encoder 53 encodes the selected signal component. The packet ID appending unit 54 produces a packet containing the encoded signal, and appends a serial number as a packet ID to the packet. Transmitted data is preserved in the first storage device 55. FIG. 6 is a block diagram showing the configuration of a client included in example 1 of the second embodiment of the present invention. Referring to FIG. 6, a client 42 a consists mainly of a wireless interface 61, a packet ID extraction unit 62, a second storage device (HD2) 63, a decoder 64, a video display unit 65, a third storage device (HD3) 66, and a retransmission setting unit 67. The packet ID extraction unit 62 extracts a lost packet ID. The packet ID of a lost packet extracted by the packet ID extraction unit 62 is stored in the second storage device 63. The decoder 64 decodes received data. The received data is stored in the third storage device 66. The retransmission setting unit 67 transmits the packet ID of the lost packet, which is stored in the second storage device 63, to the transmitter in conformity with a protocol stipulating a retransmission procedure after reception of data. Next, the operation of the system including the server and client shown in FIG. 5 and FIG. 6 respectively will be described below. At the server 41 a, the TV tuner 52 selects a desired frequency component from a signal received through the TV antenna 51. The encoder 53 encodes the frequency component into a digital signal. The packet ID appending unit 54 produces a packet containing the digital signal and appends a packet ID to the packet. The packet ID is a serial number to be assigned in order to a received packet. However, the packet ID is not limited to the serial number but may be any identification code. The packet having the packet ID appended thereto is transmitted to the client 42 a via the wireless interface 58 in conformity with a protocol stipulating no retransmission procedure, that is, the UDP. The UDP is adopted as the protocol because TCP is unsuitable. Namely, if the TCP is adopted, a retransmission procedure of retransmitting a backup of a lost packet must be included. Therefore, when a motion picture must be transmitted at a predetermined speed of a predetermined number of frames per unit time, the predetermined number of frames cannot be transmitted during the unit time. In the client 42 a shown in FIG. 6, video data received in conformity with the UDP is transferred to the packet ID extraction unit 62 via the wireless interface 61. It is then checked whether a packet to be re-received, such as a lost packet or an error packet (the lost packet refers to a packet that must be reloaded), is present. Independently of the checking of whether the lost packet is present, a received packet is decoded by the decoder 64, and converted into a video signal by the video display unit 65. Consequently, a motion picture is displayed on the monitor. Since the UDP is a protocol stipulating no retransmission procedure, a motion picture represented by video data transmitted from the server 41 a is displayed on the monitor nearly in real time. On the other hand, when a packet to be re-received is detected, the packet ID of the packet is stored in the second storage device 63. The retransmission setting unit 67 transmits the packet ID to the server 41 a via the wireless interface 61 in conformity with the TCP. Independently of extraction of a lost packet, a received packet is stored as it is in the third storage device (HD3) 66. In the server 41 a, the lost packet ID reader 56 receives a lost packet ID from the client 42 a in conformity with the TCP, and reads a packet identified with the lost packet ID from the first storage device 55. The read packet is transmitted to the client 42 a via the retransmission setting unit 57 and wireless interface 58 in conformity with the TCP. In the client 42 a, a packet identified with the lost packet ID and stored in the third storage device (HD3) is overwritten with the received packet. Thus, both transmission of a motion picture in conformity with the UDP and transmission thereof in conformity with the TCP are utilized. While watching television is permitted without stoppage of a motion picture, data devoid of a loss or an error can be stored in the third storage device 66. Consequently, when the data stored in the third storage device 66 is regenerated later, a normal motion picture and sound can be enjoyed. After transmission is performed in conformity with the UDP, if transmission is performed in conformity with the TCP, a time corresponding to the sum of an UDP transmission time and a TCP transmission time is needed. For example, assuming that a movie is one hour long, a processing time is 2.1 hours calculated by summing up one hour and 1.1 hours (because of retransmission). In contrast, in example 1 of the present embodiment, a processing time is 1.1 hours calculated by summing up the UDP transmission time and a retransmission time. According to the art described in the Japanese Unexamined Patent Publication No. 10-70523, a processing time is 1.1 hours calculated by summing up a transmission time and a retransmission time. Since transmission and retransmission are performed nearly concurrently, a motion picture may stop temporarily. The foregoing processing times are listed below. TABLE 1 Estimated processing times for a motion picture of one hour long Processing time Merit and demerit Related art UDP transmission Display screen: time + TCP uninterrupted transmission time = Time: long 1 hour + 1.1 hour Known art UDP transmission Display screen: time + interrupted unprecedented Time: short retransmission time = 1 hour + 0.1 hour (retransmission time) Present embodiment UDP transmission Display screen: (example 1) time + unique uninterrupted retransmission time = Time: short 1 hour + 0.1 hour (retransmission time) By appropriately setting the retransmission setting unit 57 shown in FIG. 5 and/or the retransmission setting unit 67 shown in FIG. 6, a user can designate whether a packet identified with a lost packet ID should be retransmitted. FIG. 7 is a block diagram showing the configuration of a client included in example 2 of the second embodiment of the present invention. Referring to FIG. 7, the same reference numerals are assigned to components identical to those of the client included in example 1 shown in FIG. 6. In this example, a client 42 b consists mainly of a packet ID transmission unit 71, a fourth storage device (HD4) 62, and a correct data production unit 73. The packet ID transmission unit 71 transmits the ID of a lost packet to the server in conformity with the TCP. A packet identified with the lost packet ID is fetched and stored in the fourth storage device 72. The correct data production unit 73 synthesizes an output of the third storage device 66 and an output of the fourth storage device 72 so as to produce correct data. The configuration of the server included in example 2 is identical to the one included in example 1 and shown in FIG. 5. Next, the operation of a recovery system for restoring preserved regeneration data in accordance with example 2 will be described below. Transmission of a packet from the server to the client 42 b is identical to that in example 1. The packet ID transmission unit 71 transmits the ID of a lost packet extracted by the packet ID extraction unit 62 to the server via the wireless interface 61 in conformity with TCP. The server having received the lost packet ID retransmits a packet identified with the lost packet ID in conformity with TCP in the same manner as it does in example 1. The packet identified with the lost packet ID is stored in the fourth storage device (HD4) 72. The correct data production unit 73 synthesizes an output of the third storage device 66 and an output of the fourth storage device 72 and transfers the resultant correct data to the decoder 64. The decoder 64 decodes the data into an analog signal. The video display unit 65 transfers the analog signal to the monitor. According to the foregoing example 2, compared with example 1, as no data is lost, the image quality discerned on the monitor will not deteriorate. However, before a motion picture is displayed on the monitor, transmission of a lost packet ID in conformity with the TCP and transmission of the lost packet in conformity with the TCP are carried out concurrently. Therefore, a short time lag occurs. FIG. 8 is a block diagram showing the configuration of a server 41 c included in example 3 of the second embodiment of the present invention. Referring to FIG. 8, the same reference numerals are assigned to components identical to those shown in FIG. 5. In this example, the client 41 c includes a plurality of lost packet readers 81, 82, and 83 so that a plurality of lost packet IDs can be received from the client in conformity with TCP. Herein, every time a lost packet ID is received from the client, a packet identified with the lost packet ID is read from the first storage device (HD1) 55 and transmitted to the client. In the illustrated example, the number of lost packet readers is 3. The number of lost packet readers may be set to any required value. Transmitting a packet identified with a lost packet ID to the client is performed in conformity with UDP except for transmission of a packet identified with the last lost packet ID. The packet identified with last lost packet ID alone is transmitted to the client in conformity with the TCP. FIG. 9 is a block diagram showing the configuration of a client 42 c included in example 3 of the second embodiment of the present invention. Referring to FIG. 9, the same reference numerals are assigned to components identical to those shown in FIG. 7. The client 42 c included in example 3 has a plurality of lost packet ID extraction units 62, 94, and 95, a retransmission frequency determination unit 90, a plurality of lost packet ID transmission units 91, 92, and 93, a plurality of lost packet storage devices 96, 97, and 98, and a data synthesizer 99. The retransmission frequency determination unit 90 determines the number of times of retransmission, by which the server is requested to retransmit a packet identified with a lost packet ID, according to an error rate or a lost packet occurrence rate detected from data received in conformity with the UDP. The plurality of lost packet ID transmission units 91, 92, and 93 detects the same number of packet IDs indicating packets to be reloaded as the number of times determined by the retransmission frequency determination unit 90. The plurality of lost packet ID transmission units 91, 92, and 93 then transmits the detected packet IDs to the transmitter in conformity with a protocol stipulating a retransmission procedure. The same number of packets, which are identified with the lost packet IDs and sent from the server, as the number of times determined by the retransmission frequency determination unit 90, is fetched and stored in the plurality of lost packet storage devices 96, 97, and 98. The data synthesizer 99 synthesizes an output of a received data storage device 63 with outputs of the plurality of lost packet storage devices 96, 97, and 98 so as to produce correct data. Next, the operation of the system, including the server and client shown in FIG. 8 and FIG. 9 respectively, will be described below. Video data having packet IDs appended thereto is transmitted from the server 41 c to the client 42 c in conformity with UDP. The client 42 c receives the video data in conformity with UDP, and checks the packet IDs. The packet ID extraction unit 62 included in the client 42 c detects whether a lost packet or an error packet is present, and transfers lost packet IDs to the packet ID transmission unit 91 of the first stage. The retransmission frequency determination unit 90 determines the number of times of retransmission on the basis of the output of the packet ID transmission unit 91. In the illustrated example, the number of times of retransmission is three. Depending on the judgment made by the retransmission frequency determination unit 90, the number of times of retransmission may be larger or smaller. The lost packet ID transmission unit 91 transmits the lost packet IDs to the server 41 c in conformity with TCP. In the server 41 c having received the lost packet IDs, the lost packet reader 81 reads packets, which are identified with the lost packet IDs, from the first storage device 55 and retransmits the packets to the client 42 c in conformity with UDP. The client 42 c stores the retransmitted packets having the lost packet IDs in the fourth storage device (HD4) 96 via the second lost packet extraction unit 94. On the other hand, the second lost packet ID extraction unit 94 judges whether the received packets having the lost packet IDs further include a lost packet. The retransmission frequency determination unit 90 re-determines the number of times of retransmission according to an output of the second lost packet ID extraction unit 94. Assume that it is determined that two more times of retransmission are needed. The second lost packet ID transmission unit 92 transmits lost packet IDs to the server 41 c in conformity with the TCP. In the server 41 c having received the lost packet IDS, the second lost packet reader 82 reads packets, which are identified with the lost packet IDs, from the first storage device 55, and retransmits the packets to the client 42 c in conformity with UDP. The client 42 c stores the retransmitted packets having the lost packet IDs in the fifth storage device (HD5) 97 via the third lost packet extraction unit 95. On the other hand, the third lost packet ID extraction unit 95 judges whether the received packets having the lost packet IDs further include a lost packet. The retransmission frequency determination unit 90 re-determines the number of times of retransmission according to an output of the third lost packet ID extraction unit 95. If it is determined that one more time of retransmission is needed, the third lost packet ID transmission unit 93 transmits a lost packet ID to the server 41 c in conformity with the TCP. In the server 41 c having received the lost packet ID, the third lost packet reader 83 reads a packet, which is identified with the lost packet ID, from the first storage device 55, and retransmits the packet to the client 42 c in conformity with TCP. The client 42 c stores the retransmitted packet having the lost packet ID in the sixth storage device (HD6) 98. As mentioned above, the last packet having the lost packet ID is retransmitted in conformity with TCP, and the other packets are transmitted in conformity with UDP. The data synthesizer 99 synthesizes outputs of the third storage device (HD3) 63, fourth storage device (HD4) 96, fifth storage device (HD5) 97, and sixth storage device (HD6) 98 so as to produce correct video data. The video data is stored in a seventh storage device (HD7) 101. The decoder 64 decodes the video data into an analog signal, and the video display unit 65 transfers the analog signal to the monitor. As mentioned above, according to example 3 of the second embodiment of the present invention that is a development of example 2, retransmission data (retransmission 1) is transmitted in conformity with UDP. When it is detected that some packets, in data transmitted in conformity with UDP, have been lost, retransmission of the packets (retransmission 2) is requested. Furthermore, when it is detected that a packet in the data retransmitted during retransmission 2 has been lost, the packet is retransmitted (retransmission 3). Thus, retransmission in conformity with UDP is performed at a plurality of steps. At the last step, retransmission is performed in conformity with TCP. Moreover, an amount of data to be retransmitted each time may be monitored in order to dynamically change the number of retransmission steps. If a large number of packets has lost, a delay time to be given to a video signal representing a motion picture (a buffering time in the HD3) is increased and the number of retransmission steps is increased. Thus, the losses can be compensated for. According to example 2, retransmission in conformity with TCP is added. In contrast, according to example 3, lost data is compensated for by performing many retransmissions in conformity with UDP before performing retransmission in conformity with TCP. Consequently, the load to be incurred by a network can be reduced and a delay time can be shortened. If the number of lost packets is small, a delay to be given to a video signal representing a motion picture by the HD3 (a buffering time in the HD3) is reduced and the number of retransmission steps is decreased. Consequently, the total delay time to be given to the video signal representing a motion picture can be shortened. If the number of lost packets is negligible, retransmission to be performed in conformity with TCP may be omitted. Consequently, depending on the condition of a network, a delay time to be given to a video signal representing a motion picture and image quality can be automatically balanced. Moreover, priority levels assigned to the delay time and image quality can be varied arbitrarily and manually. As is apparent from the above description, according to the present invention, there is provided a recovery system for restoring preserved regeneration data that enables viewing and listening using restored regeneration data. In the recovery system, a receiving side regenerates data without stopping a motion picture irrespective of whether an error packet or a lost packet is present. Meanwhile, the receiving side compensates for an error or a loss contained in data preserved in a storage device included therein. Patent Citations Cited PatentFiling datePublication dateApplicantTitle US6377981Nov 20, 1997Apr 23, 2002Cyberstar, L.P.Modular digital data communication cyberstation and cyberserver US20020152299 *Jan 22, 2002Oct 17, 2002Traversat Bernard A.Reliable peer-to-peer connections US20050094667 *Nov 15, 2004May 5, 2005Telefonaktiebolaget Lm Ericsson (Publ)Flexible ARQ for packet data transmission US20060155870 *Mar 14, 2006Jul 13, 2006Apple Computer Inc.Connectionless protocol US20060256794 *Apr 18, 2006Nov 16, 2006Qualcomm IncorporatedMethod and apparatus for resolving ambiguity in reception of multiple retransmitted frames EP1164790A2Jun 8, 2001Dec 19, 2001Sony CorporationTelevision reception system, channel selection apparatus and display apparatus JP2001177523A Title not available JP2001186423A Title not available JP2001218273A Title not available JP2001358966A Title not available JP2001524765A Title not available JP2002084239A Title not available JP2002169738A Title not available JPH1070523A Title not available JPH1118086A Title not available JPH06252897A Title not available JPH07221789A Title not available JPH10308932A Title not available WO1999027462A1Nov 12, 1998Jun 3, 1999Cyberstar L PModular digital data communication cyberstation and cyberserver Referenced by Citing PatentFiling datePublication dateApplicantTitle US7516257 *Sep 27, 2005Apr 7, 2009Intel CorporationMechanism to handle uncorrectable write data errors US7663665 *Apr 24, 2007Feb 16, 2010Canon Kabushiki KaishaCommunication device and method for transferring video-stream data to a display device and a storage device US8276034 *Dec 23, 2008Sep 25, 2012Ricoh Company, LimitedInformation processing apparatus, information processing method, and computer program product US20090199060 *Dec 23, 2008Aug 6, 2009Ricoh Company, Ltd.Information processing apparatus, information processing method, and computer program product US20110154172 *Dec 13, 2010Jun 23, 2011Electronics And Telecommunications Research InstituteApparatus and method for assessing image quality in real-time Classifications U.S. Classification370/394, 370/474, 714/748, 714/749, 714/746 International ClassificationH04N5/91, H04L12/28, H04N5/765, H04L1/16, H04L12/56 Cooperative ClassificationH04L1/16 European ClassificationH04L1/16 Legal Events DateCodeEventDescription Mar 6, 2012FPExpired due to failure to pay maintenance fee Effective date: 20120115 Jan 15, 2012LAPSLapse for failure to pay maintenance fees Aug 22, 2011REMIMaintenance fee reminder mailed Mar 3, 2003ASAssignment Owner name: FUJITSU LIMITED, JAPAN Free format text: ASSIGNMENT OF ASSIGNORS INTEREST;ASSIGNORS:HIGASHIGAWA, LEVAN;SANO, SHOICHI;KOGA, YUZURU;AND OTHERS;REEL/FRAME:013836/0487 Effective date: 20030109
__label__pos
0.695351
 Python: Find the number of combinations of a,b,c and d - w3resource w3resource Python: Find the number of combinations of a,b,c and d Python Basic - 1: Exercise-37 with Solution Write a Python program which reads an integer n and find the number of combinations of a,b,c and d (0 ≤ a,b,c,d ≤ 9) where (a + b + c + d) will be equal to n. Input: n (1 ≤ n ≤ 50) Sample Solution: Python Code: import itertools print("Input the number(n):") n=int(input()) result=0 for (i,j,k) in itertools.product(range(10),range(10),range(10)): result+=(0<=n-(i+j+k)<=9) print("Number of combinations:",result) Sample Output: Input the number(n): 15 Number of combinations: 592 Flowchart: Flowchart: Python - Find the number of combinations of a,b,c and d Python Code Editor: Have another way to solve this solution? Contribute your code (and comments) through Disqus. Previous: Write a Python program to compute the amount of the debt in n months. The borrowing amount is $100,000 and the loan adds 5% interest of the debt and rounds it to the nearest 1,000 above month by month. Next: Write a Python program to print the number of prime numbers which are less than or equal to an given integer. What is the difficulty level of this exercise? Test your Python skills with w3resource's quiz  Python: Tips of the Day Checks if the given number falls within the given range. Example: def tips_range(n, start, end = 0): return start <= n <= end if end >= start else end <= n <= start print(tips_range(2, 4, 6)) print(tips_range(4, 8)) print(tips_range(1, 3, 5)) print(tips_range(1, 3)) Output: False True False True  
__label__pos
0.998989
Scilab Home page | Wiki | Bug tracker | Forge | Mailing list archives | ATOMS | File exchange Please login or create an account Change language to: English - Français - Português - Please note that the recommended version of Scilab is 6.1.0. This page might be outdated. See the recommended documentation of this function Scilab help >> Elementary Functions > Complex > complex complex Create a complex number. Calling Sequence c=complex(a) c=complex(a,b) Arguments a a 1-by-1 or a n-by-m real matrix of doubles, the real part. If a has an imaginary part, an error is generated. b a 1-by-1 or a n-by-m real matrix of doubles, the imaginary part (default b=0). If b has an imaginary part, an error is generated. c a n-by-m complex matrix of doubles, the complex number. Description c=complex(a) creates a complex number from its real part a and zero as the imaginary part. c=complex(a,b) creates a complex number from its real part a and imaginary part b. This function is a substitute for expressions such as a+%i*b, especially in cases where the complex arithmetic interferes with particular floating point numbers such as %inf or %nan. Examples In the following example, we create a complex number from its real and imaginary parts. complex(1,2) complex([1 2],[3 4]) If a only is specified, then the imaginary part is set to zero. complex([1 2 3]) If a is a scalar and b is a matrix, then the result c has the same size as b. Similarily, if b is a scalar and a is a matrix, then the result c has the same size as a. c = complex([1 2 3], 4) c = complex(1, [2 3 4]) If a and b are two matrices with different sizes, an error is generated, as in the following session. -->complex(ones(2,3),ones(4,5)) !--error 10000 complex: Incompatible input arguments #1 and #2: Same sizes expected. at line 33 of function complex called by : complex(ones(2,3),ones(4,5)) The purpose of the complex function is to manage IEEE floating point numbers such as Nans or Infinities. In the following example, we show that creating a complex number where the real and imaginary parts are complex is not straightforward if we use the complex arithmetic. This is because the product %i times %inf is evaluated as (0+%i) * (%inf+%i*0). This produces the intermediate expression 0*%inf, which is %nan. -->%inf+%i*%inf ans = Nan + Inf The solution of this issue is to use the complex function. -->complex(%inf,%inf) ans = Inf + Inf See Also • imult — 虚数単位 i を乗じる Authors INRIA - Farid Belahcene 2011 - DIGITEO - Michael Baudin Scilab Enterprises Copyright (c) 2011-2017 (Scilab Enterprises) Copyright (c) 1989-2012 (INRIA) Copyright (c) 1989-2007 (ENPC) with contributors Last updated: Thu May 12 11:45:48 CEST 2011
__label__pos
0.840274
page.title Demo ( Source code ) Sample reviews aab2ce09bb322cca3d1835cd6ea6470844d83d8287db6770a0102fbd0457a291 N76pbr2oXJau8S4ksqnQxLjhqUzud5Yd6o or Connect with Traity to import your own reviews from different providers How does it work? As explained in our Decentralized Identity and Reputation draft, the 'cards' have to be signed to verify their authenticity. By signing the content we can confirm who generated them and we can also assure that the content was not tampered. Imagine we have this review (please refer to the review specification to see its format): [{ "name": "external_api_seller", "text": "Wonderful buyer, very fast payment.", "provider": "ebay", "created_at": 1424858051, "version": "1.0", "ratings": [ { "score": 87.23, "version": "0.2", "provider": "traity" } ] }] In order to sign the document, we need at least one private key. Using the example private key e2a4188f9adc09c0582c9740741117e0ada281c56e53164c9d7ef1fe882ccd95 we would generate the following document: [ { "review": { "name": "external_api_seller", "text": "Wonderful buyer, very fast payment.", "provider": "ebay", "created_at": 1424858051, "version": "1.0", "ratings": [ { "score": 87.5, "version": "0.2", "provider": "traity" } ] }, "signatures": [ { "address": "MwesPafEAcvxD2cPjMAFMVfpT26Lwkjfpv", "signature": "Hx4pyYBluWIyo5bvXk92cm1Rg1MJAmWtZcPuokOmvsMo7N8gkR8xKX13cdjSS9WcL9VRLsB0spyMDvf/koRBmj0=" } ] } ] The signature is generated with the ECDSA algorithm as used in Bitcoin, using the SHA2 digest of the document as the payload. Having this signature, we could easily detect if the text was modified or not: Bitcoin.network = :namecoin payload = Digest::SHA2.hexdigest(JSON.parse(review).to_json) puts Bitcoin.verify_message(address, signature, payload) Now, because this document has sensitive data that we might not want to store in the blockchain, we will generate a hash for the list of reviews and that is what we will store: 9bd4eda087f1dd60448c808bac0e713e2acf322ee97a02a15dfbcc8fc7145b9f Eventually, we also would like to confirm if the stored value was really generated by an authorized user, so we will add a signature in the stored document in order to be able to verify it using the public address, having this final value: { "hash":"9bd4eda087f1dd60448c808bac0e713e2acf322ee97a02a15dfbcc8fc7145b9f", "signature":"IINRlcFmrUcMmeBrKsM2Syz3IyAZznTuujx/HXB978JvzFjn+hHzt4M6+bFpx9ZrqGFrujF9zTGshDX5LME4Sw8=" } Generating the identifier As we mentioned before, we are using Namecoin's blockchain which can be a used as a key-value data store. From now, we will refer to the key as the identifier in order to prevent confussions with the private/public key term. The chosen identifier should accomplish the following points: In order to accomplish the first point, we will use the user Namecoin address as the first part of the identifier. So, by knowing the address which is a public information and does not reveal any private data, we could find the list of hashes for a specific user: /namecoind name_filter "MwesPafEAcvxD2cPjMAFMVfpT26Lwkjfpv_*" Because the identifier should be idempotent, we should know in advance how it will be calculated. This leads to the problem that someone could generate this identifier in our behalf. This can be solved if this identifier is generated using certain information that is only known by us like the private key we used to sign the reviews. So, the second part of the identifier is generated in this way: <address> + "_" + hash160("reviews_" + <provider> + "_" + <private_key>) which generates the following identifier for the provider 'airbnb' and: MwesPafEAcvxD2cPjMAFMVfpT26Lwkjfpv_67046ab4b5629a1e33993d5570a31cb3075f3a5
__label__pos
0.888412
Take the 2-minute tour × Stack Overflow is a question and answer site for professional and enthusiast programmers. It's 100% free, no registration required. I am new to Cocoa. I successfully created a TableView with two colomns , both containing text, using NSArrayController and binding . Now I want to achieve the same for custom TableView. My TableView contains two colomns, one should contain image and the other one should contain image description. I want to populate my table using binding . Can any one tell me how to achieve this. share|improve this question 1 Answer 1 Follow the following steps: In Xib/Nib files: 1. Take NSArrayController. 2. Take NSTableView and take two columns. 3. In first column drag NSImageCell and second one will be by default NSTextCell. 4. Now bind: • First arraycontroller to your NSObject or FileOwners. • Your mutablearray to arraycontroller . • Tableview first column to array controller inside whose controller key should be arrangesObjects and modelkeypath should be img. • Tableview second column to array controller inside whose controller key should be arrangesObjects and modelkeypath should be desc. (Note: While binding to table columns please check one option Validates Immediately, if you are using >10.6 os version) In header file: NSString *img; NSString *desc; NSMutableArray *arr; IBOutlet NSArrayController *arrCont; @property (readwrite,retain) NSString *img; @property (readwrite,retain) NSString *desc; @property (readwrite,retain) NSMutableArray *arr; In implementation file: self.arr = [[NSMutableArray alloc] init]; NSString *imageName = [[NSBundle mainBundle] pathForResource:@"yourImageName" ofType:@"tiff"]; NSImage *photoImage = [[NSImage alloc] initWithContentsOfFile:imageName]; NSMutableDictionary *dc = [NSMutableDictionary dictionary]; [dc setObject:photoImage forKey:@"img"]; [dc setObject:@"noImage" forKey:@"desc"]; [arr addObject:dc]; [self setArr:arr]; share|improve this answer Your Answer   discard By posting your answer, you agree to the privacy policy and terms of service. Not the answer you're looking for? Browse other questions tagged or ask your own question.
__label__pos
0.904155
Selligent to Panoply This page provides you with instructions on how to extract data from Selligent and load it into Panoply. (If this manual process sounds onerous, check out Stitch, which can do all the heavy lifting for you in just a few clicks.) What is Selligent? Selligent is a B2C omnichannel marketing automation platform. What is Panoply? Panoply can spin up a new Amazon Redshift instance in just a few clicks. Panoply's managed data warehouse service uses machine learning and natural language processing (NLP) to learn, model, and automate data management activities from source to analysis. It can import data with no schema, no modeling, and no configuration, and lets you use analysis, SQL, and visualization tools just as you would if you were creating a Redshift data warehouse on your own. Getting data out of Selligent Selligent exposes data on programs, transactional mailings, and data sources via an API. We weren't able to find any online documentation on using Selligent's API, so you'll have to work closely with the company to create code to access your data. Loading data into Panoply When you've identified all of the columns you want to insert, use the Reshift CREATE TABLE statement to create a table in your data warehouse to receive all the data. Once you have a table built, it may seem like the easiest way to replicate your data (especially if there isn't much of it) is to build INSERT statements to add data to your Redshift table row by row. If you have any experience with SQL, this probably will be your first inclination. Think again! Redshift isn't optimized for inserting data one row at a time. If you have a high volume of data to be inserted, you should load the data into Amazon S3 and then use the COPY command to load it into Redshift. Keeping Selligent data up to date At this point you've coded up a script or written a program to get the data you want and successfully moved it into your data warehouse. But how will you load new or updated data? It's not a good idea to replicate all of your data each time you have updated records. That process would be painfully slow and resource-intensive. Instead, identify key fields that your script can use to bookmark its progression through the data and use to pick up where it left off as it looks for updated data. Auto-incrementing fields such as updated_at or created_at work best for this. When you've built in this functionality, you can set up your script as a cron job or continuous loop to get new data as it appears in Selligent. And remember, as with any code, once you write it, you have to maintain it. If Selligent modifies its API, or the API sends a field with a datatype your code doesn't recognize, you may have to modify the script. If your users want slightly different information, you definitely will have to. Other data warehouse options Panoply is great, but sometimes you need to optimize for different things when you're choosing a data warehouse. Some folks choose to go with Amazon Redshift, Google BigQuery, PostgreSQL, or Snowflake, which are RDBMSes that use similar SQL syntax. If you're interested in seeing the relevant steps for loading data into one of these platforms, check out To Redshift, To BigQuery, To Postgres, and To Snowflake. Easier and faster alternatives If all this sounds a bit overwhelming, don’t be alarmed. If you have all the skills necessary to go through this process, chances are building and maintaining a script like this isn’t a very high-leverage use of your time. Thankfully, products like Stitch were built to solve this problem automatically. With just a few clicks, Stitch starts extracting your Selligent data via the API, structuring it in a way that is optimized for analysis, and inserting that data into your Panoply data warehouse.
__label__pos
0.549585
The Composite Design Pattern in PHP: Part I from Conceptual to Practical compositionShow Me the Practical! A defining characteristic of PHP programmers is their practicality. Ironically, because of their practical orientation, they sometimes overlook the practicality of the abstract and conceptual. Focusing too much on a single (albeit quite useful and practical) implementation is like focusing on a single pixel in a graphic image—you can only see the pixel and miss the larger picture. That’s because the focus is in the wrong place. If you want to see the whole picture (the whole pattern and its parts), the individual pixels are not very practical, even though they are the atomic matter in computer graphics. In other posts on this blog, I have stressed the importance of having a complete design pattern with all of the participants a given design pattern is designed to include and provided a look-up table to check the parts list of all of the core patterns the Gang of Four developed. So in selecting an example to launch a discussion of the Composite pattern, I wanted an example with all of its parts. You can download the files and play a sample using the following two buttons: PlayDownload However, too much of a love affair with the conceptual and abstract is equally problematic. If you cannot use a pattern to get something accomplished, why waste time with it? Ironically, design patterns were developed solely for practical purposes, but in order to accomplish those practical goals, they had to provide a set of patterns that would be useful for a wide range of certain recurring programming problems. With these concepts in mind, this post begins with a conceptual example to see how the pattern works and then follow it up with a simple more practical example. Overview First things first. The class diagram for the Composite design pattern is both very simple, but it hides a real beauty and subtle complexity. Figure 1 shows the basic pattern: Figure 1: Composite Design Pattern class diagram Figure 1: Composite Design Pattern class diagram Before going too far with an example, even an abstract one, consider the simplicity and irony of the pattern. First of all, the Composite implementation of the Component interface looks fairly cut and dried. However, the Leaf participant also implements the interface, but it doesn’t implement all of the abstract methods. It only implements the operation(). So maybe the interface is an abstract class? It doesn’t matter in PHP. If all of the abstract methods of a class that inherits an abstract class are not implemented, you get the following error: Fatal error: Class Leaf contains 1 abstract method and must therefore be declared abstract or implement the remaining methods Since you can only declare an abstract method within an abstract class, you really have no choice but to implement it. Likewise with an interface all methods must be implemented; so with this first abstract example, I decided to use an interface instead of an abstract class. Favoring Composition: A Tattoo for Your Spouse One of the fundamental principles of design patterns is, Favor object composition over class inheritance. Figure 3: Tats for PHP Developers Figure 3: Tats for PHP Developers I tried to get my wife to get a tattoo with that piece of wisdom, but she balked at the idea. (I have no idea why; it’d be cool.) If any pattern adheres to that dictum, it’s the Composite pattern. The Composite participants are made up of Leaf primitives. So you can think of Composite implementations as compositions. The Leaf participants have no children. Think of the Leaf participants as parts that can be employed to create Composite participants. For the longest time, that simple yet fundamental idea eluded me. In part, this was due to the Gang of Four’s intent statement that the pattern was to Compose objets into tree structures to represent part-whole hierarchies. I was too focused on the concept of hierarchy and not enough on compose to really appreciate the importance of the Composite design pattern. So here, the first thing to do is to realize that the hierarchy is a form of composition. Imagine an automobile assembly plant where you have lots of parts, yet with those parts you can assemble different models of cars. You just have to use different selections of parts to compose the different models. The fact that the parts have a hierarchic arrangement is for efficiency; not the final product itself. Figure two shows what we hope to create in this initial implementation: Figure 2: Different Compositions from Leaf Selections Figure 2: Different Compositions from Leaf Selections A Party of Four The Composite pattern is pretty simple when you consider the main four participants: • Component (IComponent): • Declares interface • Implements default behavior as appropriate • Declare an interface for access and managing its child components • Optionally defines an interface for accessing a components parent in the recursive structure and implements it if appropriate. • Leaf • Represents leaf objects in composition. • Defines behavior for primitive objects in the composition. • Composite • Defines behavior for components having children • Stores child components • Implements child-related operations in the Component interface • Client • Manipulates objects in the composition through the Component interface. Beginning with the Component interface (IComponent), this example uses an interface rather than an abstract class. The example was originally developed by Dr. Chandima Cumaranatunge in ActionScript 3.0 Design Patterns (O’Reilly, 2007), and has been re-written in PHP: <?php //IComponent.php interface IComponent { public function operation(); public function add(IComponent $comOn); public function remove(IComponent $comGone); public function getChild($someInt); } ?> The key methods in the IComponent interface are operation() and add(IComponent $comOn). The operation() method is a recursive one in the Composite class because it must include all of the child elements in the composed objects. In the Leaf class, the same operation must be implemented, but it is done without recursion because Leaf objects have no children. The add(IComponent $comOn) method includes type hinting in its parameter. It expects some kind of implementation of the interface. That means that the argument must be either a Composite object or a Leaf object. This is where programming to the interface comes in. The program doesn’t care where the component comes from (a Leaf or Composite); as long as it implements the interface. In these early abstract examples, we’re not implementing the remove(IComponent $comGone) and getChild($someInt) so that we can have a clear focus on what the composition is doing. Composite The Composite class in this example has the key features we need to address. Unlike the Leaf class, it can add implemented IComponent objects. These become the children of the Composite class. <?php //Composite.php class Composite implements IComponent { private $sName; private $aChildren;   public function __construct($sNodeName) { $this->sName=$sNodeName; $this->aChildren=array(); }   public function add(IComponent $comOn) { array_push($this->aChildren,$comOn); }   public function remove(IComponent $comGone) { //Code to remove component }   public function getChild($someInt) { //Code to get child by element value }   //Note: The following method is recursive public function operation() { echo $this->sName . "<br />"; foreach($this->aChildren as $elVal) { $elVal->operation(); } } } ?> The operation() method is recursive. It calls itself. The $elVal variable stores $aChildren elements as the operation() method iterates through the $aChildren array. Some of the objects will be Leaf ones and others will be Composite. As a Leaf object is iterated out of the array, it calls the Leaf object method ($Leaf->operation()), while Composite objects call the Composite’s operation() method ($Composite->operation()). To get a better idea of how this crucial operation works, comment out the echo statement first in the Leaf operation and run the program. Then uncomment out the echo statement in the Leaf, and comment out the echo statement in the Composite operation() method. You will see that each Leaf and Composite instance is processed by the operation() method of the corresponding object. Leaf The Leaf objects are best viewed as primitives.The Leaf participant in the pattern defines the behavior of the primitives. In this context, “primitives” are some kind of basic building blocks whether primitive data types such as integers, Booleans, or even strings. These can them be composed via the Composite object or be used independent of any composition. The Gang of Four provide an example where different Leaf classes are used to provide different drawing elements, such as lines, circles, and rectangles. These can then be composed to create a picture in the Composite object. While this example only has a single Leaf object, Composites usually have several. <?php //Leaf.php class Leaf implements IComponent { private $sName;   public function __construct($sNodeName) { $this->sName=$sNodeName; }   /* None of this batch of methods are used by Leaf */ /* However in order to correctly implement the interface */ /* you need some kind of implementation */ public function add(IComponent $comOn){} public function remove(IComponent $comGone){} public function getChild($someInt){}   /* Some userful content is required for the operation */ public function operation() { echo $this->sName . "<br />"; } } ?> The only functionally implemented method in the Leaf class is the operation(). You may wonder, Why bother implementing the interface if the bulk of the methods are not implemented? In programming to the interface instead of the implementation, the same interface is used by both Leaf and Composite classes. In requesting them, the Client can request either a primitive or composite object without having to distinguish one from the other. This makes the development of the Client much simpler, and Leaf classes may be added using the existing structure. Client <?php //Client.php ERROR_REPORTING( E_ALL | E_STRICT ); ini_set("display_errors", 1); function __autoload($class_name) { include $class_name . '.php'; } class Client { private $rootCompos;   public function __construct() { $this->rootCompos = new Composite("Root"); $n1=new Composite("-Composite 1"); $n1->add(new Leaf("--C1:leaf 1")); $n1->add(new Leaf("--C1:leaf 2")); $this->rootCompos->add($n1);   $n2=new Composite("-Composite 2"); $n2->add(new Leaf("--C2:leaf 3")); $n2->add(new Leaf("--C2:leaf 4")); $n2->add(new Leaf("--C2:leaf 5")); $this->rootCompos->add($n2);   $this->rootCompos->add(new Leaf("R:leaf 6"));   //Create a node $this->rootCompos->operation(); } } $worker=new Client(); ?> Since the example is minimal and abstract, it applies to any language, and after making adjustments for PHP, it works exactly as planned. (In the download, I included two examples; one using an abstract class and and the other an interface.) It’s primary role is to illustrate how the Composite design pattern works and the output shows the flow. In the following output, your can see the hierarchy shown in Figure 2 is generated by the program: Root -Composite 1 –C1:leaf 1 –C1:leaf 2 -Composite 2 –C2:leaf 3 –C2:leaf 4 –C2:leaf 5 R:leaf 6 In Part II of this series, we’ll take this same program and add concrete elements to the program for further illustration of what can be done with the Composite. Eventually, we’ll see how this pattern can be used in typical kinds of problems that PHP solves. For now, play around with the pattern, and if you have any comments or possible practical projects you’d consider using it for, send in a comment. Share Copyright © 2013 William Sanders. All Rights Reserved. 0 Responses to “The Composite Design Pattern in PHP: Part I from Conceptual to Practical” • No Comments Leave a Reply
__label__pos
0.984803
1. This site uses cookies. By continuing to use this site, you are agreeing to our use of cookies. Learn More. News Nvidia announces Volta-based Xavier SoC Discussion in 'Article Discussion' started by Gareth Halfacree, 29 Sep 2016. 1. Gareth Halfacree Gareth Halfacree WIIGII! Staff Administrator Super Moderator Moderator Joined: 4 Dec 2007 Posts: 13,135 Likes Received: 2,244 2. ModSquid ModSquid Member Joined: 16 Apr 2011 Posts: 494 Likes Received: 13 I understand there's a cost implication, but given they're already in mass production, isn't it cheaper to chuck a low-end Core/Pentium/Tegra/whatever chip in these "other" applications, rather than tee up a new fabrication line?   3. Guest-16 Guest-16 Guest Automotive will never use a consumer chip as they have specialist requirements inline with embedded: > Extensive validation + ISO certs. You complete the project, which is normally "yay sell time" then it's another 9+ months of testing. > Environmentally hardened - temp + packaging. > Unique IOs. This is becoming less the case as automated vehicles standardize around 'regular' IO > Auto is loooooong tail, like embedded. New car is 2-3 years design + 7 years of sales + 3 years of aftercare. You are contractually obligated to guarantee the chip will be around for at least 10 years. Core 2 Duo would just be seeing EOL in other words. > Customized designs to meet specific needs - you don't just slap as much as you can in one central processor. It's actually better to have certain fusions of parts around the vehicle with their own local, low power processing. That's more risk averse + uniform low temp + low power + small size to fit into nooks and crannies around the vehicle. This is why your usual $5 chip is now 5-10x as much.   4. Harlequin Harlequin Well-Known Member Joined: 4 Jun 2004 Posts: 7,071 Likes Received: 179 hope they don't get another bumpgate ;)   Share This Page
__label__pos
0.855166
WP Statistics is a popular and powerful WordPress plugin for tracking statistics on your WordPress website. However if you have any plugins or anything that creates dynamic pages that don’t have a post id assigned to them then you will get lots of errors in your web server error logs like this: WordPress database error You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '' at line 1 for query SELECT `page_id` FROM wp_statistics_pages WHERE `date` = '2020-11-13' AND `type` = '' AND `id` = made by require('wp-blog-header.php'), wp, WP->main, WP->parse_request, do_action_ref_array('parse_request'), WP_Hook->do_action, WP_Hook->apply_filters, rest_api_loaded, WP_REST_Server->serve_request, WP_REST_Server->dispatch, WP_Statistics_Rest->hit, WP_Statistics_Hits->Pages' One method to get rid of the error is to disable the Track all pages setting in WP Statistics Settings. However by doing this it will no longer log these dynamically generated pages anymore and also doesn’t seem to log custom post types either, among various other things, so this option is unacceptable. After trying out various things I settled on modifying the query by using the query filter like this: /** * Intercepts database queries. * * @param $query * * @return string */ function fix_wp_statistics_query( $query ) { // intercept WP Statistics query to prevent error log spam on listings page because it has no post id if ( strpos( $query, 'SELECT `page_id` FROM wp_statistics_pages WHERE `date`' ) !== false ) { if ( substr( $query, - 2 ) == '= ' ) { $query .= '0'; } } return $query; } add_filter( 'query', 'fix_wp_statistics_query' ); This will check every database query for the right query by comparing the start of the string before the date. If that matches it’ll check if the query ends with ‘= ‘ which is what happens when there is no post id and what causes the MySQL error. Then it will tack on a 0 to the end of the query so that it’s a valid MySQL query thereby stopping the error log spam. The page still works, things still work ok. One thing I thought about doing is creating a dummy page for using it’s post id instead of 0 but then there may be more than a single page without a post id on the site so I figured it was kind of pointless. It seems to still work and I haven’t seen any other errors occurring from this change so far. There are probably some other ways to go about it and maybe something better, ideally there would be a fix applied to the plugin itself but for now this is how I’ve done it. If you’ve found a better way please let me know.
__label__pos
0.77838
Metropolitan Area Network (MAN)       MAN interconnects users with computer resources in a geographic area or region larger than that covered by even a large LAN but smaller than the area covered by a WAN. The term is applied to the interconnection of networks in a city into a single large network (which may then also offer an efficient connection to a WAN). It is also used to interconnect several LANs by bridging them with backbone lines.      There are three important features that separate MANs from LANs or WANs: 1.  The network size falls immediate between LANs and WANs. A MAN typically covers an area between 5 and 50 KM diameter. Many MANs cover an area of the size of a city, although in some cases, MANs may be as small as a group of buildings or as large as New Delhi. 2.  A MAN (like a WAN) is generally not owned by a single organization. Generally, the Man is owned by any individual company or network provider or by a consortium of users. This level of service provided to each user must, therefore, be negotiated with the MAN .operator, and some performance guarantees are normally specified. 3.   We can use MAN as a large LAN in large scale. It is also frequently used to add a shared connection to other networks using a link to a WAN. MAN is used to provide regional networks which share the cost of access to a WAN. Ø  NOTE:- (i) MAN are networks spread over a city. For example, cable T.V. networks are spread over a city.                            (ii) MAN is a network that is utilized across multiple buildings. A MAN is much larger than a standard LAN but is not as large as a WAN. It is commonly used in school campuses, large universities and large companies with multiple buildings. Leave a Reply Your email address will not be published. Required fields are marked *
__label__pos
0.931917
Untitled [email protected] avatar unknown plain_text 10 days ago 2.2 kB 1 Indexable Never void setup() { Serial.begin(9600); } void loop() { int a; int b; int c; int d; int ab; int cd; int x; int langkah = 1; int pilihan; Serial.println("Mencari hasil dari Layang Layang"); Serial.println("1.Keliling"); Serial.println("2.Luas"); //Serial.println("pilihan anda : "); while (langkah == 1) { while (Serial.available()) { pilihan = Serial.parseInt(); langkah = 2; } } switch (pilihan) { case (1): Serial.print("anda memilih keliling, masukan a : "); while (langkah == 2) { while (Serial.available()) { a = Serial.parseInt(); Serial.println(a); langkah = 3; } } Serial.print("Silakan masukan nilai b : "); while (langkah == 3) { while (Serial.available()) { b = Serial.parseInt(); Serial.println(b); langkah = 4; } } Serial.print("Silakan masukan nilai c : "); while (langkah == 4) { while (Serial.available()) { c = Serial.parseInt(); Serial.println(c); langkah = 5; } } Serial.print("Silakan masukan nilai d : "); while (langkah == 5) { while (Serial.available()) { d = Serial.parseInt(); Serial.println(d); langkah = 6; } } x = a+b+c+d; Serial.print("hasil operasi dari keliling layang layang : "); Serial.println(x); Serial.println(); break; case (2): Serial.print("anda memilih keliling, masukan ab : "); while (langkah == 2) { while (Serial.available()) { ab = Serial.parseInt(); Serial.println(ab); langkah = 3; } } Serial.print("Silakan masukan nilai cd : "); while (langkah == 3) { while (Serial.available()) { cd = Serial.parseInt(); Serial.println(cd); langkah = 4; } } x = (ab*cd)/2; Serial.print("hasil operasi dari luas layang layang : "); Serial.println(x); Serial.println(); break; } } Leave a Comment
__label__pos
0.999996
1 $\begingroup$ I am trying to calculate the cooordinates of the center of a group of objects using this code: obj = bpy.context.active_object vcos = [ obj.matrix_world * v.co for v in obj.data.vertices ] findCenter = lambda l: ( max(l) + min(l) ) / 2 x,y,z = [ [ v[i] for v in vcos ] for i in range(3) ] center = [ findCenter(axis) for axis in [x,y,z] ] The problem is that "bpy.context.active_object" returns only the (bpy_struct, Object) of one OBJECT only (the first to render) and does not consider the other objects. As a result the obtained X,Y,Z coordinates are only correct for one object and not all of them. Any solution? Amir $\endgroup$ 3 • 1 $\begingroup$ try bpy.context.selected_objects $\endgroup$ – Gorgious Sep 9, 2020 at 13:48 • $\begingroup$ @Gorgious: This gives ('list' object has no attribute 'data') this is attribute of vertices used later in the code. I use this code after rendering, so I have a loop and I render hide some objects and render others and in each time I render an individual object I wanna get it X,Y,Z coordinates. For some rason, when I print the coordinates of (center) of each object rendered separately in the loop, i find the same coordinates and I find "obj" is only referring to the first rendered object and does not change when I render the other objects. Any advise???? $\endgroup$ – Amir Aly Sep 9, 2020 at 14:47 • $\begingroup$ What is not clear for me: do you want the center considering all objects at once (so 1 center for all) or the center for each object? (@Gorgious) $\endgroup$ – lemon Sep 9, 2020 at 15:04 2 Answers 2 2 $\begingroup$ Take this image : enter image description here bpy.context.active_object only gives a reference to the active object in the given context. This is Cube.002, with the light orange outline. Cube is selected, but it is not the active object. (dark orange outline). When you type bpy.context.selected_objects in the blender console you get an unordered list of all the selected objects. >>> bpy.context.selected_objects [bpy.data.objects['Cube'], bpy.data.objects['Cube.002']] In python you can iterate through a list of objects with a for loop. (Documentation) Your code, which is valid for one object, can be translated for a list to : import bpy for obj in bpy.context.selected_objects: if obj.type != 'MESH': continue # Sidenote : Use @ instead of * for element-wise multiplication in matrices vcos = [ obj.matrix_world @ v.co for v in obj.data.vertices ] findCenter = lambda l: ( max(l) + min(l) ) / 2 x,y,z = [ [ v[i] for v in vcos ] for i in range(3) ] center = [ findCenter(axis) for axis in [x,y,z] ] print(center) Result : enter image description here $\endgroup$ 2 • $\begingroup$ Given the OP's new question might pay to put the type checking in loop, eg if obj.type != 'MESH' : continue to avoid trying to process vertices of non mesh objects. blender.stackexchange.com/questions/196663/… $\endgroup$ – batFINGER Oct 7, 2020 at 13:54 • $\begingroup$ @batFINGER Good point ! Edited answer $\endgroup$ – Gorgious Oct 7, 2020 at 14:07 1 $\begingroup$ Bounds center is the middle of the bounding box. The local positions of the axis oriented bounding box of a mesh are generated for us and available in the object's bounding box property in local coordinates. The average of the bounding boxes 8 corner coordinates give the local center, which can be converted to global by multiplying by matrix world of owner. Pros, only summing 8 coordinates per object and matrix multiplying once Using the object's vertices is unnecessary overhead. import bpy from mathutils import Vector # mesh objects in scene scene = bpy.context.scene mesh_obs = [o for o in scene.objects if o.type == 'MESH'] for ob in mesh_obs: bbox_local_center = sum((Vector(b) for b in ob.bound_box), Vector()) / 8 center = ob.matrix_world @ bbox_local_center print(f"{ob.name} {center}") Thought this rang a bell, possible duplicate and source of question code. Get center of geometry of an object If you source code from elsewhere, providing a link gives context. Note. If instead you wish to get the "global bounding box" of all recommend using numpy. import bpy import numpy as np # mesh objects in scene scene = bpy.context.scene mesh_obs = [o for o in scene.objects if o.type == 'MESH'] # stack global coords of bboxes of all objects in scene coords = np.vstack( (np.dot( np.hstack( (np.array(ob.bound_box), np.ones(8).reshape(8, 1)) ), ob.matrix_world.transposed() ) for ob in mesh_obs) ) #bottom front left corner, top, right back corner print(coords.min(), coords.max()) $\endgroup$ You must log in to answer this question. Not the answer you're looking for? Browse other questions tagged .
__label__pos
0.813813
How do i share a link from my ipad? I dont find a share button Discussion in 'Off-Topic' started by pattiumland, Sep 24, 2012. 1. pattiumland pattiumland Expand Collapse iPF Noob Joined: Sep 24, 2012 Messages: 2 Thanks Received: 0 Anyone know how to do this ?   2. twerppoet twerppoet Expand Collapse iPad Legend Joined: Jan 8, 2011 Messages: 19,428 Thanks Received: 4,274 What app are you trying to share a link in/from? Some apps can, some can't.   3. pattiumland pattiumland Expand Collapse iPF Noob Joined: Sep 24, 2012 Messages: 2 Thanks Received: 0 I just want to be able to share any post or picture as if I'm on a regular pc. There isn't a share button on the ipad   4. twerppoet twerppoet Expand Collapse iPad Legend Joined: Jan 8, 2011 Messages: 19,428 Thanks Received: 4,274 Each app developer decides whether to add share options, or not. When they are there, they usually show up under the action/share icon, It looks like a box with and arrow curving out of it. SharePicture.png FaceBook is now more closely integrated with iOS 6, so expect to see it show up in more apps. Other than that, you can often tap and hold on a link to get a copy option. You can then go to another app and paste that link into whatever you are posting. CopyLink.png   Share This Page Search tags for this page facebook share button on ipad , how do i share a link on facebook from my ipad , how do i share on facebook from my ipad , how do you share a link on facebook on ipad , how to share a link on facebook on ipad , how to share a link on ipad , no share button on facebook ipad , share button on ipad , where is the share button on my ipad , why can't i share on facebook from my ipad
__label__pos
0.569459
GATE | GATE-CS-2006 | Question 40 Consider numbers represented in 4-bit gray code. Let h3h2h1h0 be the gray code representation of a number n and let g3g2g1g0 be the gray code of (n + 1) (modulo 16) value of the number. Which one of the following functions is correct? A: g_{0}(h_{3},h_{2},h_{1},h_{0})=\sum (1,2,3,6,10,13,14,15) B: g_{1}(h_{3},h_{2},h_{1},h_{0})=\sum (4,9,10,11,12,13,14,15) C: g_{2}(h_{3},h_{2},h_{1},h_{0})=\sum (2,4,5,6,7,12,13,15) D: g_{3}(h_{3},h_{2},h_{1},h_{0})=\sum (0,1,6,7,10,11,12,13) (A) A (B) B (C) C (D) D Answer: (C) Explanation: Write gray code (n) numbers from 0 to 15 and make another column for (n+1) numbers by shifting the next number on top. As shown in the table: gray-code Now to determine the min terms for g3, g2, g1, g0, we see the ‘1s’ in the corresponding columns. The digit they correspond to give the min terms of the function. Thus the answers we will be: G3(h3h2h1h0)= ∑(4,12,13,15,14,10,11,9) G2(h3h2h1h0)= ∑(2,6,7,5,4,12,13,15) G1(h3h2h1h0)= ∑(1,3,2,6,13,15,14,10) G0(h3h2h1h0)= ∑(0,1,6,7,12,13,10,11) Thus g2 is given correctly in the options. Read about K-Maps to know about mapping, SOP and POS forms: K-Map (Karnaugh Map) Watch NPTEL videos to learn more about: Code converters Logic Minimization Using Karnaugh Maps Karnaugh Map Minimization Using Maxterms This explanation has been contributed by Kriti Kushwaha. Quiz of this Question My Personal Notes arrow_drop_up Article Tags : Be the First to upvote. Please write to us at [email protected] to report any issue with the above content.
__label__pos
0.839182
SSL 2732_导弹拦截_dp+最小路径覆盖 发布于 2017-09-16  460 次阅读 题目描述 某国为了防御敌国的导弹袭击,发展出一种导弹拦截系统。 敌国的导弹形成了立体打击,每个导弹可以抽象成一个三维空间中的点(x; y; z)。拦截系统发射的炮弹也很好地应对了这种情况,每一发炮弹也可以视为一个三维空间中的点。 但是这种导弹拦截系统有一个缺陷:虽然它的第一发炮弹能够到达三维空间中任意的点,但是以后每一发炮弹到达点的坐标(x; y; z) 的三个坐标值都必须大于前一发炮弹的对应坐标值。 某天,雷达捕捉到敌国的导弹来袭。由于该系统还在试用阶段,所以只有一套系统,因此有可能不能拦截所有的导弹。 输入导弹飞来的坐标,计算这套系统最多能拦截多少导弹,如果要拦截所有导弹最少要配备多少套这种导弹拦截系统。注意: 所有导弹都是同时飞来的   思路 orz人生导师jpwang 前一问一个dp,f[i]表示拦截第i个可以拦截最多拦截到的导弹 f[i] = 1 f[i] = max(f[j]+1) (1 <= j < i) 且受题目限制   来考虑第二问 我们可以将i拆点为 i, i', 对于两个导弹i,j,若从i可以打j,那么我们就连一条(i,j')的边 这样就变成了一个二分图,因为我们要将全部点都达到,所以就是求一个二分图的最小路径覆盖 = 点数 - 最大匹配 跑网络流或其他算法都可以 #include <stdio.h> #include <queue> #include <algorithm> #include <cstring> #include <string> using namespace std; #define max(x, y) (x) > (y) ? (x) : (y) #define min(x, y) (x) < (y) ? (x) : (y) #define INF 0x7f7f7f7f #define fill(x, y) memset(x, y, sizeof(x)) struct edge { int x, y, z; }e[2000001]; struct arr { int to, w, next; }e1[2000001]; int n, f[10001], ls[20001], cur[20001], state[20001], S, E; inline int read() { int x=0,p=1;char ch=getchar(); while (ch<'0'||ch>'9'){if (ch=='-')p=-1;ch=getchar();} while (ch>='0'&&ch<='9'){x=(x<<1)+(x<<3)+ch-'0';ch=getchar();} return x*p; } int cmp(edge a, edge b) { return a.x < b.x ; } int maxE = 1; int add(int x, int y, int w) { e1[++maxE] = (arr) {y, w, ls[x]}; ls[x] = maxE; e1[++maxE] = (arr) {x, 0, ls[y]}; ls[y] = maxE; } int bfs(int S, int E) { queue<int> t; fill(state, 0); t.push(S); state[S] = 1; while (!t.empty()) { int now = t.front(); t.pop(); for (int i = ls[now]; i; i = e1[i].next) { if (e1[i].w > 0 && !state[e1[i].to]) { state[e1[i].to] = state[now] + 1; t.push(e1[i].to); if (e1[i].to == E) return true; } } } return false; } int find(int now, int mn) { if (!mn || now == E) return mn; int ret = 0; for (int &i = cur[now]; i; i = e1[i].next) if (state[now] + 1 == state[e1[i].to] && e1[i].w > 0) { int d = find(e1[i].to, min(e1[i].w, mn - ret)); e1[i].w -= d; e1[i^1].w += d; ret += d; if (ret == mn) break; } return ret; } int dinic() { int ans = 0; while (bfs(S, E)) { for (int i = S; i <= E; i++) cur[i] = ls[i]; ans += find(S, INF); } return ans; } int main() { scanf("%d", &n); for (int i = 1; i <= n; i++) { e[i].x = read(); e[i].y = read(); e[i].z = read(); } sort(e + 1, e + n + 1, cmp); for (int i = 1; i <= n; i++) { f[i] = 1; for (int j = 1; j < i; j++) { if (e[j].x < e[i].x && e[j].y < e[i].y && e[j].z < e[i].z) f[i] = max(f[i], f[j] + 1); } } int ans = 0; for (int i = 1; i <= n; i++) { ans = max(ans, f[i]); } printf("%d\n", ans); int l = 0; for (int i = 1; i <= n; i++) for (int j = 1; j < i; j++) if (e[j].x < e[i].x && e[j].y < e[i].y && e[j].z < e[i].z) add(j, i + n, 1); S = 0; E = n * 2 + 1; for (int i = 1; i <= n; i++) { add(S, i, 1); add(i + n, E, 1); } printf("%d", n - dinic()); } 「雪霁融雾月,冰消凝夜雨」
__label__pos
0.996219
Let Y1, Y2, …..Yn denote a random sample of size n froma population whose density is given by f(y) = [αy^(α – 1)]/(θ^α) 0=y=θ f(y) = 0 elsewhere where α > 0 is aknown, fixed value, but θis unknown. Consider the estimator θ hat = max (Y1, Y2,…..Yn). a. show that θ hat isa biased estimator of θ. Answers Detailed answers to tough homework problems
__label__pos
0.989785
BKCommonLib/Metrics From BergerHealer Wiki Revision as of 20:50, 9 June 2021 by RyanDo (talk | contribs) Jump to navigation Jump to search « Go back Introduction Metrics is a service provided by Hidendra to keep track of plugin statistics. It allows plugin developers to see who uses their plugin the most, and to keep track of certain features. This makes it easier to decide on new features and to see what parts of the plugin really matter to most people. The data is sent anonymously, that is, no server IP/player names/personal information is sent to the database. You do not want to participate in this as a server? Aw...oh well, you can opt-out on Metrics globally in the metrics configuration file: plugins\PluginMetrics\config.yml < change opt-out: to true. Metrics in BKCommonLib A Metrics implementation, almost entirely similar as the one provided by mcstats, is included in BKCommonLib. It provides an easy API layer to set up the data you wish to send to the server. Instead of scheduling a task to update it every other time, or having synchronization issues, the Metrics implementation in BKCommonLib works slightly differently. Creating Metrics for your plugin How to obtain a Metrics class instance to use it in your plugin. PluginBase If you use PluginBase for your plugin, all you have to do is add 'metrics: true' to the plugin.yml of the plugin. After that you can obtain the Metrics instance using the provided getMetrics() method. Before using it, check that metrics is available (and enabled) using the hasMetrics() method, otherwise errors will occur. Manually To manually create a new Metrics instance, use the following code: Metrics metrics = Metrics.initialize(this); if (metrics != null) { // Add your graphs here } You can also create a new Metrics instance using the provided constructor, and handle the errors yourself. In that case, make sure you also call start() once all graphs are added. The initialize method does that for you. Adding graphs Graphs in this implementation work slightly different than the ones found elsewhere. In BKCommonLib, Metrics graphs contain an onUpdate method in which the values can be set. It also does not contain a 'Plotter' class, instead it uses a map of values. it is up to the plugin to fill or update the data in the graph. The onUpdate method is called on the main thread (synchronized), so it is thread-safe to access Bukkit or plugin resources. To add a graph, use Metrics.addGraph. Changing plotter values in graphs A plotter value is basically a single value for part of a graph displayed on mcstats.org. For example, if you have a Commands graph, you can add plotter values for all commands the plugin provides, mapped to the amount of times it was performed. Plotter values have to be numeric (int, double, float, etc.), non-numeric text is not possible simply because it can't be displayed in a graph. There are several methods to change 'plotter' values: • clearPlotters - removes all set plotters • togglePlotter - Adds a plotter with value '1' if enabled, or removes the plotter if disabled • addPlotter - Adds a plotter mapping the value to a key Example graph implementation (found in enable() of a PluginBase): // Total server memory getMetrics().addGraph(new Graph("Total server memory") { @Override public void onUpdate(Plugin plugin) { clearPlotters(); // Get server total memory in MB (>> 20 = / (1024 * 1024)) final long mem = Runtime.getRuntime().totalMemory() >> 20; final String key; if (mem <= 512) { key = "0-512 MB"; } else if (mem <= 1024) { key = "512-1024 MB"; } else if (mem <= 2048) { key = "1024-2048 MB"; } else if (mem <= 4096) { key = "2048-4096 MB"; } else if (mem <= 8192) { key = "4096-8192 MB"; } else if (mem <= 16384) { key = "8-16 GB"; } else { key = "16+ GB"; } togglePlotter(key, true); } }); This produces a graph as follows (pie chart): Total server memory - Pie chart Default graph implementations BKCommonLib also includes some 'common' Graph implementations. These are: Important notes • onUpdate is not called if the server opted out, also if the server opted out after your metrics instance was started. • getMetrics() throws an exception if no metrics is available - use hasMetrics() before using it • To use metrics in PluginBase, add the metrics: true to the plugin.yml. By default metrics is disabled. • Only use numeric plotter values
__label__pos
0.871353
0 $\begingroup$ I'm trying to solve a simple system of differential equations but get outputs i just cant grasp, they seem way overkill. Someone said it might be because mathematica interprits the constants as non-real and therefore gives an over kill solution. I tried adding this assumption but it doesn't seem to help. I'm new to mathmatica and have no idea what I'm doing, any help would be appreciated. The code can be found below DSolve[{kbz*z1 + cbz*z11 == kbz*z2[t] + kvr*(z2[t] - z3[t]) + cbz*z2'[t] + cvr*(z2'[t] - z3'[t]), ksoil*zr == kvr*(z3[t] - z2[t]) + ksoil*z3[t] + cvr*(z3'[t] - z2'[t]), z2[0] == 0, z3[0] == 0}, {z2[t], z3[t]}, t, Assumptions -> {kbz, kvr, ksoil, cbz, cvr, z1, z11, zr} \[Element] Reals] Does anyone have any ideas of how to fix it? Edited for proper initial conditions. $\endgroup$ • 1 $\begingroup$ Indeed, the output is huge. What happens when you try FullSimplify[]? $\endgroup$ – J. M. will be back soon Feb 25 '16 at 15:54 • $\begingroup$ NDSolve is made for this. $\endgroup$ – user36273 Feb 25 '16 at 16:51 • $\begingroup$ FullSimplify simplified it quite a bit but the output is still very large. When trying to use the NDSolve I get the error: "Encountered non-numerical value for a derivative at t == 0.`. " NDSolve[{kbzz1 == kbzz2[t] + kvr*(z2[t] - z3[t]) + cbzz2'[t] + cvr*(z2'[t] - z3'[t]), ksoilzr == kvr*(z3[t] - z2[t]) + ksoil*z3[t] + cvr*(z3'[t] - z2'[t]), z2[0] == 0, z3[0] == 0}, {z2[t], z3[t]}, {t, 0, 30}] $\endgroup$ – tomatpinne Feb 26 '16 at 7:44 3 $\begingroup$ First answer (no initial conditions) (The original question did not have initial conditions in the system of equations.) Look at the simplified forms obtained after replacing the integration constants with appropriate values. For example, if we assign the solution to a variable: sol = DSolve[{kbz*z1 == kbz*z2[t] + kvr*(z2[t] - z3[t]) + cbz*z2'[t] + cvr*(z2'[t] - z3'[t]), ksoil*zr == kvr*(z3[t] - z2[t]) + ksoil*z3[t] + cvr*(z3'[t] - z2'[t])}, {z2[t], z3[t]}, t, Assumptions -> {z1, kbz, kvr, ksoil, cbz, cvr, zr} \[Element] Reals] using the replacement rules {C[1] -> 0, C[2] -> 0} we get this simpler expression for z2[t]: In[76]:= Simplify[(z2[t] /. sol[[1]]) /. {C[1] -> 0, C[2] -> 0}] Out[76]= (kbz (ksoil + kvr) z1 + ksoil kvr zr)/(ksoil kvr + kbz (ksoil + kvr)) and for z3[t]: In[77]:= Simplify[(z3[t] /. sol[[1]]) /. {C[1] -> 0, C[2] -> 0}] Out[77]= (kbz kvr z1 + kbz ksoil zr + ksoil kvr zr)/(ksoil kvr + kbz (ksoil + kvr)) Second answer (initial conditions and NDSolve) (This update is about dealing with the new system with added initial conditions.) In order to solve this system with NDSolve, numerical values of the different constants in it have to be specified. Since the original question started with the sentence: I'm trying to solve a simple system of differential equations but get outputs i just cant grasp, they seem way overkill. I think it is best to use a combination of simplified DSolve results and plotted NDSolve results within a dynamic interface with Manipulate. Here is an example: enter image description here The code below does that. (Note that the preliminary simplification of the DSolve results has to be done otherwise the simplification within Manipulate might take too long.) sol = DSolve[{kbz*z1 == kbz*z2[t] + kvr*(z2[t] - z3[t]) + cbz*z2'[t] + cvr*(z2'[t] - z3'[t]), ksoil*zr == kvr*(z3[t] - z2[t]) + ksoil*z3[t] + cvr*(z3'[t] - z2'[t]), z2[0] == 0, z3[0] == 0}, {z2[t], z3[t]}, t, Assumptions -> {z1, kbz, kvr, ksoil, cbz, cvr, zr} \[Element] Reals]; dsol = {z2[t] -> Simplify[(z2[t] /. sol[[1]])], z3[t] -> Simplify[(z3[t] /. sol[[1]])]}; varRangeWidth = 10; Manipulate[ ndsol = NDSolve[{mkbz*mz1 == mkbz*z2[t] + mkvr*(z2[t] - z3[t]) + mcbz*z2'[t] + mcvr*(z2'[t] - z3'[t]), mksoil*mzr == mkvr*(z3[t] - z2[t]) + mksoil*z3[t] + mcvr*(z3'[t] - z2'[t]), z2[0] == 0, z3[0] == 0}, {z2[t], z3[t]}, {t, 0, 10}]; rules = {z1 -> mz1, kbz -> mkbz, kvr -> mkvr, ksoil -> mksoil, cbz -> mcbz, cvr -> mcvr, zr -> mzr}; rfunc = If[rationalizeQ, Rationalize, Identity]; Column[{ Grid[{ {z2[t], rfunc@Simplify[(z2[t] /. dsol) /. rules]}, {z3[t], rfunc@Simplify[(z3[t] /. dsol) /. rules]} }, Dividers -> All], Plot[Evaluate[{z2[t], z3[t]} /. ndsol[[1]]], {t, 0, tend}, PlotTheme -> "Scientific", GridLines -> Automatic, PlotLegends -> {"z2[t]", "z3[t]"}, ImageSize -> 500] }], {{mz1, 1, "z1"}, -varRangeWidth, varRangeWidth}, {{mkbz, 1, "kbz"}, -varRangeWidth, varRangeWidth}, {{mkvr, 1, "kvr"}, -varRangeWidth, varRangeWidth}, {{mksoil, 1, "ksoil"}, -varRangeWidth, varRangeWidth}, {{mcbz, 1, "cbz"}, -varRangeWidth, varRangeWidth}, {{mcvr, 1, "cvr"}, -varRangeWidth, varRangeWidth}, {{mzr, 1, "zr"}, -varRangeWidth, varRangeWidth}, {{tend, 10}, 0, 100}, {rationalizeQ, {True, False}}] Here is another snapshot of the code output: enter image description here $\endgroup$ • $\begingroup$ Yes this solves it quite nicely. Unfortunately our initial conditions are z2[0]=0 and z3[0]=0. Sorry that I forgot to specify the initial conditions. $\endgroup$ – tomatpinne Feb 26 '16 at 7:46 • $\begingroup$ @tomatpinne Please change your question accordingly. $\endgroup$ – Anton Antonov Feb 26 '16 at 15:07 Your Answer By clicking “Post Your Answer”, you agree to our terms of service, privacy policy and cookie policy Not the answer you're looking for? Browse other questions tagged or ask your own question.
__label__pos
0.98298
/* * conn.c * * Does all this gloriously nifty connection handling stuff... * */ #include #include "sock.h" static int aim_logoff(aim_session_t *sess); /* * In OSCAR, every connection has a set of SNAC groups associated * with it. These are the groups that you can send over this connection * without being guarenteed a "Not supported" SNAC error. * * The grand theory of things says that these associations transcend * what libfaim calls "connection types" (conn->type). You can probably * see the elegance here, but since I want to revel in it for a bit, you * get to hear it all spelled out. * * So let us say that you have your core BOS connection running. One * of your modules has just given you a SNAC of the group 0x0004 to send * you. Maybe an IM destined for some twit in Greenland. So you start * at the top of your connection list, looking for a connection that * claims to support group 0x0004. You find one. Why, that neat BOS * connection of yours can do that. So you send it on its way. * * Now, say, that fellow from Greenland has friends and they all want to * meet up with you in a lame chat room. This has landed you a SNAC * in the family 0x000e and you have to admit you're a bit lost. You've * searched your connection list for someone who wants to make your life * easy and deliver this SNAC for you, but there isn't one there. * * Here comes the good bit. Without even letting anyone know, particularly * the module that decided to send this SNAC, and definitly not that twit * in Greenland, you send out a service request. In this request, you have * marked the need for a connection supporting group 0x000e. A few seconds * later, you receive a service redirect with an IP address and a cookie in * it. Great, you say. Now I have something to do. Off you go, making * that connection. One of the first things you get from this new server * is a message saying that indeed it does support the group you were looking * for. So you continue and send rate confirmation and all that. * * Then you remember you had that SNAC to send, and now you have a means to * do it, and you do, and everyone is happy. Except the Greenlander, who is * still stuck in the bitter cold. * * Oh, and this is useful for building the Migration SNACs, too. In the * future, this may help convince me to implement rate limit mitigation * for real. We'll see. * * Just to make me look better, I'll say that I've known about this great * scheme for quite some time now. But I still haven't convinced myself * to make libfaim work that way. It would take a fair amount of effort, * and probably some client API changes as well. (Whenever I don't want * to do something, I just say it would change the client API. Then I * instantly have a couple of supporters of not doing it.) * * Generally, addgroup is only called by the internal handling of the * server ready SNAC. So if you want to do something before that, you'll * have to be more creative. That is done rather early, though, so I don't * think you have to worry about it. Unless you're me. I care deeply * about such inane things. * */ void aim_conn_addgroup(aim_conn_t *conn, guint16 group) { aim_conn_inside_t *ins = (aim_conn_inside_t *)conn->inside; struct snacgroup *sg; if (!(sg = g_malloc(sizeof(struct snacgroup)))) return; sg->group = group; sg->next = ins->groups; ins->groups = sg; return; } aim_conn_t *aim_conn_findbygroup(aim_session_t *sess, guint16 group) { aim_conn_t *cur; for (cur = sess->connlist; cur; cur = cur->next) { aim_conn_inside_t *ins = (aim_conn_inside_t *)cur->inside; struct snacgroup *sg; for (sg = ins->groups; sg; sg = sg->next) { if (sg->group == group) return cur; } } return NULL; } static void connkill_snacgroups(struct snacgroup **head) { struct snacgroup *sg; for (sg = *head; sg; ) { struct snacgroup *tmp; tmp = sg->next; g_free(sg); sg = tmp; } *head = NULL; return; } static void connkill_rates(struct rateclass **head) { struct rateclass *rc; for (rc = *head; rc; ) { struct rateclass *tmp; struct snacpair *sp; tmp = rc->next; for (sp = rc->members; sp; ) { struct snacpair *tmpsp; tmpsp = sp->next; g_free(sp); sp = tmpsp; } g_free(rc); rc = tmp; } *head = NULL; return; } static void connkill_real(aim_session_t *sess, aim_conn_t **deadconn) { aim_rxqueue_cleanbyconn(sess, *deadconn); aim_tx_cleanqueue(sess, *deadconn); if ((*deadconn)->fd != -1) aim_conn_close(*deadconn); /* * XXX ->priv should never be touched by the library. I know * it used to be, but I'm getting rid of all that. Use * ->internal instead. */ if ((*deadconn)->priv) g_free((*deadconn)->priv); /* * This will free ->internal if it necessary... */ if ((*deadconn)->type == AIM_CONN_TYPE_CHAT) aim_conn_kill_chat(sess, *deadconn); if ((*deadconn)->inside) { aim_conn_inside_t *inside = (aim_conn_inside_t *)(*deadconn)->inside; connkill_snacgroups(&inside->groups); connkill_rates(&inside->rates); g_free(inside); } g_free(*deadconn); *deadconn = NULL; return; } /** * aim_connrst - Clears out connection list, killing remaining connections. * @sess: Session to be cleared * * Clears out the connection list and kills any connections left. * */ static void aim_connrst(aim_session_t *sess) { if (sess->connlist) { aim_conn_t *cur = sess->connlist, *tmp; while (cur) { tmp = cur->next; aim_conn_close(cur); connkill_real(sess, &cur); cur = tmp; } } sess->connlist = NULL; return; } /** * aim_conn_init - Reset a connection to default values. * @deadconn: Connection to be reset * * Initializes and/or resets a connection structure. * */ static void aim_conn_init(aim_conn_t *deadconn) { if (!deadconn) return; deadconn->fd = -1; deadconn->subtype = -1; deadconn->type = -1; deadconn->seqnum = 0; deadconn->lastactivity = 0; deadconn->forcedlatency = 0; deadconn->handlerlist = NULL; deadconn->priv = NULL; memset(deadconn->inside, 0, sizeof(aim_conn_inside_t)); return; } /** * aim_conn_getnext - Gets a new connection structure. * @sess: Session * * Allocate a new empty connection structure. * */ static aim_conn_t *aim_conn_getnext(aim_session_t *sess) { aim_conn_t *newconn; if (!(newconn = g_new0(aim_conn_t,1))) return NULL; if (!(newconn->inside = g_new0(aim_conn_inside_t,1))) { g_free(newconn); return NULL; } aim_conn_init(newconn); newconn->next = sess->connlist; sess->connlist = newconn; return newconn; } /** * aim_conn_kill - Close and free a connection. * @sess: Session for the connection * @deadconn: Connection to be freed * * Close, clear, and free a connection structure. Should never be * called from within libfaim. * */ void aim_conn_kill(aim_session_t *sess, aim_conn_t **deadconn) { aim_conn_t *cur, **prev; if (!deadconn || !*deadconn) return; for (prev = &sess->connlist; (cur = *prev); ) { if (cur == *deadconn) { *prev = cur->next; break; } prev = &cur->next; } if (!cur) return; /* oops */ connkill_real(sess, &cur); return; } /** * aim_conn_close - Close a connection * @deadconn: Connection to close * * Close (but not free) a connection. * * This leaves everything untouched except for clearing the * handler list and setting the fd to -1 (used to recognize * dead connections). It will also remove cookies if necessary. * */ void aim_conn_close(aim_conn_t *deadconn) { if (deadconn->fd >= 3) closesocket(deadconn->fd); deadconn->fd = -1; if (deadconn->handlerlist) aim_clearhandlers(deadconn); return; } /** * aim_getconn_type - Find a connection of a specific type * @sess: Session to search * @type: Type of connection to look for * * Searches for a connection of the specified type in the * specified session. Returns the first connection of that * type found. * * XXX except for RENDEZVOUS, all uses of this should be removed and * use aim_conn_findbygroup() instead. */ aim_conn_t *aim_getconn_type(aim_session_t *sess, int type) { aim_conn_t *cur; for (cur = sess->connlist; cur; cur = cur->next) { if ((cur->type == type) && !(cur->status & AIM_CONN_STATUS_INPROGRESS)) break; } return cur; } aim_conn_t *aim_getconn_type_all(aim_session_t *sess, int type) { aim_conn_t *cur; for (cur = sess->connlist; cur; cur = cur->next) { if (cur->type == type) break; } return cur; } /** * aim_newconn - Open a new connection * @sess: Session to create connection in * @type: Type of connection to create * @dest: Host to connect to (in "host:port" syntax) * * Opens a new connection to the specified dest host of specified * type, using the proxy settings if available. If @host is %NULL, * the connection is allocated and returned, but no connection * is made. * * FIXME: Return errors in a more sane way. * */ aim_conn_t *aim_newconn(aim_session_t *sess, int type, const char *dest) { aim_conn_t *connstruct; guint16 port = AIM_LOGIN_PORT; char *host; int i; if (!(connstruct = aim_conn_getnext(sess))) return NULL; connstruct->sessv = (void *)sess; connstruct->type = type; if (!dest) { /* just allocate a struct */ connstruct->fd = -1; connstruct->status = 0; return connstruct; } /* * As of 23 Jul 1999, AOL now sends the port number, preceded by a * colon, in the BOS redirect. This fatally breaks all previous * libfaims. Bad, bad AOL. * * We put this here to catch every case. * */ for(i = 0; i < (int)strlen(dest); i++) { if (dest[i] == ':') { port = atoi(&(dest[i+1])); break; } } host = (char *)g_malloc(i+1); strncpy(host, dest, i); host[i] = '\0'; connstruct->fd = proxy_connect(host, port, NULL, NULL); g_free(host); return connstruct; } /** * aim_conn_setlatency - Set a forced latency value for connection * @conn: Conn to set latency for * @newval: Number of seconds to force between transmits * * Causes @newval seconds to be spent between transmits on a connection. * * This is my lame attempt at overcoming not understanding the rate * limiting. * * XXX: This should really be replaced with something that scales and * backs off like the real rate limiting does. * */ int aim_conn_setlatency(aim_conn_t *conn, int newval) { if (!conn) return -1; conn->forcedlatency = newval; conn->lastactivity = 0; /* reset this just to make sure */ return 0; } /** * aim_session_init - Initializes a session structure * @sess: Session to initialize * @flags: Flags to use. Any of %AIM_SESS_FLAGS %OR'd together. * @debuglevel: Level of debugging output (zero is least) * * Sets up the initial values for a session. * */ void aim_session_init(aim_session_t *sess, guint32 flags, int debuglevel) { if (!sess) return; memset(sess, 0, sizeof(aim_session_t)); aim_connrst(sess); sess->queue_outgoing = NULL; sess->queue_incoming = NULL; aim_initsnachash(sess); sess->msgcookies = NULL; sess->snacid_next = 0x00000001; sess->flags = 0; sess->modlistv = NULL; sess->ssi.received_data = 0; sess->ssi.waiting_for_ack = 0; sess->ssi.holding_queue = NULL; sess->ssi.revision = 0; sess->ssi.items = NULL; sess->ssi.timestamp = (time_t)0; sess->locate.userinfo = NULL; sess->locate.torequest = NULL; sess->locate.requested = NULL; sess->locate.waiting_for_response = FALSE; sess->icq_info = NULL; sess->authinfo = NULL; sess->emailinfo = NULL; sess->oft_info = NULL; /* * Default to SNAC login unless XORLOGIN is explicitly set. */ if (!(flags & AIM_SESS_FLAGS_XORLOGIN)) sess->flags |= AIM_SESS_FLAGS_SNACLOGIN; sess->flags |= flags; /* * This must always be set. Default to the queue-based * version for back-compatibility. */ aim_tx_setenqueue(sess, AIM_TX_QUEUED, NULL); /* * Register all the modules for this session... */ aim__registermodule(sess, misc_modfirst); /* load the catch-all first */ aim__registermodule(sess, general_modfirst); aim__registermodule(sess, locate_modfirst); aim__registermodule(sess, buddylist_modfirst); aim__registermodule(sess, msg_modfirst); aim__registermodule(sess, admin_modfirst); aim__registermodule(sess, bos_modfirst); aim__registermodule(sess, search_modfirst); aim__registermodule(sess, stats_modfirst); aim__registermodule(sess, chatnav_modfirst); aim__registermodule(sess, chat_modfirst); /* missing 0x0f - 0x12 */ aim__registermodule(sess, ssi_modfirst); /* missing 0x14 */ aim__registermodule(sess, icq_modfirst); /* missing 0x16 */ aim__registermodule(sess, auth_modfirst); return; } /** * aim_session_kill - Deallocate a session * @sess: Session to kill * */ void aim_session_kill(aim_session_t *sess) { aim_cleansnacs(sess, -1); aim_logoff(sess); aim__shutdownmodules(sess); return; } /* * XXX this is nearly as ugly as proxyconnect(). */ int aim_conn_completeconnect(aim_session_t *sess, aim_conn_t *conn) { fd_set fds, wfds; struct timeval tv; int res, error = ETIMEDOUT; aim_rxcallback_t userfunc; if (!conn || (conn->fd == -1)) return -1; if (!(conn->status & AIM_CONN_STATUS_INPROGRESS)) return -1; FD_ZERO(&fds); FD_SET(conn->fd, &fds); FD_ZERO(&wfds); FD_SET(conn->fd, &wfds); tv.tv_sec = 0; tv.tv_usec = 0; if ((res = select(conn->fd+1, &fds, &wfds, NULL, &tv)) == -1) { error = errno; aim_conn_close(conn); errno = error; return -1; } else if (res == 0) { return 0; /* hasn't really completed yet... */ } if (FD_ISSET(conn->fd, &fds) || FD_ISSET(conn->fd, &wfds)) { socklen_t len = sizeof(error); if (getsockopt(conn->fd, SOL_SOCKET, SO_ERROR, &error, &len) < 0) error = errno; } if (error) { aim_conn_close(conn); errno = error; return -1; } sock_make_blocking(conn->fd); conn->status &= ~AIM_CONN_STATUS_INPROGRESS; if ((userfunc = aim_callhandler(sess, conn, AIM_CB_FAM_SPECIAL, AIM_CB_SPECIAL_CONNCOMPLETE))) userfunc(sess, NULL, conn); /* Flush out the queues if there was something waiting for this conn */ aim_tx_flushqueue(sess); return 0; } aim_session_t *aim_conn_getsess(aim_conn_t *conn) { if (!conn) return NULL; return (aim_session_t *)conn->sessv; } /* * aim_logoff() * * Closes -ALL- open connections. * */ static int aim_logoff(aim_session_t *sess) { aim_connrst(sess); /* in case we want to connect again */ return 0; } /* * aim_flap_nop() * * No-op. WinAIM 4.x sends these _every minute_ to keep * the connection alive. */ int aim_flap_nop(aim_session_t *sess, aim_conn_t *conn) { aim_frame_t *fr; if (!(fr = aim_tx_new(sess, conn, AIM_FRAMETYPE_FLAP, 0x05, 0))) return -ENOMEM; aim_tx_enqueue(sess, fr); return 0; }
__label__pos
0.99952